diff --git a/README.rst b/README.rst index 86f6ba868fd..671d9543688 100644 --- a/README.rst +++ b/README.rst @@ -15,7 +15,10 @@ The latest and most in-depth documentation on how to use Neutron is available at: . This includes: Neutron Administrator Guide - http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html + http://docs.openstack.org/admin-guide-cloud/networking.html + +Networking Guide + http://docs.openstack.org/networking-guide/ Neutron API Reference: http://docs.openstack.org/api/openstack-network/2.0/content/ diff --git a/TESTING.rst b/TESTING.rst index fae4f7cf84d..d29728cf42c 100644 --- a/TESTING.rst +++ b/TESTING.rst @@ -309,6 +309,10 @@ current unit tests coverage by running:: $ ./run_tests.sh -c +Since the coverage command can only show unit test coverage, a coverage +document is maintained that shows test coverage per area of code in: +doc/source/devref/testing_coverage.rst. + Debugging --------- diff --git a/bin/neutron-rootwrap-xen-dom0 b/bin/neutron-rootwrap-xen-dom0 index 8e92d33fed1..b4e2e31b5cf 100755 --- a/bin/neutron-rootwrap-xen-dom0 +++ b/bin/neutron-rootwrap-xen-dom0 @@ -24,7 +24,8 @@ responsible determining whether a command is safe to execute. from __future__ import print_function from six.moves import configparser as ConfigParser -import json +from oslo_serialization import jsonutils as json + import os import select import sys diff --git a/devstack/lib/l2_agent b/devstack/lib/l2_agent new file mode 100644 index 00000000000..b70efb1d4a4 --- /dev/null +++ b/devstack/lib/l2_agent @@ -0,0 +1,13 @@ +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" + fi +} + + +function configure_l2_agent { + iniset /$Q_PLUGIN_CONF_FILE agent extensions "$L2_AGENT_EXTENSIONS" +} diff --git a/devstack/lib/ml2 b/devstack/lib/ml2 new file mode 100644 index 00000000000..2275c11c072 --- /dev/null +++ b/devstack/lib/ml2 @@ -0,0 +1,13 @@ +function enable_ml2_extension_driver { + local extension_driver=$1 + if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension_driver + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension_driver}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS+=",$extension_driver" + fi +} + + +function configure_qos_ml2 { + enable_ml2_extension_driver "qos" +} diff --git a/devstack/lib/qos b/devstack/lib/qos new file mode 100644 index 00000000000..e9270c04321 --- /dev/null +++ b/devstack/lib/qos @@ -0,0 +1,20 @@ +function configure_qos_service_plugin { + _neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$Q_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} diff --git a/devstack/plugin.sh b/devstack/plugin.sh new file mode 100644 index 00000000000..5b245490d20 --- /dev/null +++ b/devstack/plugin.sh @@ -0,0 +1,18 @@ +LIBDIR=$DEST/neutron/devstack/lib + +source $LIBDIR/l2_agent +source $LIBDIR/ml2 +source $LIBDIR/qos + + +if [[ "$1" == "stack" && "$2" == "install" ]]; then + if is_service_enabled q-qos; then + configure_qos + fi +fi + +if [[ "$1" == "stack" && "$2" == "post-config" ]]; then + if is_service_enabled q-agt; then + configure_l2_agent + fi +fi diff --git a/devstack/settings b/devstack/settings new file mode 100644 index 00000000000..976317cd5ca --- /dev/null +++ b/devstack/settings @@ -0,0 +1,3 @@ +L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-} + +enable_service q-qos diff 
--git a/doc/dashboards/graphite.dashboard.html b/doc/dashboards/graphite.dashboard.html new file mode 100644 index 00000000000..932f2b25a72 --- /dev/null +++ b/doc/dashboards/graphite.dashboard.html @@ -0,0 +1,34 @@ + +

+Neutron Graphite Thumbnails - Click to see full size figure +

+ + + + + + + + + +
+Failure Percentage - Last 10 Days - DVR and Full Jobs
+ + + +
+Failure Percentage - Last 10 Days - Grenade, DSVM API/Functional/Fullstack
+ + + +
+Failure Percentage - Last 10 Days - Rally, LinuxBridge, LBaaS v1/v2
+ + + +
+Failure Percentage - Last 10 Days - Large Opts
+ + + +
diff --git a/doc/source/devref/alembic_migrations.rst b/doc/source/devref/alembic_migrations.rst new file mode 100644 index 00000000000..725bc46f648 --- /dev/null +++ b/doc/source/devref/alembic_migrations.rst @@ -0,0 +1,313 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Alembic Migrations +================== + +Introduction +------------ + +The migrations in the alembic/versions contain the changes needed to migrate +from older Neutron releases to newer versions. A migration occurs by executing +a script that details the changes needed to upgrade the database. The migration +scripts are ordered so that multiple scripts can run sequentially to update the +database. + + +The Migration Wrapper +--------------------- + +The scripts are executed by Neutron's migration wrapper ``neutron-db-manage`` +which uses the Alembic library to manage the migration. Pass the ``--help`` +option to the wrapper for usage information. + +The wrapper takes some options followed by some commands:: + + neutron-db-manage + +The wrapper needs to be provided with the database connection string, which is +usually provided in the ``neutron.conf`` configuration file in an installation. 
+The wrapper automatically reads from ``/etc/neutron/neutron.conf`` if it is +present. If the configuration is in a different location:: + + neutron-db-manage --config-file /path/to/neutron.conf + +Multiple ``--config-file`` options can be passed if needed. + +Instead of reading the DB connection from the configuration file(s) the +``--database-connection`` option can be used:: + + neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8 + +For some commands the wrapper needs to know the entrypoint of the core plugin +for the installation. This can be read from the configuration file(s) or +specified using the ``--core_plugin`` option:: + + neutron-db-manage --core_plugin neutron.plugins.ml2.plugin.Ml2Plugin + +When giving examples below of using the wrapper the options will not be shown. +It is assumed you will use the options that you need for your environment. + +For new deployments you will start with an empty database. You then upgrade +to the latest database version via:: + + neutron-db-manage upgrade heads + +For existing deployments the database will already be at some version. To +check the current database version:: + + neutron-db-manage current + +After installing a new version of Neutron server, upgrading the database is +the same command:: + + neutron-db-manage upgrade heads + +To create a script to run the migration offline:: + + neutron-db-manage upgrade heads --sql + +To run the offline migration between specific migration versions:: + + neutron-db-manage upgrade : --sql + +Upgrade the database incrementally:: + + neutron-db-manage upgrade --delta <# of revs> + +**NOTE:** Database downgrade is not supported. + + +Migration Branches +------------------ + +Neutron makes use of alembic branches for two purposes. + +1. Independent Sub-Project Tables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Various `sub-projects `_ can be installed with Neutron.
Each +sub-project registers its own alembic branch which is responsible for migrating +the schemas of the tables owned by the sub-project. + +The neutron-db-manage script detects which sub-projects have been installed by +enumerating the ``neutron.db.alembic_migrations`` entrypoints. For more details +see the `Entry Points section of Contributing extensions to Neutron +`_. + +The neutron-db-manage script runs the given alembic command against all +installed sub-projects. (An exception is the ``revision`` command, which is +discussed in the `Developers`_ section below.) + +2. Offline/Online Migrations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since Liberty, Neutron maintains two parallel alembic migration branches. + +The first one, called 'expand', is used to store expansion-only migration +rules. Those rules are strictly additive and can be applied while +neutron-server is running. Examples of additive database schema changes are: +creating a new table, adding a new table column, adding a new index, etc. + +The second branch, called 'contract', is used to store those migration rules +that are not safe to apply while neutron-server is running. Those include: +column or table removal, moving data from one part of the database into another +(renaming a column, transforming single table into multiple, etc.), introducing +or modifying constraints, etc. + +The intent of the split is to allow invoking those safe migrations from +'expand' branch while neutron-server is running, reducing downtime needed to +upgrade the service. + +For more details, see the `Expand and Contract Scripts`_ section below. + + +Developers +---------- + +A database migration script is required when you submit a change to Neutron or +a sub-project that alters the database model definition. The migration script +is a special python file that includes code to upgrade the database to match +the changes in the model definition. 
Alembic will execute these scripts in +order to provide a linear migration path between revisions. The +neutron-db-manage command can be used to generate migration scripts for you to +complete. The operations in the template are those supported by the Alembic +migration library. + + +Script Auto-generation +~~~~~~~~~~~~~~~~~~~~~~ + +:: + + neutron-db-manage revision -m "description of revision" --autogenerate + +This generates a prepopulated template with the changes needed to match the +database state with the models. You should inspect the autogenerated template +to ensure that the proper models have been altered. + +In rare circumstances, you may want to start with an empty migration template +and manually author the changes necessary for an upgrade. You can create a +blank file via:: + + neutron-db-manage revision -m "description of revision" + +The timeline on each alembic branch should remain linear and not interleave +with other branches, so that there is a clear path when upgrading. To verify +that alembic branches maintain linear timelines, you can run this command:: + + neutron-db-manage check_migration + +If this command reports an error, you can troubleshoot by showing the migration +timelines using the ``history`` command:: + + neutron-db-manage history + + +Expand and Contract Scripts +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The obsolete "branchless" design of a migration script included that it +indicates a specific "version" of the schema, and includes directives that +apply all necessary changes to the database at once. If we look for example at +the script ``2d2a8a565438_hierarchical_binding.py``, we will see:: + + # .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py + + def upgrade(): + + # .. inspection code ... + + op.create_table( + 'ml2_port_binding_levels', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + # ... more columns ... 
+ ) + + for table in port_binding_tables: + op.execute(( + "INSERT INTO ml2_port_binding_levels " + "SELECT port_id, host, 0 AS level, driver, segment AS segment_id " + "FROM %s " + "WHERE host <> '' " + "AND driver <> '';" + ) % table) + + op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey') + op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter') + op.drop_column('ml2_dvr_port_bindings', 'segment') + op.drop_column('ml2_dvr_port_bindings', 'driver') + + # ... more DROP instructions ... + +The above script contains directives that are both under the "expand" +and "contract" categories, as well as some data migrations. the ``op.create_table`` +directive is an "expand"; it may be run safely while the old version of the +application still runs, as the old code simply doesn't look for this table. +The ``op.drop_constraint`` and ``op.drop_column`` directives are +"contract" directives (the drop column moreso than the drop constraint); running +at least the ``op.drop_column`` directives means that the old version of the +application will fail, as it will attempt to access these columns which no longer +exist. + +The data migrations in this script are adding new +rows to the newly added ``ml2_port_binding_levels`` table. + +Under the new migration script directory structure, the above script would be +stated as two scripts; an "expand" and a "contract" script:: + + # expansion operations + # .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py + + def upgrade(): + + op.create_table( + 'ml2_port_binding_levels', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + # ... more columns ... 
+ ) + + + # contraction operations + # .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py + + def upgrade(): + + for table in port_binding_tables: + op.execute(( + "INSERT INTO ml2_port_binding_levels " + "SELECT port_id, host, 0 AS level, driver, segment AS segment_id " + "FROM %s " + "WHERE host <> '' " + "AND driver <> '';" + ) % table) + + op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey') + op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter') + op.drop_column('ml2_dvr_port_bindings', 'segment') + op.drop_column('ml2_dvr_port_bindings', 'driver') + + # ... more DROP instructions ... + +The two scripts would be present in different subdirectories and also part of +entirely separate versioning streams. The "expand" operations are in the +"expand" script, and the "contract" operations are in the "contract" script. + +For the time being, data migration rules also belong to contract branch. There +is expectation that eventually live data migrations move into middleware that +will be aware about different database schema elements to converge on, but +Neutron is still not there. + +Scripts that contain only expansion or contraction rules do not require a split +into two parts. + +If a contraction script depends on a script from expansion stream, the +following directive should be added in the contraction script:: + + depends_on = ('',) + + +Applying database migration rules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To apply just expansion rules, execute:: + + neutron-db-manage upgrade expand@head + +After the first step is done, you can stop neutron-server, apply remaining +non-expansive migration rules, if any:: + + neutron-db-manage upgrade contract@head + +and finally, start your neutron-server again. 
+ +If you are not interested in applying safe migration rules while the service is +running, you can still upgrade the database the old way, by stopping the service, +and then applying all available rules:: + + neutron-db-manage upgrade head[s] + +It will apply all the rules from both the expand and the contract branches, in +proper order. diff --git a/doc/source/devref/callbacks.rst b/doc/source/devref/callbacks.rst index 71c85f80edb..ff6cfc77e8f 100644 --- a/doc/source/devref/callbacks.rst +++ b/doc/source/devref/callbacks.rst @@ -300,6 +300,14 @@ The output is: FAQ === +Can I use the callbacks registry to subscribe and notify non-core resources and events? + + Short answer is yes. The callbacks module defines literals for what are considered core Neutron + resources and events. However, the ability to subscribe/notify is not limited to these as you + can use your own defined resources and/or events. Just make sure you use string literals, as + typos are common, and the registry does not provide any runtime validation. Therefore, make + sure you test your code! + What is the relationship between Callbacks and Taskflow? There is no overlap between Callbacks and Taskflow or mutual exclusion; as matter of fact they @@ -315,6 +323,16 @@ Is there any ordering guarantee during notifications? notified. Priorities can be a future extension, if a use case arises that require enforced ordering. +How is the notifying object expected to interact with the subscribing objects? + + The ``notify`` method implements a one-way communication paradigm: the notifier sends a message + without expecting a response back (in other words it fires and forgets). However, due to the nature + of Python, the payload can be mutated by the subscribing objects, and this can lead to unexpected + behavior of your code, if you assume that this is the intentional design. Bear in mind that + passing-by-value using deepcopy was not chosen for efficiency reasons.
Having said that, if you + intend for the notifier object to expect a response, then the notifier itself would need to act + as a subscriber. + Is the registry thread-safe? Short answer is no: it is not safe to make mutations while callbacks are being called (more diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index d83de01b03a..1ba7adaffef 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -439,7 +439,7 @@ should take these steps to move the models for the tables out of tree. third-party repo as is done in the neutron repo, i.e. ``networking_foo/db/migration/alembic_migrations/versions/*.py`` #. Remove the models from the neutron repo. -#. Add the names of the removed tables to ``DRIVER_TABLES`` in +#. Add the names of the removed tables to ``REPO_FOO_TABLES`` in ``neutron/db/migration/alembic_migrations/external.py`` (this is used for testing, see below). @@ -452,7 +452,7 @@ DB Model/Migration Testing ~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is a `template functional test -`_ (TODO:Ann) third-party +`_ third-party maintainers can use to develop tests for model-vs-migration sync in their repos. It is recommended that each third-party CI sets up such a test, and runs it regularly against Neutron master. @@ -461,7 +461,7 @@ Liberty Steps +++++++++++++ The model_sync test will be updated to ignore the models that have been moved -out of tree. A ``DRIVER_TABLES`` list will be maintained in +out of tree. ``REPO_FOO_TABLES`` lists will be maintained in ``neutron/db/migration/alembic_migrations/external.py``. @@ -520,9 +520,11 @@ the installer to configure this item in the ``[default]`` section. For example:: interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver **ToDo: Interface Driver port bindings.** - These are currently defined by the ``VIF_TYPES`` in - ``neutron/extensions/portbindings.py``. We could make this config-driven - for agents. 
For Nova, selecting the VIF driver can be done outside of + ``VIF_TYPE_*`` constants in ``neutron/extensions/portbindings.py`` should be + moved from neutron core to the repositories where their drivers are + implemented. We need to provide some config or hook mechanism for VIF types + to be registered by external interface drivers. For Nova, selecting the VIF + driver can be done outside of Neutron (using the new `os-vif python library `_?). Armando and Akihiro to discuss. diff --git a/doc/source/devref/db_layer.rst b/doc/source/devref/db_layer.rst index 2b6ded3fa05..248c85e0b52 100644 --- a/doc/source/devref/db_layer.rst +++ b/doc/source/devref/db_layer.rst @@ -23,150 +23,11 @@ should also be added in model. If default value in database is not needed, business logic. -How we manage database migration rules --------------------------------------- +Database migrations +------------------- -Since Liberty, Neutron maintains two parallel alembic migration branches. - -The first one, called 'expand', is used to store expansion-only migration -rules. Those rules are strictly additive and can be applied while -neutron-server is running. Examples of additive database schema changes are: -creating a new table, adding a new table column, adding a new index, etc. - -The second branch, called 'contract', is used to store those migration rules -that are not safe to apply while neutron-server is running. Those include: -column or table removal, moving data from one part of the database into another -(renaming a column, transforming single table into multiple, etc.), introducing -or modifying constraints, etc. - -The intent of the split is to allow invoking those safe migrations from -'expand' branch while neutron-server is running, reducing downtime needed to -upgrade the service. 
- -To apply just expansion rules, execute: - -- neutron-db-manage upgrade liberty_expand@head - -After the first step is done, you can stop neutron-server, apply remaining -non-expansive migration rules, if any: - -- neutron-db-manage upgrade liberty_contract@head - -and finally, start your neutron-server again. - -If you are not interested in applying safe migration rules while the service is -running, you can still upgrade database the old way, by stopping the service, -and then applying all available rules: - -- neutron-db-manage upgrade head[s] - -It will apply all the rules from both the expand and the contract branches, in -proper order. - - -Expand and Contract Scripts ---------------------------- - -The obsolete "branchless" design of a migration script included that it -indicates a specific "version" of the schema, and includes directives that -apply all necessary changes to the database at once. If we look for example at -the script ``2d2a8a565438_hierarchical_binding.py``, we will see:: - - # .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py - - def upgrade(): - - # .. inspection code ... - - op.create_table( - 'ml2_port_binding_levels', - sa.Column('port_id', sa.String(length=36), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - # ... more columns ... - ) - - for table in port_binding_tables: - op.execute(( - "INSERT INTO ml2_port_binding_levels " - "SELECT port_id, host, 0 AS level, driver, segment AS segment_id " - "FROM %s " - "WHERE host <> '' " - "AND driver <> '';" - ) % table) - - op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey') - op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter') - op.drop_column('ml2_dvr_port_bindings', 'segment') - op.drop_column('ml2_dvr_port_bindings', 'driver') - - # ... more DROP instructions ... - -The above script contains directives that are both under the "expand" -and "contract" categories, as well as some data migrations. 
the ``op.create_table`` -directive is an "expand"; it may be run safely while the old version of the -application still runs, as the old code simply doesn't look for this table. -The ``op.drop_constraint`` and ``op.drop_column`` directives are -"contract" directives (the drop column moreso than the drop constraint); running -at least the ``op.drop_column`` directives means that the old version of the -application will fail, as it will attempt to access these columns which no longer -exist. - -The data migrations in this script are adding new -rows to the newly added ``ml2_port_binding_levels`` table. - -Under the new migration script directory structure, the above script would be -stated as two scripts; an "expand" and a "contract" script:: - - # expansion operations - # .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py - - def upgrade(): - - op.create_table( - 'ml2_port_binding_levels', - sa.Column('port_id', sa.String(length=36), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - # ... more columns ... - ) - - - # contraction operations - # .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py - - def upgrade(): - - for table in port_binding_tables: - op.execute(( - "INSERT INTO ml2_port_binding_levels " - "SELECT port_id, host, 0 AS level, driver, segment AS segment_id " - "FROM %s " - "WHERE host <> '' " - "AND driver <> '';" - ) % table) - - op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey') - op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter') - op.drop_column('ml2_dvr_port_bindings', 'segment') - op.drop_column('ml2_dvr_port_bindings', 'driver') - - # ... more DROP instructions ... - -The two scripts would be present in different subdirectories and also part of -entirely separate versioning streams. The "expand" operations are in the -"expand" script, and the "contract" operations are in the "contract" script. 
- -For the time being, data migration rules also belong to contract branch. There -is expectation that eventually live data migrations move into middleware that -will be aware about different database schema elements to converge on, but -Neutron is still not there. - -Scripts that contain only expansion or contraction rules do not require a split -into two parts. - -If a contraction script depends on a script from expansion stream, the -following directive should be added in the contraction script:: - - depends_on = ('',) +For details on the neutron-db-manage wrapper and alembic migrations, see +`Alembic Migrations `_. Tests to verify that database migrations and models are in sync diff --git a/doc/source/devref/fullstack_testing.rst b/doc/source/devref/fullstack_testing.rst index 67a827ab535..f1ff581dc35 100644 --- a/doc/source/devref/fullstack_testing.rst +++ b/doc/source/devref/fullstack_testing.rst @@ -28,20 +28,23 @@ Why? ---- The idea behind "fullstack" testing is to fill a gap between unit + functional -tests and Tempest. Tempest tests are expensive to run, difficult to run in -a multi node environment, and are often very high level and provide little -indication to what is wrong, only that something is wrong. Developers further -benefit from full stack testing as it can sufficiently simulate a real -environment and provide a rapidly reproducible way to verify code as you're -still writing it. +tests and Tempest. Tempest tests are expensive to run, and operate only +through the REST API. So they can only provide an explanation of what went wrong +gets reported to an end user via the REST API, which is often too high level. +Additionally, Tempest requires an OpenStack deployment to be run against, which +can be difficult to configure and setup. The full stack testing addresses +these issues by taking care of the deployment itself, according to the topology +that the test requires. 
Developers further benefit from full stack testing as +it can sufficiently simulate a real environment and provide a rapidly +reproducible way to verify code as you're still writing it. How? ---- Full stack tests set up their own Neutron processes (Server & agents). They assume a working Rabbit and MySQL server before the run starts. Instructions -on how to run fullstack tests on a VM are available at TESTING.rst: -http://git.openstack.org/cgit/openstack/neutron/tree/TESTING.rst +on how to run fullstack tests on a VM are available in our +`TESTING.rst. `_ Each test defines its own topology (What and how many servers and agents should be running). @@ -52,10 +55,10 @@ through the API and then assert that a namespace was created for it. Full stack tests run in the Neutron tree with Neutron resources alone. You may use the Neutron API (The Neutron server is set to NOAUTH so that Keystone -is out of the picture). instances may be simulated with a helper class that -contains a container-like object in its own namespace and IP address. It has -helper methods to send different kinds of traffic. The "instance" may be -connected to br-int or br-ex, to simulate internal or external traffic. +is out of the picture). VMs may be simulated with a container-like class: +neutron.tests.fullstack.resources.machine.FakeFullstackMachine. +An example of its usage may be found at: +neutron/tests/fullstack/test_connectivity.py. Full stack testing can simulate multi node testing by starting an agent multiple times. Specifically, each node would have its own copy of the @@ -63,7 +66,7 @@ OVS/DHCP/L3 agents, all configured with the same "host" value. Each OVS agent is connected to its own pair of br-int/br-ex, and those bridges are then interconnected. -.. image:: images/fullstack-multinode-simulation.png +.. image:: images/fullstack_multinode_simulation.png When? 
----- diff --git a/doc/source/devref/images/fullstack-multinode-simulation.png b/doc/source/devref/images/fullstack-multinode-simulation.png deleted file mode 100644 index c124e4311e3..00000000000 Binary files a/doc/source/devref/images/fullstack-multinode-simulation.png and /dev/null differ diff --git a/doc/source/devref/images/fullstack_multinode_simulation.png b/doc/source/devref/images/fullstack_multinode_simulation.png new file mode 100644 index 00000000000..9736944b7fa Binary files /dev/null and b/doc/source/devref/images/fullstack_multinode_simulation.png differ diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 390023e3702..0885cd2523b 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -43,7 +43,9 @@ Programming HowTos and Tutorials contribute neutron_api sub_projects + sub_project_guidelines client_command_extensions + alembic_migrations Neutron Internals @@ -53,12 +55,15 @@ Neutron Internals services_and_agents api_layer + quota api_extensions plugin-api db_layer rpc_api + rpc_callbacks layer3 l2_agents + quality_of_service advanced_services oslo-incubator callbacks @@ -70,6 +75,8 @@ Testing :maxdepth: 3 fullstack_testing + testing_coverage + template_model_sync_test Module Reference ---------------- diff --git a/doc/source/devref/layer3.rst b/doc/source/devref/layer3.rst index 1960b5d70f0..809940722ec 100644 --- a/doc/source/devref/layer3.rst +++ b/doc/source/devref/layer3.rst @@ -50,7 +50,7 @@ Neutron logical network setup Neutron logical router setup ---------------------------- -* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html#under_the_hood_openvswitch_scenario1_network +* http://docs.openstack.org/networking-guide/scenario_legacy_ovs.html :: @@ -147,7 +147,7 @@ Neutron Routers are realized in OpenVSwitch Finding the router in ip/ipconfig --------------------------------- -* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html +* 
http://docs.openstack.org/admin-guide-cloud/networking.html The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT. In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent @@ -189,11 +189,11 @@ For example:: Provider Networking ------------------- -Neutron can also be configured to create `provider networks `_ +Neutron can also be configured to create `provider networks `_ Further Reading --------------- -* `Packet Pushers - Neutron Network Implementation on Linux `_ -* `OpenStack Cloud Administrator Guide `_ +* `Packet Pushers - Neutron Network Implementation on Linux `_ +* `OpenStack Cloud Administrator Guide `_ * `Neutron - Layer 3 API extension usage guide `_ * `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ diff --git a/doc/source/devref/linuxbridge_agent.rst b/doc/source/devref/linuxbridge_agent.rst index 8dbe1578833..ef21cf4a8ae 100644 --- a/doc/source/devref/linuxbridge_agent.rst +++ b/doc/source/devref/linuxbridge_agent.rst @@ -6,8 +6,8 @@ This Agent uses the `Linux Bridge `_ to provide L2 connectivity for VM instances running on the compute node to the public network. A graphical illustration of the deployment can be found in -`OpenStack Admin Guide Linux Bridge -`_ +`Networking Guide +`_ In most common deployments, there is a compute and a network node. On both the compute and the network node, the Linux Bridge Agent will manage virtual diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst index a4b2685320a..177520071bc 100644 --- a/doc/source/devref/openvswitch_agent.rst +++ b/doc/source/devref/openvswitch_agent.rst @@ -26,7 +26,6 @@ GRE Tunneling is documented in depth in the `Networking in too much detail `_ by RedHat. - VXLAN Tunnels ------------- @@ -35,6 +34,16 @@ at layer 2 into a UDP header. More information can be found in `The VXLAN wiki page. 
`_ +Geneve Tunnels +-------------- + +Geneve uses UDP as its transport protocol and is dynamic +in size using extensible option headers. +It is important to note that currently it is only supported in +newer kernels. (kernel >= 3.18, OVS version >=2.4) +More information can be found in the `Geneve RFC document. +`_ + Bridge Management ----------------- @@ -71,6 +80,7 @@ future to support existing VLAN-tagged traffic (coming from NFV VMs for instance) and/or to deal with potential QinQ support natively available in the Open vSwitch. + Further Reading --------------- diff --git a/doc/source/devref/quality_of_service.rst b/doc/source/devref/quality_of_service.rst new file mode 100644 index 00000000000..bd4a8c716dc --- /dev/null +++ b/doc/source/devref/quality_of_service.rst @@ -0,0 +1,357 @@ +================== +Quality of Service +================== + +Quality of Service advanced service is designed as a service plugin. The +service is decoupled from the rest of Neutron code on multiple levels (see +below). + +QoS extends core resources (ports, networks) without using mixins inherited +from plugins but through an ml2 extension driver. + +Details about the DB models, API extension, and use cases can be found here: `qos spec `_ +. + +Service side design +=================== +* neutron.extensions.qos: + base extension + API controller definition. Note that rules are subattributes + of policies and hence embedded into their URIs. + +* neutron.services.qos.qos_plugin: + QoSPlugin, service plugin that implements 'qos' extension, receiving and + handling API calls to create/modify policies and rules. + +* neutron.services.qos.notification_drivers.manager: + the manager that passes object notifications down to every enabled + notification driver. + +* neutron.services.qos.notification_drivers.qos_base: + the interface class for pluggable notification drivers that are used to + update backends about new {create, update, delete} events on any rule or + policy change. 
+ +* neutron.services.qos.notification_drivers.message_queue: + MQ-based reference notification driver which updates agents via messaging + bus, using `RPC callbacks `_. + +* neutron.core_extensions.base: + Contains an interface class to implement core resource (port/network) + extensions. Core resource extensions are then easily integrated into + interested plugins. We may need to have a core resource extension manager + that would utilize those extensions, to avoid plugin modifications for every + new core resource extension. + +* neutron.core_extensions.qos: + Contains QoS core resource extension that conforms to the interface described + above. + +* neutron.plugins.ml2.extensions.qos: + Contains ml2 extension driver that handles core resource updates by reusing + the core_extensions.qos module mentioned above. In the future, we would like + to see a plugin-agnostic core resource extension manager that could be + integrated into other plugins with ease. + + +Supported QoS rule types +------------------------ + +Any plugin or Ml2 mechanism driver can claim support for some QoS rule types by +providing a plugin/driver class property called 'supported_qos_rule_types' that +should return a list of strings that correspond to QoS rule types (for the list +of all rule types, see: neutron.extensions.qos.VALID_RULE_TYPES). + +In the most simple case, the property can be represented by a simple Python +list defined on the class. + +For Ml2 plugin, the list of supported QoS rule types is defined as a common +subset of rules supported by all active mechanism drivers. + +Note: the list of supported rule types reported by core plugin is not enforced +when accessing QoS rule resources. This is mostly because then we would not be +able to create any rules while at least one ml2 driver in gate lacks support +for QoS (at the moment of writing, linuxbridge is such a driver). 
+ + +Database models +--------------- + +QoS design defines the following two conceptual resources to apply QoS rules +for a port or a network: + +* QoS policy +* QoS rule (type specific) + +Each QoS policy contains zero or more QoS rules. A policy is then applied to a +network or a port, making all rules of the policy applied to the corresponding +Neutron resource (for a network, applying a policy means that the policy will +be applied to all ports that belong to it). + +From database point of view, following objects are defined in schema: + +* QosPolicy: directly maps to the conceptual policy resource. +* QosNetworkPolicyBinding, QosPortPolicyBinding: defines attachment between a + Neutron resource and a QoS policy. +* QosBandwidthLimitRule: defines the only rule type available at the moment. + + +All database models are defined under: + +* neutron.db.qos.models + + +QoS versioned objects +--------------------- + +There is a long history of passing database dictionaries directly into business +logic of Neutron. This path is not the one we wanted to take for QoS effort, so +we've also introduced a new objects middleware to encapsulate the database logic +from the rest of the Neutron code that works with QoS resources. For this, we've +adopted oslo.versionedobjects library and introduced a new NeutronObject class +that is a base for all other objects that will belong to the middle layer. +There is an expectation that Neutron will evolve into using objects for all +resources it handles, though that part was obviously out of scope for the QoS +effort. + +Every NeutronObject supports the following operations: + +* get_by_id: returns specific object that is represented by the id passed as an + argument. +* get_objects: returns all objects of the type, potentially with a filter + applied. +* create/update/delete: usual persistence operations. 
+ +Base object class is defined in: + +* neutron.objects.base + +For QoS, new neutron objects were implemented: + +* QosPolicy: directly maps to the conceptual policy resource, as defined above. +* QosBandwidthLimitRule: class that represents the only rule type supported by + initial QoS design. + +Those are defined in: + +* neutron.objects.qos.policy +* neutron.objects.qos.rule + +For QosPolicy neutron object, the following public methods were implemented: + +* get_network_policy/get_port_policy: returns a policy object that is attached + to the corresponding Neutron resource. +* attach_network/attach_port: attach a policy to the corresponding Neutron + resource. +* detach_network/detach_port: detach a policy from the corresponding Neutron + resource. + +In addition to the fields that belong to QoS policy database object itself, +synthetic fields were added to the object that represent lists of rules that +belong to the policy. To get a list of all rules for a specific policy, a +consumer of the object can just access the corresponding attribute via: + +* policy.rules + +Implementation is done in a way that will allow adding a new rule list field +with little or no modifications in the policy object itself. This is achieved +by smart introspection of existing available rule object definitions and +automatic definition of those fields on the policy class. + +Note that rules are loaded in a non lazy way, meaning they are all fetched from +the database on policy fetch. + +For QosRule objects, an extendable approach was taken to allow easy +addition of objects for new rule types. To accommodate this, fields common to +all types are put into a base class called QosRule that is then inherited into +type-specific rule implementations that, ideally, only define additional fields +and some other minor things.
+ +Note that the QosRule base class is not registered with oslo.versionedobjects +registry, because it's not expected that 'generic' rules should be +instantiated (and to suggest just that, the base rule class is marked as ABC). + +QoS objects rely on some primitive database API functions that are added in: + +* neutron.db.api: those can be reused to fetch other models that do not have + corresponding versioned objects yet, if needed. +* neutron.db.qos.api: contains database functions that are specific to QoS + models. + + +RPC communication +----------------- +Details on RPC communication implemented in reference backend driver are +discussed in `a separate page `_. + +One thing that should be mentioned here explicitly is that RPC callback +endpoints communicate using real versioned objects (as defined by serialization +for oslo.versionedobjects library), not vague json dictionaries. Meaning, +oslo.versionedobjects are on the wire and not just used internally inside a +component. + +One more thing to note is that though RPC interface relies on versioned +objects, it does not yet rely on versioning features the oslo.versionedobjects +library provides. This is because Liberty is the first release where we start +using the RPC interface, so we have no way to get different versions in a +cluster. That said, the versioning strategy for QoS is thought through and +described in `the separate page `_. + +There is expectation that after RPC callbacks are introduced in Neutron, we +will be able to migrate propagation from server to agents for other resources +(f.e. security groups) to the new mechanism. This will need to wait until those +resources get proper NeutronObject implementations. + +The flow of updates is as follows: + +* if a port that is bound to the agent is attached to a QoS policy, then ML2 + plugin detects the change by relying on ML2 QoS extension driver, and + notifies the agent about a port change. 
The agent proceeds with the + notification by calling to get_device_details() and getting the new port dict + that contains a new qos_policy_id. Each device details dict is passed into l2 + agent extension manager that passes it down into every enabled extension, + including QoS. QoS extension sees that there is a new unknown QoS policy for + a port, so it uses ResourcesPullRpcApi to fetch the current state of the + policy (with all the rules included) from the server. After that, the QoS + extension applies the rules by calling into QoS driver that corresponds to + the agent. +* on existing QoS policy update (it includes any policy or its rules change), + server pushes the new policy object state through ResourcesPushRpcApi + interface. The interface fans out the serialized (dehydrated) object to any + agent that is listening for QoS policy updates. If an agent has seen the + policy before (it is attached to one of the ports it maintains), then it goes + with applying the updates to the port. Otherwise, the agent silently ignores + the update. + + +Agent side design +================= + +To ease code reusability between agents and to avoid the need to patch an agent +for each new core resource extension, pluggable L2 agent extensions were +introduced. They can be especially interesting to third parties that don't want +to maintain their code in Neutron tree. + +Extensions are meant to receive handle_port events, and do whatever they need +with them. + +* neutron.agent.l2.agent_extension: + This module defines an abstract extension interface. + +* neutron.agent.l2.extensions.manager: + This module contains a manager that allows to register multiple extensions, + and passes handle_port events down to all enabled extensions. + +* neutron.agent.l2.extensions.qos + defines QoS L2 agent extension. It receives handle_port and delete_port + events and passes them down into QoS agent backend driver (see below). The + file also defines the QosAgentDriver interface.
Note: each backend implements + its own driver. The driver handles low level interaction with the underlying + networking technology, while the QoS extension handles operations that are + common to all agents. + + +Agent backends +-------------- + +At the moment, QoS is supported by Open vSwitch and SR-IOV ml2 drivers. + +Each agent backend defines a QoS driver that implements the QosAgentDriver +interface: + +* Open vSwitch (QosOVSAgentDriver); +* SR-IOV (QosSRIOVAgentDriver). + + +Open vSwitch +~~~~~~~~~~~~ + +Open vSwitch implementation relies on the new ovs_lib OVSBridge functions: + +* get_egress_bw_limit_for_port +* create_egress_bw_limit_for_port +* delete_egress_bw_limit_for_port + +An egress bandwidth limit is effectively configured on the port by setting +the port Interface parameters ingress_policing_rate and +ingress_policing_burst. + +That approach is less flexible than linux-htb, Queues and OvS QoS profiles, +which we may explore in the future, but which will need to be used in +combination with openflow rules. + +SR-IOV +~~~~~~ + +SR-IOV bandwidth limit implementation relies on the new pci_lib function: + +* set_vf_max_rate + +As the name of the function suggests, the limit is applied on a Virtual +Function (VF). + +ip link interface has the following limitation for bandwidth limit: it uses +Mbps as units of bandwidth measurement, not kbps, and does not support float +numbers. So in case the limit is set to something less than 1000 kbps, it's set +to 1 Mbps only. If the limit is set to something that does not divide to 1000 +kbps chunks, then the effective limit is rounded to the nearest integer Mbps +value. + + +Configuration +============= + +To enable the service, the following steps should be followed: + +On server side: + +* enable qos service in service_plugins; +* set the needed notification_drivers in [qos] section (message_queue is the default); +* for ml2, add 'qos' to extension_drivers in [ml2] section. 
+ +On agent side (OVS): + +* add 'qos' to extensions in [agent] section. + + +Testing strategy +================ + +All the code added or extended as part of the effort got reasonable unit test +coverage. + + +Neutron objects +--------------- + +Base unit test classes to validate neutron objects were implemented in a way +that allows code reuse when introducing a new object type. + +There are two test classes that are utilized for that: + +* BaseObjectIfaceTestCase: class to validate basic object operations (mostly + CRUD) with database layer isolated. +* BaseDbObjectTestCase: class to validate the same operations with models in + place and database layer unmocked. + +Every new object implemented on top of one of those classes is expected to +either inherit existing test cases as is, or reimplement it, if it makes sense +in terms of how those objects are implemented. Specific test classes can +obviously extend the set of test cases as they see needed (f.e. you need to +define new test cases for those additional methods that you may add to your +object implementations on top of base semantics common to all neutron objects). + + +Functional tests +---------------- + +Additions to ovs_lib to set bandwidth limits on ports are covered in: + +* neutron.tests.functional.agent.test_ovs_lib + + +API tests +--------- + +API tests for basic CRUD operations for ports, networks, policies, and rules were added in: + +* neutron.tests.api.test_qos diff --git a/doc/source/devref/quota.rst b/doc/source/devref/quota.rst new file mode 100644 index 00000000000..53bd6ce515b --- /dev/null +++ b/doc/source/devref/quota.rst @@ -0,0 +1,332 @@ +================================ +Quota Management and Enforcement +================================ + +Most resources exposed by the Neutron API are subject to quota limits. +The Neutron API exposes an extension for managing such quotas. Quota limits are +enforced at the API layer, before the request is dispatched to the plugin. 
+ +Default values for quota limits are specified in neutron.conf. Admin users +can override those defaults values on a per-tenant basis. Limits are stored +in the Neutron database; if no limit is found for a given resource and tenant, +then the default value for such resource is used. +Configuration-based quota management, where every tenant gets the same quota +limit specified in the configuration file, has been deprecated as of the +Liberty release. + +Please note that Neutron does not support both specification of quota limits +per user and quota management for hierarchical multitenancy (as a matter of +fact Neutron does not support hierarchical multitenancy at all). Also, quota +limits are currently not enforced on RPC interfaces listening on the AMQP +bus. + +Plugin and ML2 drivers are not supposed to enforce quotas for resources they +manage. However, the subnet_allocation [#]_ extension is an exception and will +be discussed below. + +The quota management and enforcement mechanisms discussed here apply to every +resource which has been registered with the Quota engine, regardless of +whether such resource belongs to the core Neutron API or one of its extensions. + +High Level View +--------------- + +There are two main components in the Neutron quota system: + + * The Quota API extension; + * The Quota Engine. + +Both components rely on a quota driver. The neutron codebase currently defines +two quota drivers: + + * neutron.db.quota.driver.DbQuotaDriver + * neutron.quota.ConfDriver + +The latter driver is however deprecated. + +The Quota API extension handles quota management, whereas the Quota Engine +component handles quota enforcement. This API extension is loaded like any +other extension. For this reason plugins must explicitly support it by including +"quotas" in the support_extension_aliases attribute. + +In the Quota API simple CRUD operations are used for managing tenant quotas. 
+ +Please note that the current behaviour when deleting a tenant quota is to reset +quota limits for that tenant to configuration defaults. The API +extension does not validate the tenant identifier with the identity service. + +Performing quota enforcement is the responsibility of the Quota Engine. +RESTful API controllers, before sending a request to the plugin, try to obtain +a reservation from the quota engine for the resources specified in the client +request. If the reservation is successful, then it proceeds to dispatch the +operation to the plugin. + +For a reservation to be successful, the total amount of resources requested, +plus the total amount of resources reserved, plus the total amount of resources +already stored in the database should not exceed the tenant's quota limit. + +Finally, both quota management and enforcement rely on a "quota driver" [#]_, +whose task is basically to perform database operations. + +Quota Management +---------------- + +The quota management component is fairly straightforward. + +However, unlike the vast majority of Neutron extensions, it uses its own +controller class [#]_. +This class does not implement the POST operation. List, get, update, and +delete operations are implemented by the usual index, show, update and +delete methods. These methods simply call into the quota driver for either +fetching tenant quotas or updating them. + +The _update_attributes method is called only once in the controller lifetime. +This method dynamically updates Neutron's resource attribute map [#]_ so that +an attribute is added for every resource managed by the quota engine. +Request authorisation is performed in this controller, and only 'admin' users +are allowed to modify quotas for tenants. As the neutron policy engine is not +used, it is not possible to configure which users should be allowed to manage +quotas using policy.json.
+ +The driver operations dealing with quota management are: + + * delete_tenant_quota, which simply removes all entries from the 'quotas' + table for a given tenant identifier; + * update_quota_limit, which adds or updates an entry in the 'quotas' tenant for + a given tenant identifier and a given resource name; + * _get_quotas, which fetches limits for a set of resource and a given tenant + identifier + * _get_all_quotas, which behaves like _get_quotas, but for all tenants. + + +Resource Usage Info +------------------- + +Neutron has two ways of tracking resource usage info: + + * CountableResource, where resource usage is calculated every time quotas + limits are enforced by counting rows in the resource table and reservations + for that resource. + * TrackedResource, which instead relies on a specific table tracking usage + data, and performs explicitly counting only when the data in this table are + not in sync with actual used and reserved resources. + +Another difference between CountableResource and TrackedResource is that the +former invokes a plugin method to count resources. CountableResource should be +therefore employed for plugins which do not leverage the Neutron database. +The actual class that the Neutron quota engine will use is determined by the +track_quota_usage variable in the quota configuration section. If True, +TrackedResource instances will be created, otherwise the quota engine will +use CountableResource instances. +Resource creation is performed by the create_resource_instance factory method +in the neutron.quota.resource module. + +From a performance perspective, having a table tracking resource usage +has some advantages, albeit not fundamental. Indeed the time required for +executing queries to explicitly count objects will increase with the number of +records in the table. 
On the other hand, using TrackedResource will fetch a +single record, but has the drawback of having to execute an UPDATE statement +once the operation is completed. +Nevertheless, CountableResource instances do not simply perform a SELECT query +on the relevant table for a resource, but invoke a plugin method, which might +execute several statements and sometimes even interacts with the backend +before returning. +Resource usage tracking also becomes important for operational correctness +when coupled with the concept of resource reservation, discussed in another +section of this chapter. + +Tracking quota usage is not as simple as updating a counter every time +resources are created or deleted. +Indeed a quota-limited resource in Neutron can be created in several ways. +While a RESTful API request is the most common one, resources can be created +by RPC handlers listening on the AMQP bus, such as those which create DHCP +ports, or by plugin operations, such as those which create router ports. + +To this aim, TrackedResource instances are initialised with a reference to +the model class for the resource for which they track usage data. During +object initialisation, SqlAlchemy event handlers are installed for this class. +The event handler is executed after a record is inserted or deleted. +As a result, usage data for that resource will be marked as 'dirty' once +the operation completes, so that the next time usage data is requested, +it will be synchronised counting resource usage from the database. +Even if this solution has some drawbacks, listed in the 'exceptions and +caveats' section, it is more reliable than solutions such as: + + * Updating the usage counters with the new 'correct' value every time an + operation completes. + * Having a periodic task synchronising quota usage data with actual data in + the Neutron DB.
+ +Finally, regardless of whether CountableResource or TrackedResource is used, +the quota engine always invokes its count() method to retrieve resource usage. +Therefore, from the perspective of the Quota engine there is absolutely no +difference between CountableResource and TrackedResource. + +Quota Enforcement +----------------- + +**NOTE: The reservation engine is currently not wired into the API controller +as issues have been discovered with multiple workers. For more information +see _bug1468134** + +.. _bug1468134: https://bugs.launchpad.net/neutron/+bug/1486134 + +Before dispatching a request to the plugin, the Neutron 'base' controller [#]_ +attempts to make a reservation for requested resource(s). +Reservations are made by calling the make_reservation method in +neutron.quota.QuotaEngine. +The process of making a reservation is fairly straightforward: + + * Get current resource usages. This is achieved by invoking the count method + on every requested resource, and then retrieving the amount of reserved + resources. + * Fetch current quota limits for requested resources, by invoking the + _get_tenant_quotas method. + * Fetch expired reservations for selected resources. This amount will be + subtracted from resource usage. As in most cases there won't be any + expired reservation, this approach actually requires less DB operations than + doing a sum of non-expired, reserved resources for each request. + * For each resource calculate its headroom, and verify the requested + amount of resource is less than the headroom. + * If the above is true for all resource, the reservation is saved in the DB, + otherwise an OverQuotaLimit exception is raised. + +The quota engine is able to make a reservation for multiple resources. +However, it is worth noting that because of the current structure of the +Neutron API layer, there will not be any practical case in which a reservation +for multiple resources is made. 
For this reason performance optimisation +avoiding repeating queries for every resource are not part of the current +implementation. + +In order to ensure correct operations, a row-level lock is acquired in +the transaction which creates the reservation. The lock is acquired when +reading usage data. In case of write-set certification failures, +which can occur in active/active clusters such as MySQL galera, the decorator +oslo_db.api.wrap_db_retry will retry the transaction if a DBDeadLock +exception is raised. +While non-locking approaches are possible, it has been found out that, since +a non-locking algorithms increases the chances of collision, the cost of +handling a DBDeadlock is still lower than the cost of retrying the operation +when a collision is detected. A study in this direction was conducted for +IP allocation operations, but the same principles apply here as well [#]_. +Nevertheless, moving away for DB-level locks is something that must happen +for quota enforcement in the future. + +Committing and cancelling a reservation is as simple as deleting the +reservation itself. When a reservation is committed, the resources which +were committed are now stored in the database, so the reservation itself +should be deleted. The Neutron quota engine simply removes the record when +cancelling a reservation (ie: the request failed to complete), and also +marks quota usage info as dirty when the reservation is committed (ie: +the request completed correctly). +Reservations are committed or cancelled by respectively calling the +commit_reservation and cancel_reservation methods in neutron.quota.QuotaEngine. + +Reservations are not perennial. Eternal reservation would eventually exhaust +tenants' quotas because they would never be removed when an API worker crashes +whilst in the middle of an operation. +Reservation expiration is currently set to 120 seconds, and is not +configurable, not yet at least. 
Expired reservations are not counted when +calculating resource usage. While creating a reservation, if any expired +reservation is found, all expired reservation for that tenant and resource +will be removed from the database, thus avoiding build-up of expired +reservations. + +Setting up Resource Tracking for a Plugin +------------------------------------------ + +By default plugins do not leverage resource tracking. Having the plugin +explicitly declare which resources should be tracked is a precise design +choice aimed at limiting as much as possible the chance of introducing +errors in existing plugins. + +For this reason a plugin must declare which resource it intends to track. +This can be achieved using the tracked_resources decorator available in the +neutron.quota.resource_registry module. +The decorator should ideally be applied to the plugin's __init__ method. + +The decorator accepts in input a list of keyword arguments. The name of the +argument must be a resource name, and the value of the argument must be +a DB model class. For example: + +:: + @resource_registry.tracked_resources(network=models_v2.Network, + port=models_v2.Port, + subnet=models_v2.Subnet, + subnetpool=models_v2.SubnetPool) + +Will ensure network, port, subnet and subnetpool resources are tracked. +In theory, it is possible to use this decorator multiple times, and not +exclusively to __init__ methods. However, this would eventually lead to +code readability and maintainability problems, so developers are strongly +encourage to apply this decorator exclusively to the plugin's __init__ +method (or any other method which is called by the plugin only once +during its initialization). 
+ +Notes for Implementors of RPC Interfaces and RESTful Controllers +------------------------------------------------------------------------------- + +Neutron unfortunately does not have a layer which is called before dispatching +the operation from the plugin which can be leveraged both from RESTful and +RPC over AMQP APIs. In particular the RPC handlers call straight into the +plugin, without doing any request authorisation or quota enforcement. + +Therefore RPC handlers must explicitly indicate if they are going to call the +plugin to create or delete any sort of resources. This is achieved in a simple +way, by ensuring modified resources are marked as dirty after the RPC handler +execution terminates. To this aim developers can use the mark_resources_dirty +decorator available in the module neutron.quota.resource_registry. + +The decorator would scan the whole list of registered resources, and store +the dirty status for their usage trackers in the database for those resources +for which items have been created or destroyed during the plugin operation. + +Exceptions and Caveats +----------------------- + +Please be aware of the following limitations of the quota enforcement engine: + + * Subnet allocation from subnet pools, in particular shared pools, is also + subject to quota limit checks. However these checks are not enforced by the + quota engine, but through a mechanism implemented in the + neutron.ipam.subnetalloc module. This is because the Quota engine is not + able to satisfy the requirements for quotas on subnet allocation. + * The quota engine also provides a limit_check routine which enforces quota + checks without creating reservations. This way of doing quota enforcement + is extremely unreliable and superseded by the reservation mechanism. It + has not been removed to ensure off-tree plugins and extensions which leverage + it are not broken. + * SqlAlchemy events might not be the most reliable way for detecting changes
Since the event mechanism monitors the data model class, + it is paramount for a correct quota enforcement, that resources are always + created and deleted using object relational mappings. For instance, deleting + a resource with a query.delete call, will not trigger the event. SQLAlchemy + events should be considered as a temporary measure adopted as Neutron lacks + persistent API objects. + * As CountableResource instance do not track usage data, when making a + reservation no write-intent lock is acquired. Therefore the quota engine + with CountableResource is not concurrency-safe. + * The mechanism for specifying for which resources enable usage tracking + relies on the fact that the plugin is loaded before quota-limited resources + are registered. For this reason it is not possible to validate whether a + resource actually exists or not when enabling tracking for it. Developers + should pay particular attention into ensuring resource names are correctly + specified. + * The code assumes usage trackers are a trusted source of truth: if they + report a usage counter and the dirty bit is not set, that counter is + correct. If it's dirty than surely that counter is out of sync. + This is not very robust, as there might be issues upon restart when toggling + the use_tracked_resources configuration variable, as stale counters might be + trusted upon for making reservations. Also, the same situation might occur + if a server crashes after the API operation is completed but before the + reservation is committed, as the actual resource usage is changed but + the corresponding usage tracker is not marked as dirty. + +References +---------- + +.. [#] Subnet allocation extension: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/subnetallocation.py +.. [#] DB Quota driver class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/quota_db.py#n33 +.. 
[#] Quota API extension controller: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/quotasv2.py#n40 +.. [#] Neutron resource attribute map: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/attributes.py#n639 +.. [#] Base controller class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py#n50 +.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-February/057534.html diff --git a/doc/source/devref/rpc_callbacks.rst b/doc/source/devref/rpc_callbacks.rst new file mode 100644 index 00000000000..f72672482b3 --- /dev/null +++ b/doc/source/devref/rpc_callbacks.rst @@ -0,0 +1,187 @@ +================================= +Neutron Messaging Callback System +================================= + +Neutron already has a callback system [link-to: callbacks.rst] for +in-process resource callbacks where publishers and subscribers are able +to publish and subscribe for resource events. + +This system is different, and is intended to be used for inter-process +callbacks, via the messaging fanout mechanisms. + +In Neutron, agents may need to subscribe to specific resource details which +may change over time. The purpose of this messaging callback system +is to allow agent subscription to those resources without the need to extend +or modify existing RPC calls, or to create new RPC messages. + +A few resources which can benefit from this system: + +* QoS policies; +* Security Groups. + +Using a remote publisher/subscriber pattern, the information about such +resources could be published using fanout messages to all interested nodes, +minimizing messaging requests from agents to the server since the agents +get subscribed for their whole lifecycle (unless they unsubscribe). + +Within an agent, there could be multiple subscriber callbacks to the same +resource events, the resource updates would be dispatched to the subscriber +callbacks from a single message.
Any update would come in a single message, +doing only a single oslo versioned objects deserialization on each receiving +agent. + +This publishing/subscription mechanism is highly dependent on the format +of the resources passed around. This is why the library only allows +versioned objects to be published and subscribed. Oslo versioned objects +allow object version down/up conversion. #[vo_mkcompat]_ #[vo_mkcptests]_ + +For the VO's versioning schema look here: #[vo_versioning]_ + +versioned_objects serialization/deserialization with the +obj_to_primitive(target_version=..) and primitive_to_obj() #[ov_serdes]_ +methods is used internally to convert/retrieve objects before/after messaging. + +Considering rolling upgrades, there are several scenarios to look at: + +* publisher (generally neutron-server or a service) and subscriber (agent) + know the same version of the objects, so they serialize, and deserialize + without issues. + +* publisher knows (and sends) an older version of the object, subscriber + will get the object updated to latest version on arrival before any + callback is called. + +* publisher sends a newer version of the object, subscriber won't be able + to deserialize the object, in this case (PLEASE DISCUSS), we can think of two + strategies: + + +The strategy for upgrades will be: + During upgrades, we pin neutron-server to a compatible version for resource + fanout updates, and the server sends both the old, and the newer version. + The new agents process updates, taking the newer version of the resource + fanout updates. When the whole system upgraded, we un-pin the compatible + version fanout. 
+ +Serialized versioned objects look like:: + + {'versioned_object.version': '1.0', + 'versioned_object.name': 'QoSPolicy', + 'versioned_object.data': {'rules': [ + {'versioned_object.version': '1.0', + 'versioned_object.name': 'QoSBandwidthLimitRule', + 'versioned_object.data': {'name': u'a'}, + 'versioned_object.namespace': 'versionedobjects'} + ], + 'uuid': u'abcde', + 'name': u'aaa'}, + 'versioned_object.namespace': 'versionedobjects'} + +Topic names for every resource type RPC endpoint +================================================ + +neutron-vo-- + +In the future, we may want to get oslo messaging to support subscribing +topics dynamically, then we may want to use: + +neutron-vo--- instead, + +or something equivalent which would allow fine granularity for the receivers +to only get interesting information to them. + +Subscribing to resources +======================== + +Imagine that you have agent A, which just got to handle a new port, which +has an associated security group, and QoS policy. + +The agent code processing port updates may look like:: + + from neutron.api.rpc.callbacks.consumer import registry + from neutron.api.rpc.callbacks import events + from neutron.api.rpc.callbacks import resources + + + def process_resource_updates(resource_type, resource, event_type): + + # send to the right handler which will update any control plane + # details related to the updated resource... + + + def subscribe_resources(): + registry.subscribe(process_resource_updates, resources.SEC_GROUP) + + registry.subscribe(process_resource_updates, resources.QOS_POLICY) + + def port_update(port): + + # here we extract sg_id and qos_policy_id from port.. + + sec_group = registry.pull(resources.SEC_GROUP, sg_id) + qos_policy = registry.pull(resources.QOS_POLICY, qos_policy_id) + + +The relevant function is: + +* subscribe(callback, resource_type): subscribes callback to a resource type. 
+ + +The callback function will receive the following arguments: + +* resource_type: the type of resource which is receiving the update. +* resource: the updated resource object (of a supported type). +* event_type: will be one of CREATED, UPDATED, or DELETED, see + neutron.api.rpc.callbacks.events for details. + +With the underlying oslo_messaging support for dynamic topics on the receiver +we cannot implement a per "resource type + resource id" topic, rabbitmq seems +to handle 10000's of topics without suffering, but creating 100's of +oslo_messaging receivers on different topics seems to crash. + +We may want to look into that later, to avoid agents receiving resource updates +which are uninteresting to them. + +Unsubscribing from resources +============================ + +To unsubscribe registered callbacks: + +* unsubscribe(callback, resource_type): unsubscribe from a specific resource type. +* unsubscribe_all(): unsubscribe from all resources. + + +Sending resource events +======================= + +On the server side, resource updates could come from anywhere, a service plugin, +an extension, anything that updates, creates, or destroys the resource and that +is of any interest to subscribed agents. + +The server/publisher side may look like:: + + from neutron.api.rpc.callbacks.producer import registry + from neutron.api.rpc.callbacks import events + + def create_qos_policy(...): + policy = fetch_policy(...) + update_the_db(...) + registry.push(policy, events.CREATED) + + def update_qos_policy(...): + policy = fetch_policy(...) + update_the_db(...) + registry.push(policy, events.UPDATED) + + def delete_qos_policy(...): + policy = fetch_policy(...) + update_the_db(...) + registry.push(policy, events.DELETED) + + +References +========== +.. [#ov_serdes] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/tests/test_objects.py#L621 +.. [#vo_mkcompat] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/base.py#L460 +..
[#vo_mkcptests] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/tests/test_objects.py#L111 +.. [#vo_versioning] https://github.com/openstack/oslo.versionedobjects/blob/master/oslo_versionedobjects/base.py#L236 diff --git a/doc/source/devref/security_group_api.rst b/doc/source/devref/security_group_api.rst index 750c744f362..c888424f7ba 100644 --- a/doc/source/devref/security_group_api.rst +++ b/doc/source/devref/security_group_api.rst @@ -29,7 +29,7 @@ running on the compute nodes, and modifying the IPTables rules on each hyperviso * `Plugin RPC classes `_ - * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes + * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes * SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API * `Agent RPC classes `_ @@ -43,8 +43,8 @@ IPTables Driver * ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules`` -* ``prepare_port_filter`` `appends the port to an internal dictionary `_, ``filtered_ports`` which is used to track the internal state. +* ``prepare_port_filter`` appends the port to an internal dictionary, ``filtered_ports`` which is used to track the internal state. * Each security group has a `chain `_ in Iptables. -* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements `_ +* The ``IptablesFirewallDriver`` has a method to convert security group rules into iptables statements. 
diff --git a/doc/source/devref/sub_project_guidelines.rst b/doc/source/devref/sub_project_guidelines.rst new file mode 100644 index 00000000000..3c997f1ee95 --- /dev/null +++ b/doc/source/devref/sub_project_guidelines.rst @@ -0,0 +1,148 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Sub-Project Guidelines +====================== + +This document provides guidance for those who maintain projects that consume +main neutron or neutron advanced services repositories as a dependency. It is +not meant to describe projects that are not tightly coupled with Neutron code. + +Code Reuse +---------- + +At all times, avoid using any Neutron symbols that are explicitly marked as +private (those have an underscore at the start of their names). + +Oslo Incubator +~~~~~~~~~~~~~~ + +Don't ever reuse neutron code that comes from oslo-incubator in your +subprojects. For neutron repository, the code is usually located under the +following path: neutron.openstack.common.* + +If you need any oslo-incubator code in your repository, copy it into your +repository from oslo-incubator and then use it from there. 
+ +Neutron team does not maintain any backwards compatibility strategy for the +code subtree and can break anyone who relies on it at any time. + +Requirements +------------ + +Neutron dependency +~~~~~~~~~~~~~~~~~~ + +Subprojects usually depend on neutron repositories, by using -e git://... +schema to define such a dependency. The dependency *must not* be present in +requirements lists though, and instead belongs to tox.ini deps section. This is +because next pbr library releases do not guarantee -e git://... dependencies +will work. + +You may still put some versioned neutron dependency in your requirements list +to indicate the dependency for anyone who packages your subproject. + +Explicit dependencies +~~~~~~~~~~~~~~~~~~~~~ + +Each neutron project maintains its own lists of requirements. Subprojects that +depend on neutron while directly using some of those libraries that neutron +maintains as its dependencies must not rely on the fact that neutron will pull +the needed dependencies for them. Direct library usage requires that this +library is mentioned in requirements lists of the subproject. + +The reason to duplicate those dependencies is that neutron team does not stick +to any backwards compatibility strategy in regards to requirements lists, and +is free to drop any of those dependencies at any time, breaking anyone who +could rely on those libraries to be pulled by neutron itself. + +Automated requirements updates +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At all times, subprojects that use neutron as a dependency should make sure +their dependencies do not conflict with neutron's ones. + +Core neutron projects maintain their requirements lists by utilizing a +so-called proposal bot. To keep your subproject in sync with neutron, it is +highly recommended that you register your project in +openstack/requirements:projects.txt file to enable the bot to update +requirements for you. 
+ +Once a subproject opts into global requirements synchronization, it should enable +check-requirements jobs in project-config. For example, see `this patch +`_. + +Stable branches +--------------- + +Stable branches for libraries should be created at the same time when +corresponding neutron stable branches are cut off. This is to avoid situations +where a postponed cut-off results in a stable branch that contains some patches +that belong to the next release. This would require reverting patches, and this +is something you should avoid. + +Make sure your neutron dependency uses the corresponding stable branch for +neutron, not master. + +Note that to keep requirements in sync with core neutron repositories in stable +branches, you should make sure that your project is registered in +openstack/requirements:projects.txt *for the branch in question*. + +Subproject stable branches are supervised by horizontal `neutron-stable-maint +team `_. + +More info on stable branch process can be found on `the following page +`_. + +Releases +-------- + +It is suggested that sub-projects release new tarballs on PyPI from time to +time, especially for stable branches. It will make the life of packagers and +other consumers of your code easier. + +It is highly suggested that you do not strip pieces of the source tree (tests, +executables, tools) before releasing on PyPI: those missing pieces may be +needed to validate the package, or make the packaging easier or more complete. +As a rule of thumb, don't strip anything from the source tree unless completely +needed. + +Sub-Project Release Process +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To release a sub-project, follow these steps: + +* Only members of the `neutron-release + `_ gerrit group can + do releases. Make sure you talk to a member of neutron-release to perform + your release. +* For projects which have not moved to post-versioning, we need to push an + alpha tag to avoid pbr complaining.
The neutron-release group will handle + this. +* Modify setup.cfg to remove the version (if you have one), which moves your + project to post-versioning, similar to all the other Neutron projects. You + can skip this step if you don't have a version in setup.cfg. +* Have neutron-release push the tag to gerrit. +* Have neutron-release `tag the release + `_, + which will release the code to PyPi. diff --git a/doc/source/devref/sub_projects.rst b/doc/source/devref/sub_projects.rst index 4b2aa44147a..a53333ff8a9 100644 --- a/doc/source/devref/sub_projects.rst +++ b/doc/source/devref/sub_projects.rst @@ -67,6 +67,9 @@ working on testing. By being included, the project accepts oversight by the TC as a part of being in OpenStack, and also accepts oversight by the Neutron PTL. +It is also assumed the respective review teams will make sure their projects +stay in line with `current best practices `_. + Inclusion Criteria ------------------ @@ -100,6 +103,10 @@ repo but are summarized here to describe the functionality they provide. +-------------------------------+-----------------------+ | group-based-policy_ | intent | +-------------------------------+-----------------------+ +| kuryr_ | docker | ++-------------------------------+-----------------------+ +| networking-ale-omniswitch_ | ml2 | ++-------------------------------+-----------------------+ | networking-arista_ | ml2,l3 | +-------------------------------+-----------------------+ | networking-bagpipe-l2_ | ml2 | @@ -134,6 +141,8 @@ repo but are summarized here to describe the functionality they provide. 
+-------------------------------+-----------------------+ | networking-ofagent_ | ml2 | +-------------------------------+-----------------------+ +| networking-onos_ | ml2 | ++-------------------------------+-----------------------+ | networking-ovn_ | ml2 | +-------------------------------+-----------------------+ | networking-ovs-dpdk_ | ml2 | @@ -164,13 +173,23 @@ Functionality legend - vpn: a VPN service plugin; - lb: a Load Balancer service plugin; - intent: a service plugin that provides a declarative API to realize networking; +- docker: a Docker network plugin that uses Neutron to provide networking services to Docker containers; + +.. _networking-ale-omniswitch: + +ALE Omniswitch +++++++++++++++ + +* Git: https://git.openstack.org/cgit/openstack/networking-ale-omniswitch +* Launchpad: https://launchpad.net/networking-ale-omniswitch +* Pypi: https://pypi.python.org/pypi/networking-ale-omniswitch .. _networking-arista: Arista ++++++ -* Git: https://git.openstack.org/cgit/stackforge/networking-arista +* Git: https://git.openstack.org/cgit/openstack/networking-arista * Launchpad: https://launchpad.net/networking-arista * Pypi: https://pypi.python.org/pypi/networking-arista @@ -212,7 +231,7 @@ Brocade Cisco +++++ -* Git: https://git.openstack.org/cgit/stackforge/networking-cisco +* Git: https://git.openstack.org/cgit/openstack/networking-cisco * Launchpad: https://launchpad.net/networking-cisco * PyPI: https://pypi.python.org/pypi/networking-cisco @@ -268,6 +287,15 @@ IBM SDNVE * Git: https://git.openstack.org/cgit/stackforge/networking-ibm * Launchpad: https://launchpad.net/networking-ibm +.. _kuryr: + +Kuryr ++++++ + +* Git: https://git.openstack.org/cgit/openstack/kuryr/ +* Launchpad: https://launchpad.net/kuryr +* PyPI: https://pypi.python.org/pypi/kuryr/ + .. _networking-l2gw: L2 Gateway @@ -326,6 +354,15 @@ OpenFlow Agent (ofagent) * Launchpad: https://launchpad.net/networking-ofagent * PyPI: https://pypi.python.org/pypi/networking-ofagent +.. 
_networking-onos: + +Open Network Operating System (onos) +++++++++++++++++++++++++++++++++++++ + +* Git: https://git.openstack.org/cgit/openstack/networking-onos +* Launchpad: https://launchpad.net/networking-onos +* PyPI: https://pypi.python.org/pypi/networking-onos + .. _networking-ovn: Open Virtual Network @@ -348,7 +385,7 @@ Open DPDK PLUMgrid ++++++++ -* Git: https://git.openstack.org/cgit/stackforge/networking-plumgrid +* Git: https://git.openstack.org/cgit/openstack/networking-plumgrid * Launchpad: https://launchpad.net/networking-plumgrid * PyPI: https://pypi.python.org/pypi/networking-plumgrid diff --git a/doc/source/devref/template_model_sync_test.rst b/doc/source/devref/template_model_sync_test.rst new file mode 100644 index 00000000000..43f7b87c110 --- /dev/null +++ b/doc/source/devref/template_model_sync_test.rst @@ -0,0 +1,157 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Template for ModelMigrationSync for external repos +================================================== + +This section contains a template for a test which checks that the Python models +for database tables are synchronized with the alembic migrations that create +the database schema. 
This test should be implemented in all driver/plugin +repositories that were split out from Neutron. + +What does the test do? +---------------------- + +This test compares models with the result of existing migrations. It is based on +`ModelsMigrationsSync +`_ +which is provided by oslo.db and was adapted for Neutron. It compares core +Neutron models and vendor specific models with migrations from Neutron core and +migrations from the driver/plugin repo. This test is functional - it runs against +MySQL and PostgreSQL dialects. The detailed description of this test can be +found in Neutron Database Layer section - `Tests to verify that database +migrations and models are in sync +`_. + +Steps for implementing the test +------------------------------- + +1. Import all models in one place +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create a module ``networking_foo/db/models/head.py`` with the following +content: :: + + from neutron.db.migration.models import head + from networking_foo import models # noqa + # Alternatively, import separate modules here if the models are not in one + # models.py file + + + def get_metadata(): + return head.model_base.BASEV2.metadata + + +2. Implement the test module +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The test uses external.py from Neutron. This file contains lists of table +names, which were moved out of Neutron: :: + + VPNAAS_TABLES = [...] + + LBAAS_TABLES = [...] + + FWAAS_TABLES = [...] + + # Arista ML2 driver Models moved to openstack/networking-arista + REPO_ARISTA_TABLES = [...] + + # Models moved to openstack/networking-cisco + REPO_CISCO_TABLES = [...] + + ... + + TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + ... + + REPO_ARISTA_TABLES + REPO_CISCO_TABLES) + + +Also the test uses **VERSION_TABLE**, it is the name of table in database which +contains revision id of head migration. It is preferred to keep this variable in +``networking_foo/db/migration/alembic_migrations/__init__.py`` so it will be easy +to use in test. 
+ +Create a module ``networking_foo/tests/functional/db/test_migrations.py`` +with the following content: :: + + from oslo_config import cfg + + from neutron.db.migration.alembic_migrations import external + from neutron.db.migration import cli as migration + from neutron.tests.common import base + from neutron.tests.functional.db import test_migrations + + from networking_foo.db.migration import alembic_migrations + from networking_foo.db.models import head + + # EXTERNAL_TABLES should contain all names of tables that are not related to + # current repo. + EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES) + + + class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations): + + def db_sync(self, engine): + cfg.CONF.set_override('connection', engine.url, group='database') + for conf in migration.get_alembic_configs(): + self.alembic_config = conf + self.alembic_config.neutron_config = cfg.CONF + migration.do_alembic_command(conf, 'upgrade', 'heads') + + def get_metadata(self): + return head.get_metadata() + + def include_object(self, object_, name, type_, reflected, compare_to): + if type_ == 'table' and (name == 'alembic' or + name == alembic_migrations.VERSION_TABLE or + name in EXTERNAL_TABLES): + return False + else: + return True + + + class TestModelsMigrationsMysql(_TestModelsMigrationsFoo, + base.MySQLTestCase): + pass + + + class TestModelsMigrationsPsql(_TestModelsMigrationsFoo, + base.PostgreSQLTestCase): + pass + + +3. Add functional requirements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A separate file ``networking_foo/tests/functional/requirements.txt`` should be +created containing the following requirements that are needed for successful +test execution. 
+ +:: + + psutil>=1.1.1,<2.0.0 + psycopg2 + PyMySQL>=0.6.2 # MIT License + + +Example implementation `in VPNaaS `_ diff --git a/doc/source/devref/testing_coverage.rst b/doc/source/devref/testing_coverage.rst new file mode 100644 index 00000000000..bf0b44d95fa --- /dev/null +++ b/doc/source/devref/testing_coverage.rst @@ -0,0 +1,114 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Test Coverage +============= + +The intention is to track merged features or areas of code that lack certain +types of tests. This document may be used both by developers that want to +contribute tests, and operators that are considering adopting a feature. + +Coverage +-------- + +Note that while both API and scenario tests target a deployed OpenStack cloud, +API tests are under the Neutron tree and scenario tests are under the Tempest +tree. + +It is the expectation that API changes involve API tests, agent features +or modifications involve functional tests, and Neutron-wide features involve +fullstack or scenario tests as appropriate. + +The table references tests that explicitly target a feature, and not a job +that is configured to run against a specific backend (Thereby testing it +implicitly). 
So, for example, while the Linux bridge agent has a job that runs +the API and scenario tests with the Linux bridge agent configured, it does not +have functional tests that target the agent explicitly. The 'gate' column +is about running API/scenario tests with Neutron configured in a certain way, +such as what L2 agent to use or what type of routers to create. + +* V - Merged +* Blank - Not applicable +* X - Absent or lacking +* Patch number - Currently in review +* A name - That person has committed to work on an item + ++------------------------+------------+------------+------------+------------+------------+------------+ +| Area | Unit | Functional | API | Fullstack | Scenario | Gate | ++========================+============+============+============+============+============+============+ +| DVR | Partial* | L3-V OVS-X | V | amuller | X | V | ++------------------------+------------+------------+------------+------------+------------+------------+ +| L3 HA | V | V | X | 196393 | X | X | ++------------------------+------------+------------+------------+------------+------------+------------+ +| L2pop | V | X | | X | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| DHCP HA | V | | | amuller | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| OVS ARP responder | V | X* | | X* | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| OVS agent | V | Partial | | V | | V | ++------------------------+------------+------------+------------+------------+------------+------------+ +| Linux Bridge agent | V | X | | X | | Non-voting | ++------------------------+------------+------------+------------+------------+------------+------------+ +| Metering | V | X | V | X | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| DHCP agent 
| V | 136834 | | amuller | | V | ++------------------------+------------+------------+------------+------------+------------+------------+ +| rpc_workers | | | | | | X | ++------------------------+------------+------------+------------+------------+------------+------------+ +| Reference ipam driver | V | | | | | X (?) | ++------------------------+------------+------------+------------+------------+------------+------------+ +| MTU advertisement | V | | | X | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| VLAN transparency | V | | X | X | | | ++------------------------+------------+------------+------------+------------+------------+------------+ +| Prefix delegation | V | X | | X | | | ++------------------------+------------+------------+------------+------------+------------+------------+ + +* DVR DB unit tests often assert that internal methods were called instead of + testing functionality. A lot of our unit tests are flawed in this way, + and DVR unit tests especially so. An attempt to remedy this was made + in patch 178880. +* OVS ARP responder cannot be tested at the gate because the gate uses Ubuntu + 14.04 that only packages OVS 2.0. OVS added ARP manipulation support in + version 2.1. +* Prefix delegation doesn't have functional tests for the dibbler and pd + layers, nor for the L3 agent changes. + +Missing Infrastructure +---------------------- + +The following section details missing test *types*. If you want to pick up +an action item, please contact amuller for more context and guidance. + +* The Neutron team would like Rally to persist results over a window of time, + graph and visualize this data, so that reviewers could compare average runs + against a proposed patch. +* It's possible to test RPC methods via the unit tests infrastructure. This was + proposed in patch 162811. 
The goal is provide developers a light weight + way to rapidly run tests that target the RPC layer, so that a patch that + modifies an RPC method's signature could be verified quickly and locally. +* Neutron currently does not test an in-place upgrade (Upgrading the server + first, followed by agents one machine at a time). We make sure that the RPC + layer remains backwards compatible manually via the review process but have + no CI that verifies this. diff --git a/doc/source/man/neutron-server.rst b/doc/source/man/neutron-server.rst index ea6c4cbbb7c..1540b54bc9c 100644 --- a/doc/source/man/neutron-server.rst +++ b/doc/source/man/neutron-server.rst @@ -60,13 +60,14 @@ OPTIONS FILES ======== -plugins.ini file contains the plugin information -neutron.conf file contains configuration information in the form of python-gflags. +* plugins.ini file contains the plugin information. +* neutron.conf file contains neutron-server's configuration information. SEE ALSO ======== -* `OpenStack Neutron `__ +* `OpenStack Neutron Documents `__ +* `OpenStack Neutron Wiki Page `__ BUGS ==== diff --git a/doc/source/policies/core-reviewers.rst b/doc/source/policies/core-reviewers.rst index d6a54d05fdf..019e0705bea 100644 --- a/doc/source/policies/core-reviewers.rst +++ b/doc/source/policies/core-reviewers.rst @@ -62,9 +62,10 @@ gate and bug triage for their area of focus is under control. The following are the current Neutron Lieutenants. +------------------------+---------------------------+----------------------+ -| Area | Lieutenant | IRC nic | +| Area | Lieutenant | IRC nick | +========================+===========================+======================+ | API and DB | Akihiro Motoki | amotoki | +| +---------------------------+----------------------+ | | Henry Gessau | HenryG | +------------------------+---------------------------+----------------------+ | Built-In Control Plane | Kevin Benton | kevinbenton | @@ -73,6 +74,10 @@ The following are the current Neutron Lieutenants. 
+------------------------+---------------------------+----------------------+ | Docs | Edgar Magana | emagana | +------------------------+---------------------------+----------------------+ +| Infra | Armando Migliaccio | armax | +| +---------------------------+----------------------+ +| | Doug Wiegley | dougwig | ++------------------------+---------------------------+----------------------+ | L3 | Carl Baldwin | carl_baldwin | +------------------------+---------------------------+----------------------+ | Services | Doug Wiegley | dougwig | @@ -89,6 +94,7 @@ Some notes on the above: * Services includes FWaaS, LBaaS, and VPNaaS. * Note these areas may change as the project evolves due to code refactoring, new feature areas, and libification of certain pieces of code. +* Infra means interactions with infra from a neutron perspective Neutron also consists of several plugins, drivers, and agents that are developed effectively as sub-projects within Neutron in their own git repositories. @@ -100,19 +106,29 @@ updating the core review team for the sub-project's repositories. 
| Area | Lieutenant | IRC nick | +========================+===========================+======================+ | dragonflow | Eran Gampel | gampel | +| +---------------------------+----------------------+ +| | Gal Sagie | gsagie | ++------------------------+---------------------------+----------------------+ +| kuryr | Antoni Segura Puimedon | apuimedo | +| +---------------------------+----------------------+ | | Gal Sagie | gsagie | +------------------------+---------------------------+----------------------+ | networking-l2gw | Sukhdev Kapur | sukhdev | +------------------------+---------------------------+----------------------+ | networking-midonet | Ryu Ishimoto | ryu_ishimoto | +| +---------------------------+----------------------+ | | Jaume Devesa | devvesa | +| +---------------------------+----------------------+ | | YAMAMOTO Takashi | yamamoto | +------------------------+---------------------------+----------------------+ | networking-odl | Flavio Fernandes | flaviof | +| +---------------------------+----------------------+ | | Kyle Mestery | mestery | +------------------------+---------------------------+----------------------+ | networking-ofagent | YAMAMOTO Takashi | yamamoto | +------------------------+---------------------------+----------------------+ +| networking-onos | Vikram Choudhary | vikram | ++------------------------+---------------------------+----------------------+ | networking-ovn | Russell Bryant | russellb | +------------------------+---------------------------+----------------------+ | networking-plumgrid | Fawad Khaliq | fawadkhaliq | diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini index 115ff86a297..6996ed24fb4 100644 --- a/etc/dhcp_agent.ini +++ b/etc/dhcp_agent.ini @@ -36,11 +36,19 @@ # use_namespaces = True will be enforced. # use_namespaces = True +# In some cases the neutron router is not present to provide the metadata +# IP but the DHCP server can be used to provide this info. 
Setting this +# value will force the DHCP server to append specific host routes to the +# DHCP request. If this option is set, then the metadata service will be +# activated for all the networks. +# force_metadata = False + # The DHCP server can assist with providing metadata support on isolated # networks. Setting this value to True will cause the DHCP server to append # specific host routes to the DHCP request. The metadata service will only # be activated when the subnet does not contain any router port. The guest # instance must be configured to request host routes via DHCP (Option 121). +# This option doesn't have any effect when force_metadata is set to True. # enable_isolated_metadata = False # Allows for serving metadata requests coming from a dedicated metadata @@ -58,7 +66,8 @@ # Location to store DHCP server config files # dhcp_confs = $state_path/dhcp -# Domain to use for building the hostnames +# Domain to use for building the hostnames. This option will be deprecated in +# a future release. It is being replaced by dns_domain in neutron.conf # dhcp_domain = openstacklocal # Override the default dnsmasq settings with this file diff --git a/etc/l3_agent.ini b/etc/l3_agent.ini index 310b6b59e02..29a20de95e7 100644 --- a/etc/l3_agent.ini +++ b/etc/l3_agent.ini @@ -50,6 +50,11 @@ # and not through this parameter. # ipv6_gateway = +# (StrOpt) Driver used for ipv6 prefix delegation. This needs to be +# an entry point defined in the neutron.agent.linux.pd_drivers namespace. See +# setup.cfg for entry points included with the neutron source. +# prefix_delegation_driver = dibbler + # Indicates that this L3 agent should also handle routers that do not have # an external network gateway configured. 
This option should be True only + for a single agent in a Neutron deployment, and may be False for all agents diff --git a/etc/neutron.conf b/etc/neutron.conf index ca3baa9cf32..96d574c5fd8 100644 --- a/etc/neutron.conf +++ b/etc/neutron.conf @@ -75,7 +75,7 @@ # of its entrypoint name. # # service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos # Paste configuration file # api_paste_config = api-paste.ini @@ -114,6 +114,9 @@ # tell dnsmasq to use infinite lease times. # dhcp_lease_duration = 86400 +# Domain to use for building the hostnames +# dns_domain = openstacklocal + # Allow sending resource operation notification to DHCP agent # dhcp_agent_notification = True @@ -178,6 +181,11 @@ # Seconds to regard the agent as down; should be at least twice # report_interval, to be sure the agent is down for good # agent_down_time = 75 + +# Agent starts with admin_state_up=False when enable_new_agents=False. +# In that case, the user's resources will not be scheduled automatically to the +# agent until admin changes admin_state_up to True. +# enable_new_agents = True # =========== end of items for agent management extension ===== # =========== items for agent scheduler extension ============= @@ -256,6 +264,17 @@ # # Enable snat by default on external gateway when available # enable_snat_by_default = True +# +# The network type to use when creating the HA network for an HA router. +# By default or if empty, the first 'tenant_network_types' +# is used. This is helpful when the VRRP traffic should use a specific +# network which is not the default one. +# ha_network_type = +# Example: ha_network_type = flat +# +# The physical network name with which the HA network can be created. 
+# ha_network_physical_name = +# Example: ha_network_physical_name = physnet1 # =========== end of items for l3 extension ======= # =========== items for metadata proxy configuration ============== @@ -1017,3 +1036,7 @@ lock_path = $state_path/lock # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) # Deprecated group/name - [DEFAULT]/fake_rabbit # fake_rabbit = false + +[qos] +# Drivers list to use to send the update notification +# notification_drivers = message_queue diff --git a/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini deleted file mode 100644 index 0fab50706df..00000000000 --- a/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini +++ /dev/null @@ -1,50 +0,0 @@ -[sdnve] -# (ListOpt) The IP address of one (or more) SDN-VE controllers -# Default value is: controller_ips = 127.0.0.1 -# Example: controller_ips = 127.0.0.1,127.0.0.2 -# (StrOpt) The integration bridge for OF based implementation -# The default value for integration_bridge is None -# Example: integration_bridge = br-int -# (ListOpt) The interface mapping connecting the integration -# bridge to external network as a list of physical network names and -# interfaces: : -# Example: interface_mappings = default:eth2 -# (BoolOpt) Used to reset the integration bridge, if exists -# The default value for reset_bridge is True -# Example: reset_bridge = False -# (BoolOpt) Used to set the OVS controller as out-of-band -# The default value for out_of_band is True -# Example: out_of_band = False -# -# (BoolOpt) The fake controller for testing purposes -# Default value is: use_fake_controller = False -# (StrOpt) The port number for use with controller -# The default value for the port is 8443 -# Example: port = 8443 -# (StrOpt) The userid for use with controller -# The default value for the userid is admin -# Example: userid = sdnve_user -# (StrOpt) The password for use with controller -# The default value for the password is admin -# 
Example: password = sdnve_password -# -# (StrOpt) The default type of tenants (and associated resources) -# Available choices are: OVERLAY or OF -# The default value for tenant type is OVERLAY -# Example: default_tenant_type = OVERLAY -# (StrOpt) The string in tenant description that indicates -# Default value for OF tenants: of_signature = SDNVE-OF -# (StrOpt) The string in tenant description that indicates -# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY - -[sdnve_agent] -# (IntOpt) Agent's polling interval in seconds -# polling_interval = 2 -# (StrOpt) What to use for root helper -# The default value: root_helper = 'sudo' -# (BoolOpt) Whether to use rpc or not -# The default value: rpc = True - -[securitygroup] -# The security group is not supported: -# firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/etc/neutron/plugins/ml2/ml2_conf.ini b/etc/neutron/plugins/ml2/ml2_conf.ini index 9aad25b7b8b..2cef2c6ffb9 100644 --- a/etc/neutron/plugins/ml2/ml2_conf.ini +++ b/etc/neutron/plugins/ml2/ml2_conf.ini @@ -2,15 +2,16 @@ # (ListOpt) List of network type driver entrypoints to be loaded from # the neutron.ml2.type_drivers namespace. # -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan +# type_drivers = local,flat,vlan,gre,vxlan,geneve +# Example: type_drivers = flat,vlan,gre,vxlan,geneve # (ListOpt) Ordered list of network_types to allocate as tenant # networks. The default value 'local' is useful for single-box testing # but provides no connectivity between hosts. # # tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan +# Example: tenant_network_types = vlan,gre,vxlan,geneve + # (ListOpt) Ordered list of networking mechanism driver entrypoints # to be loaded from the neutron.ml2.mechanism_drivers namespace. 
@@ -93,6 +94,22 @@ # vxlan_group = # Example: vxlan_group = 239.1.1.1 +[ml2_type_geneve] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of Geneve VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (IntOpt) Geneve encapsulation header size is dynamic, this +# value is used to calculate the maximum MTU for the driver. +# this is the sum of the sizes of the outer ETH+IP+UDP+GENEVE +# header sizes. +# The default size for this field is 50, which is the size of the +# Geneve header without any additional option headers +# +# max_header_size = +# Example: max_header_size = 50 (Geneve headers with no additional options) + [securitygroup] # Controls if neutron security group is enabled or not. # It should be false when you use nova security group. diff --git a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini deleted file mode 100644 index 7900047ad2b..00000000000 --- a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini +++ /dev/null @@ -1,157 +0,0 @@ -[ml2_cisco] - -# (StrOpt) A short prefix to prepend to the VLAN number when creating a -# VLAN interface. For example, if an interface is being created for -# VLAN 2001 it will be named 'q-2001' using the default prefix. -# The total length allowed for the prefix name and VLAN is 32 characters, -# the prefix will be truncated if the total length is greater than 32. -# -# vlan_name_prefix = q- -# Example: vlan_name_prefix = vnet- - -# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. -# svi_round_robin = False - -# -# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. -# This string value must be present in the ml2_conf.ini network_vlan_ranges -# variable. -# -# managed_physical_network = -# Example: managed_physical_network = physnet1 - -# Cisco Nexus Switch configurations. -# Each switch to be managed by Openstack Neutron must be configured here. -# -# Cisco Nexus Switch Format. 
-# [ml2_mech_cisco_nexus:] -# = (1) -# ssh_port= (2) -# username= (3) -# password= (4) -# nve_src_intf= (5) -# physnet= (6) -# -# (1) For each host connected to a port on the switch, specify the hostname -# and the Nexus physical port (interface) it is connected to. -# Valid intf_type's are 'ethernet' and 'port-channel'. -# The default setting for is 'ethernet' and need not be -# added to this setting. -# (2) The TCP port for connecting via SSH to manage the switch. This is -# port number 22 unless the switch has been configured otherwise. -# (3) The username for logging into the switch to manage it. -# (4) The password for logging into the switch to manage it. -# (5) Only valid if VXLAN overlay is configured and vxlan_global_config is -# set to True. -# The NVE source interface is a loopback interface that is configured on -# the switch with valid /32 IP address. This /32 IP address must be known -# by the transient devices in the transport network and the remote VTEPs. -# This is accomplished by advertising it through a dynamic routing protocol -# in the transport network. (NB: If no nve_src_intf is defined then a -# default setting of 0 (creates "loopback0") will be used.) -# (6) Only valid if VXLAN overlay is configured. -# The physical network name defined in the network_vlan_ranges variable -# (defined under the ml2_type_vlan section) that this switch is controlling. -# The configured 'physnet' is the physical network domain that is connected -# to this switch. The vlan ranges defined in network_vlan_ranges for a -# a physical network are allocated dynamically and are unique per physical -# network. These dynamic vlans may be reused across physical networks. 
-# -# Example: -# [ml2_mech_cisco_nexus:1.1.1.1] -# compute1=1/1 -# compute2=ethernet:1/2 -# compute3=port-channel:1 -# ssh_port=22 -# username=admin -# password=mySecretPassword -# nve_src_intf=1 -# physnet=physnet1 - -# (StrOpt) A short prefix to prepend to the VLAN number when creating a -# provider VLAN interface. For example, if an interface is being created -# for provider VLAN 3003 it will be named 'p-3003' using the default prefix. -# The total length allowed for the prefix name and VLAN is 32 characters, -# the prefix will be truncated if the total length is greater than 32. -# -# provider_vlan_name_prefix = p- -# Example: provider_vlan_name_prefix = PV- - -# (BoolOpt) A flag indicating whether OpenStack networking should manage the -# creation and removal of VLANs for provider networks on the Nexus -# switches. If the flag is set to False then OpenStack will not create or -# remove VLANs for provider networks, and the administrator needs to -# manage these interfaces manually or by external orchestration. -# -# provider_vlan_auto_create = True - -# (BoolOpt) A flag indicating whether OpenStack networking should manage -# the adding and removing of provider VLANs from trunk ports on the Nexus -# switches. If the flag is set to False then OpenStack will not add or -# remove provider VLANs from trunk ports, and the administrator needs to -# manage these operations manually or by external orchestration. -# -# provider_vlan_auto_trunk = True - -# (BoolOpt) A flag indicating whether OpenStack networking should manage the -# creating and removing of the Nexus switch VXLAN global settings of 'feature -# nv overlay', 'feature vn-segment-vlan-based', 'interface nve 1' and the NVE -# subcommand 'source-interface loopback #'. If the flag is set to False -# (default) then OpenStack will not add or remove these VXLAN settings, and -# the administrator needs to manage these operations manually or by external -# orchestration. 
-# -# vxlan_global_config = True - -# (BoolOpt) To make Nexus device persistent by running the Nexus -# CLI 'copy run start' after applying successful configurations. -# (default) This flag defaults to False keep consistent with -# existing functionality. -# -# persistent_switch_config = False - -# (IntOpt) Time interval to check the state of the Nexus device. -# (default) This value defaults to 0 seconds which disables this -# functionality. When enabled, 30 seconds is suggested. -# -# switch_heartbeat_time = 0 - -# (IntOpt) Number of times to attempt config replay with switch. -# This variable depends on switch_heartbeat_time being enabled. -# (default) This value defaults to 3 -# -# switch_replay_count = 3 - -[ml2_type_nexus_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN Network IDs that are available for tenant network allocation. -# -# vni_ranges = -# Example: 100:1000,2000:6000 -# -# (ListOpt) Multicast groups for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. Comma separated -# list of min:max ranges of multicast IP's. -# NOTE: must be a valid multicast IP, invalid IP's will be discarded -# -# mcast_ranges = -# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1. 
- -[ml2_cisco_ucsm] - -# Cisco UCS Manager IP address -# ucsm_ip=1.1.1.1 - -# Username to connect to UCS Manager -# ucsm_username=user - -# Password to connect to UCS Manager -# ucsm_password=password - -# SR-IOV and VM-FEX vendors supported by this plugin -# xxxx:yyyy represents vendor_id:product_id -# supported_pci_devs = ['2222:3333', '4444:5555'] - -# Hostname to Service profile mapping for UCS Manager -# controlled compute hosts -# ucsm_host_list=Hostname1:Serviceprofile1, Hostname2:Serviceprofile2 diff --git a/etc/neutron/plugins/ml2/openvswitch_agent.ini b/etc/neutron/plugins/ml2/openvswitch_agent.ini index 5dd11a8ce88..99cbaca5465 100644 --- a/etc/neutron/plugins/ml2/openvswitch_agent.ini +++ b/etc/neutron/plugins/ml2/openvswitch_agent.ini @@ -57,6 +57,11 @@ # 'ovs-ofctl' is currently the only available choice. # of_interface = ovs-ofctl +# (StrOpt) ovs datapath to use. +# 'system' is the default value and corresponds to the kernel datapath. +# To enable the userspace datapath set this value to 'netdev' +# datapath_type = system + [agent] # Log agent heartbeats from this OVS agent # log_agent_heartbeats = False @@ -133,6 +138,11 @@ # # quitting_rpc_timeout = 10 +# (ListOpt) Extensions list to use +# Example: extensions = qos +# +# extensions = + [securitygroup] # Firewall driver for realizing neutron security group function. # firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/etc/neutron/plugins/nec/nec.ini b/etc/neutron/plugins/nec/nec.ini deleted file mode 100644 index 798a5a61a07..00000000000 --- a/etc/neutron/plugins/nec/nec.ini +++ /dev/null @@ -1,63 +0,0 @@ -# Sample Configurations - -[ovs] -# Do not change this parameter unless you have a good reason to. -# This is the name of the OVS integration bridge. There is one per hypervisor. -# The integration bridge acts as a virtual "patch port". All VM VIFs are -# attached to this bridge and then "patched" according to their network -# connectivity. 
-# integration_bridge = br-int - -[agent] -# Agent's polling interval in seconds -# polling_interval = 2 - -[securitygroup] -# Firewall driver for realizing neutron security group function -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True - -[ofc] -# Specify OpenFlow Controller Host, Port and Driver to connect. -# host = 127.0.0.1 -# port = 8888 - -# Base URL of OpenFlow Controller REST API. -# It is prepended to a path of each API request. -# path_prefix = - -# Drivers are in neutron/plugins/nec/drivers/ . -# driver = trema - -# PacketFilter is available when it's enabled in this configuration -# and supported by the driver. -# enable_packet_filter = true - -# Support PacketFilter on OFC router interface -# support_packet_filter_on_ofc_router = true - -# Use SSL to connect -# use_ssl = false - -# Key file -# key_file = - -# Certificate file -# cert_file = - -# Disable SSL certificate verification -# insecure_ssl = false - -# Maximum attempts per OFC API request. NEC plugin retries -# API request to OFC when OFC returns ServiceUnavailable (503). -# The value must be greater than 0. -# api_max_attempts = 3 - -[provider] -# Default router provider to use. -# default_router_provider = l3-agent -# List of enabled router providers. -# router_providers = l3-agent,openflow diff --git a/etc/neutron/plugins/plumgrid/plumgrid.ini b/etc/neutron/plugins/plumgrid/plumgrid.ini deleted file mode 100644 index bfe8062ae6d..00000000000 --- a/etc/neutron/plugins/plumgrid/plumgrid.ini +++ /dev/null @@ -1,14 +0,0 @@ -# Config file for Neutron PLUMgrid Plugin - -[plumgriddirector] -# This line should be pointing to the PLUMgrid Director, -# for the PLUMgrid platform. -# director_server= -# director_server_port= -# Authentification parameters for the Director. 
-# These are the admin credentials to manage and control -# the PLUMgrid Director server. -# username= -# password= -# servertimeout=5 -# driver= diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini deleted file mode 100644 index 560cebccfb4..00000000000 --- a/etc/neutron/plugins/vmware/nsx.ini +++ /dev/null @@ -1,283 +0,0 @@ -[DEFAULT] -# User name for NSX controller -# nsx_user = admin - -# Password for NSX controller -# nsx_password = admin - -# Time before aborting a request on an unresponsive controller (Seconds) -# http_timeout = 75 - -# Maximum number of times a particular request should be retried -# retries = 2 - -# Maximum number of times a redirect response should be followed -# redirects = 2 - -# Comma-separated list of NSX controller endpoints (:). When port -# is omitted, 443 is assumed. This option MUST be specified, e.g.: -# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80 - -# UUID of the pre-existing default NSX Transport zone to be used for creating -# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.: -# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53 - -# (Optional) UUID for the default l3 gateway service to use with this cluster. -# To be specified if planning to use logical routers with external gateways. -# default_l3_gw_service_uuid = - -# (Optional) UUID for the default l2 gateway service to use with this cluster. -# To be specified for providing a predefined gateway tenant for connecting their networks. -# default_l2_gw_service_uuid = - -# (Optional) UUID for the default service cluster. A service cluster is introduced to -# represent a group of gateways and it is needed in order to use Logical Services like -# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this -# config parameter *MUST BE* set to a valid pre-existent service cluster uuid. 
-# default_service_cluster_uuid = - -# Name of the default interface name to be used on network-gateway. This value -# will be used for any device associated with a network gateway for which an -# interface name was not specified -# nsx_default_interface_name = breth0 - -# Reconnect connection to nsx if not used within this amount of time. -# conn_idle_timeout = 900 - -[quotas] -# number of network gateways allowed per tenant, -1 means unlimited -# quota_network_gateway = 5 - -[nsxv] -# URL for NSXv manager -# manager_uri = https://management_ip - -# User name for NSXv manager -# user = admin - -# Password for NSXv manager -# password = default - -# (Required) Datacenter ID for Edge deployment -# datacenter_moid = - -# (Required) Cluster IDs for clusters containing OpenStack hosts -# cluster_moid = - -# (Optional) Deployment Container ID for NSX Edge deployment -# If not specified, either a default global container will be used, or -# the resource pool and datastore specified below will be used -# deployment_container_id = - -# (Optional) Resource pool ID for NSX Edge deployment -# resource_pool_id = - -# (Optional) Datastore ID for NSX Edge deployment -# datastore_id = - -# (Required) UUID of logic switch for physical network connectivity -# external_network = - -# (Optional) Asynchronous task status check interval -# default is 2000 (millisecond) -# task_status_check_interval = 2000 - -# (Optional) Network scope ID for VXLAN virtual wires -# vdn_scope_id = - -# (Optional) DVS ID for VLANS -# dvs_id = - -# (ListOpt) Define backup edge pool's management range with the four-tuple: -# :[edge_size]::. -# edge_type:'service'(service edge) or 'vdr'(distributed edge). -# edge_size: 'compact', 'large'(by default), 'xlarge' or 'quadlarge'. 
-# -# By default, edge pool manager would manage service edge -# with compact&&large size and distributed edge with large size as following: -# backup_edge_pool = service:large:4:10,service:compact:4:10,vdr:large:4:10 - -# (Optional) Maximum number of sub interfaces supported per vnic in edge -# default is 20 -# maximum_tunnels_per_vnic = 20 - -# Maximum number of API retries -# retries = 10 - -# (Optional) Network ID for management network connectivity -# mgt_net_moid = - -# (Optional) Management network IP address for metadata proxy -# mgt_net_proxy_ips = - -# (Optional) Management network netmask for metadata proxy -# mgt_net_proxy_netmask = - -# (Optional) Management network default gateway for metadata proxy -# mgt_net_default_gateway = - -# (Optional) IP addresses used by Nova metadata service -# nova_metadata_ips = - -# (Optional) TCP Port used by Nova metadata server -# nova_metadata_port = 8775 - -# (Optional) Shared secret to sign metadata requests -# metadata_shared_secret = - -# (Optional) Indicates if Nsxv spoofguard component is used to implement -# port-security feature. -# spoofguard_enabled = True - -# (ListOpt) Ordered list of router_types to allocate as tenant routers. -# It limits the router types that the Nsxv can support for tenants: -# distributed: router is supported by distributed edge at the backend. -# shared: multiple routers share the same service edge at the backend. -# exclusive: router exclusivly occupies one service edge at the backend. -# Nsxv would select the first available router type from tenant_router_types -# list if router-type is not specified. -# If the tenant defines the router type with "--distributed", -# "--router_type exclusive" or "--router_type shared", Nsxv would verify that -# the router type is in tenant_router_types. 
-# Admin supports all these three router types -# -# tenant_router_types = shared, distributed, exclusive -# Example: tenant_router_types = distributed, shared - -# (Optional) Enable an administrator to configure the edge user and password -# Username to configure for Edge appliance login -# edge_appliance_user = -# (Optional) Password to configure for Edge appliance login -# edge_appliance_password = - -# (Optional) URL for distributed locking coordination resource for lock manager -# This value is passed as a parameter to tooz coordinator. -# By default, value is None and oslo_concurrency is used for single-node -# lock management. -# locking_coordinator_url = - -# (Optional) DHCP lease time -# dhcp_lease_time = 86400 - -[nsx] -# Maximum number of ports for each bridged logical switch -# The recommended value for this parameter varies with NSX version -# Please use: -# NSX 2.x -> 64 -# NSX 3.0, 3.1 -> 5000 -# NSX 3.2 -> 10000 -# max_lp_per_bridged_ls = 5000 - -# Maximum number of ports for each overlay (stt, gre) logical switch -# max_lp_per_overlay_ls = 256 - -# Number of connections to each controller node. -# default is 10 -# concurrent_connections = 10 - -# Number of seconds a generation id should be valid for (default -1 meaning do not time out) -# nsx_gen_timeout = -1 - -# Acceptable values for 'metadata_mode' are: -# - 'access_network': this enables a dedicated connection to the metadata -# proxy for metadata server access via Neutron router. -# - 'dhcp_host_route': this enables host route injection via the dhcp agent. -# This option is only useful if running on a host that does not support -# namespaces otherwise access_network should be used. -# metadata_mode = access_network - -# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt) -# default_transport_type = stt - -# Specifies in which mode the plugin needs to operate in order to provide DHCP and -# metadata proxy services to tenant instances. 
If 'agent' is chosen (default) -# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to -# provide such services. In this mode, the plugin supports API extensions 'agent' -# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse), -# the plugin will use NSX logical services for DHCP and metadata proxy. This -# simplifies the deployment model for Neutron, in that the plugin no longer requires -# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode -# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above. -# Furthermore, a 'combined' mode is also provided and is used to support existing -# deployments that want to adopt the agentless mode going forward. With this mode, -# existing networks keep being served by the existing infrastructure (thus preserving -# backward compatibility, whereas new networks will be served by the new infrastructure. -# Migration tools are provided to 'move' one network from one model to another; with -# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is -# ignored, as new networks will no longer be scheduled to existing dhcp agents. -# agent_mode = agent - -# Specifies which mode packet replication should be done in. If set to service -# a service node is required in order to perform packet replication. This can -# also be set to source if one wants replication to be performed locally (NOTE: -# usually only useful for testing if one does not want to deploy a service node). -# In order to leverage distributed routers, replication_mode should be set to -# "service". -# replication_mode = service - -[nsx_sync] -# Interval in seconds between runs of the status synchronization task. -# The plugin will aim at resynchronizing operational status for all -# resources in this interval, and it should be therefore large enough -# to ensure the task is feasible. 
Otherwise the plugin will be -# constantly synchronizing resource status, ie: a new task is started -# as soon as the previous is completed. -# If this value is set to 0, the state synchronization thread for this -# Neutron instance will be disabled. -# state_sync_interval = 10 - -# Random additional delay between two runs of the state synchronization task. -# An additional wait time between 0 and max_random_sync_delay seconds -# will be added on top of state_sync_interval. -# max_random_sync_delay = 0 - -# Minimum delay, in seconds, between two status synchronization requests for NSX. -# Depending on chunk size, controller load, and other factors, state -# synchronization requests might be pretty heavy. This means the -# controller might take time to respond, and its load might be quite -# increased by them. This parameter allows to specify a minimum -# interval between two subsequent requests. -# The value for this parameter must never exceed state_sync_interval. -# If this does, an error will be raised at startup. -# min_sync_req_delay = 1 - -# Minimum number of resources to be retrieved from NSX in a single status -# synchronization request. -# The actual size of the chunk will increase if the number of resources is such -# that using the minimum chunk size will cause the interval between two -# requests to be less than min_sync_req_delay -# min_chunk_size = 500 - -# Enable this option to allow punctual state synchronization on show -# operations. In this way, show operations will always fetch the operational -# status of the resource from the NSX backend, and this might have -# a considerable impact on overall performance. -# always_read_status = False - -[nsx_lsn] -# Pull LSN information from NSX in case it is missing from the local -# data store. This is useful to rebuild the local store in case of -# server recovery -# sync_on_missing_data = False - -[nsx_dhcp] -# (Optional) Comma separated list of additional dns servers. 
Default is an empty list -# extra_domain_name_servers = - -# Domain to use for building the hostnames -# domain_name = openstacklocal - -# Default DHCP lease time -# default_lease_time = 43200 - -[nsx_metadata] -# IP address used by Metadata server -# metadata_server_address = 127.0.0.1 - -# TCP Port used by Metadata server -# metadata_server_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it MUST match with the configuration used by the Metadata server -# metadata_shared_secret = diff --git a/etc/neutron/plugins/vmware/policy/network-gateways.json b/etc/neutron/plugins/vmware/policy/network-gateways.json deleted file mode 100644 index 48575070898..00000000000 --- a/etc/neutron/plugins/vmware/policy/network-gateways.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "create_network_gateway": "rule:admin_or_owner", - "update_network_gateway": "rule:admin_or_owner", - "delete_network_gateway": "rule:admin_or_owner", - "connect_network": "rule:admin_or_owner", - "disconnect_network": "rule:admin_or_owner", - "create_gateway_device": "rule:admin_or_owner", - "update_gateway_device": "rule:admin_or_owner", - "delete_gateway_device": "rule_admin_or_owner" -} diff --git a/etc/neutron/plugins/vmware/policy/routers.json b/etc/neutron/plugins/vmware/policy/routers.json deleted file mode 100644 index 48665dba836..00000000000 --- a/etc/neutron/plugins/vmware/policy/routers.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "create_router:external_gateway_info:enable_snat": "rule:admin_or_owner", - "create_router:distributed": "rule:admin_or_owner", - "get_router:distributed": "rule:admin_or_owner", - "update_router:external_gateway_info:enable_snat": "rule:admin_or_owner", - "update_router:distributed": "rule:admin_or_owner" -} diff --git a/etc/neutron/rootwrap.d/dibbler.filters b/etc/neutron/rootwrap.d/dibbler.filters new file mode 100644 index 
00000000000..eea55252f35 --- /dev/null +++ b/etc/neutron/rootwrap.d/dibbler.filters @@ -0,0 +1,16 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# Filters for the dibbler-based reference implementation of the pluggable +# Prefix Delegation driver. Other implementations using an alternative agent +# should include a similar filter in this folder. + +# prefix_delegation_agent +dibbler-client: CommandFilter, dibbler-client, root diff --git a/etc/neutron/rootwrap.d/nec-plugin.filters b/etc/neutron/rootwrap.d/nec-plugin.filters deleted file mode 100644 index 89c4cfe3558..00000000000 --- a/etc/neutron/rootwrap.d/nec-plugin.filters +++ /dev/null @@ -1,12 +0,0 @@ -# neutron-rootwrap command filters for nodes on which neutron is -# expected to control network -# -# This file should be owned by (and only-writeable by) the root user - -# format seems to be -# cmd-name: filter-name, raw-command, user, args - -[Filters] - -# nec_neutron_agent -ovs-vsctl: CommandFilter, ovs-vsctl, root diff --git a/etc/policy.json b/etc/policy.json index 72756bdb630..9207142582e 100644 --- a/etc/policy.json +++ b/etc/policy.json @@ -1,8 +1,10 @@ { "context_is_admin": "role:admin", - "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "owner": "tenant_id:%(tenant_id)s", + "admin_or_owner": "rule:context_is_admin or rule:owner", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", @@ -62,7 +64,7 @@ "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:admin_or_network_owner or 
rule:context_is_advsvc", "create_port:allowed_address_pairs": "rule:admin_or_network_owner", - "get_port": "rule:admin_or_owner or rule:context_is_advsvc", + "get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", @@ -76,7 +78,7 @@ "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:allowed_address_pairs": "rule:admin_or_network_owner", - "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", + "delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", @@ -174,5 +176,23 @@ "update_service_profile": "rule:admin_only", "delete_service_profile": "rule:admin_only", "get_service_profiles": "rule:admin_only", - "get_service_profile": "rule:admin_only" + "get_service_profile": "rule:admin_only", + + "get_policy": "rule:regular_user", + "create_policy": "rule:admin_only", + "update_policy": "rule:admin_only", + "delete_policy": "rule:admin_only", + "get_policy_bandwidth_limit_rule": "rule:regular_user", + "create_policy_bandwidth_limit_rule": "rule:admin_only", + "delete_policy_bandwidth_limit_rule": "rule:admin_only", + "update_policy_bandwidth_limit_rule": "rule:admin_only", + "get_rule_type": "rule:regular_user", + + "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only", + "create_rbac_policy": "", + "create_rbac_policy:target_tenant": "rule:restrict_wildcard", + "update_rbac_policy": "rule:admin_or_owner", + "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", + "get_rbac_policy": "rule:admin_or_owner", + "delete_rbac_policy": "rule:admin_or_owner" } diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf index f2d9ce4227e..3a6b11f44dc 100644 --- 
a/etc/rootwrap.conf +++ b/etc/rootwrap.conf @@ -10,7 +10,7 @@ filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin # Enable logging to syslog # Default value is False diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index 592466720d5..fc0927543e1 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -30,6 +30,8 @@ from neutron.agent.ovsdb import api as ovsdb from neutron.common import exceptions from neutron.i18n import _LE, _LI, _LW from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers.openvswitch.agent.common \ + import constants # Default timeout for ovs-vsctl command DEFAULT_OVS_VSCTL_TIMEOUT = 10 @@ -102,8 +104,11 @@ class BaseOVS(object): self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout self.ovsdb = ovsdb.API.get(self) - def add_bridge(self, bridge_name): - self.ovsdb.add_br(bridge_name).execute() + def add_bridge(self, bridge_name, + datapath_type=constants.OVS_DATAPATH_SYSTEM): + + self.ovsdb.add_br(bridge_name, + datapath_type).execute() br = OVSBridge(bridge_name) # Don't return until vswitchd sets up the internal port br.get_port_ofport(bridge_name) @@ -143,9 +148,10 @@ class BaseOVS(object): class OVSBridge(BaseOVS): - def __init__(self, br_name): + def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM): super(OVSBridge, self).__init__() self.br_name = br_name + self.datapath_type = datapath_type def set_controller(self, controllers): self.ovsdb.set_controller(self.br_name, @@ -171,8 +177,14 @@ class OVSBridge(BaseOVS): self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols, check_error=True) - def create(self): - 
self.ovsdb.add_br(self.br_name).execute() + def create(self, secure_mode=False): + with self.ovsdb.transaction() as txn: + txn.add( + self.ovsdb.add_br(self.br_name, + datapath_type=self.datapath_type)) + if secure_mode: + txn.add(self.ovsdb.set_fail_mode(self.br_name, + FAILMODE_SECURE)) # Don't return until vswitchd sets up the internal port self.get_port_ofport(self.br_name) @@ -182,7 +194,8 @@ class OVSBridge(BaseOVS): def reset_bridge(self, secure_mode=False): with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.del_br(self.br_name)) - txn.add(self.ovsdb.add_br(self.br_name)) + txn.add(self.ovsdb.add_br(self.br_name, + datapath_type=self.datapath_type)) if secure_mode: txn.add(self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE)) @@ -268,6 +281,10 @@ class OVSBridge(BaseOVS): if 'NXST' not in item) return retval + def dump_all_flows(self): + return [f for f in self.run_ofctl("dump-flows", []).splitlines() + if 'NXST' not in f] + def deferred(self, **kwargs): return DeferredOVSBridge(self, **kwargs) @@ -489,6 +506,36 @@ class OVSBridge(BaseOVS): txn.add(self.ovsdb.db_set('Controller', controller_uuid, *attr)) + def _set_egress_bw_limit_for_port(self, port_name, max_kbps, + max_burst_kbps): + with self.ovsdb.transaction(check_error=True) as txn: + txn.add(self.ovsdb.db_set('Interface', port_name, + ('ingress_policing_rate', max_kbps))) + txn.add(self.ovsdb.db_set('Interface', port_name, + ('ingress_policing_burst', + max_burst_kbps))) + + def create_egress_bw_limit_for_port(self, port_name, max_kbps, + max_burst_kbps): + self._set_egress_bw_limit_for_port( + port_name, max_kbps, max_burst_kbps) + + def get_egress_bw_limit_for_port(self, port_name): + + max_kbps = self.db_get_val('Interface', port_name, + 'ingress_policing_rate') + max_burst_kbps = self.db_get_val('Interface', port_name, + 'ingress_policing_burst') + + max_kbps = max_kbps or None + max_burst_kbps = max_burst_kbps or None + + return max_kbps, max_burst_kbps + + def 
delete_egress_bw_limit_for_port(self, port_name): + self._set_egress_bw_limit_for_port( + port_name, 0, 0) + def __enter__(self): self.create() return self diff --git a/neutron/agent/dhcp/config.py b/neutron/agent/dhcp/config.py index eefac85d449..1ff185d83f1 100644 --- a/neutron/agent/dhcp/config.py +++ b/neutron/agent/dhcp/config.py @@ -24,6 +24,8 @@ DHCP_AGENT_OPTS = [ help=_("The driver used to manage the DHCP server.")), cfg.BoolOpt('enable_isolated_metadata', default=False, help=_("Support Metadata requests on isolated networks.")), + cfg.BoolOpt('force_metadata', default=False, + help=_("Force to use DHCP to get Metadata on all networks.")), cfg.BoolOpt('enable_metadata_network', default=False, help=_("Allows for serving metadata requests from a " "dedicated network. Requires " @@ -38,7 +40,11 @@ DHCP_OPTS = [ help=_('Location to store DHCP server config files')), cfg.StrOpt('dhcp_domain', default='openstacklocal', - help=_('Domain to use for building the hostnames')), + help=_('Domain to use for building the hostnames.' + 'This option is deprecated. It has been moved to ' + 'neutron.conf as dns_domain. It will removed from here ' + 'in a future release'), + deprecated_for_removal=True), ] DNSMASQ_OPTS = [ diff --git a/neutron/plugins/ibm/__init__.py b/neutron/agent/l2/__init__.py similarity index 100% rename from neutron/plugins/ibm/__init__.py rename to neutron/agent/l2/__init__.py diff --git a/neutron/agent/l2/agent_extension.py b/neutron/agent/l2/agent_extension.py new file mode 100644 index 00000000000..4144d5fbe5a --- /dev/null +++ b/neutron/agent/l2/agent_extension.py @@ -0,0 +1,59 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AgentCoreResourceExtension(object): + """Define stable abstract interface for agent extensions. + + An agent extension extends the agent core functionality. + """ + + def initialize(self, connection, driver_type): + """Perform agent core resource extension initialization. + + :param connection: RPC connection that can be reused by the extension + to define its RPC endpoints + :param driver_type: a string that defines the agent type to the + extension. Can be used to choose the right backend + implementation. + + Called after all extensions have been loaded. + No port handling will be called before this method. + """ + + @abc.abstractmethod + def handle_port(self, context, data): + """Handle agent extension for port. + + This can be called on either create or update, depending on the + code flow. Thus, it's this function's responsibility to check what + actually changed. + + :param context - rpc context + :param data - port data + """ + + @abc.abstractmethod + def delete_port(self, context, data): + """Delete port from agent extension. 
+ + :param context - rpc context + :param data - port data + """ diff --git a/neutron/plugins/ibm/agent/__init__.py b/neutron/agent/l2/extensions/__init__.py similarity index 100% rename from neutron/plugins/ibm/agent/__init__.py rename to neutron/agent/l2/extensions/__init__.py diff --git a/neutron/agent/l2/extensions/manager.py b/neutron/agent/l2/extensions/manager.py new file mode 100644 index 00000000000..bc8f3006f07 --- /dev/null +++ b/neutron/agent/l2/extensions/manager.py @@ -0,0 +1,85 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log +import stevedore + +from neutron.i18n import _LE, _LI + +LOG = log.getLogger(__name__) + + +L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions' +L2_AGENT_EXT_MANAGER_OPTS = [ + cfg.ListOpt('extensions', + default=[], + help=_('Extensions list to use')), +] + + +def register_opts(conf): + conf.register_opts(L2_AGENT_EXT_MANAGER_OPTS, 'agent') + + +class AgentExtensionsManager(stevedore.named.NamedExtensionManager): + """Manage agent extensions.""" + + def __init__(self, conf): + super(AgentExtensionsManager, self).__init__( + L2_AGENT_EXT_MANAGER_NAMESPACE, conf.agent.extensions, + invoke_on_load=True, name_order=True) + LOG.info(_LI("Loaded agent extensions: %s"), self.names()) + + def initialize(self, connection, driver_type): + """Initialize enabled L2 agent extensions. 
+ + :param connection: RPC connection that can be reused by extensions to + define their RPC endpoints + :param driver_type: a string that defines the agent type to the + extension. Can be used by the extension to choose + the right backend implementation. + """ + # Initialize each agent extension in the list. + for extension in self: + LOG.info(_LI("Initializing agent extension '%s'"), extension.name) + extension.obj.initialize(connection, driver_type) + + def handle_port(self, context, data): + """Notify all agent extensions to handle port.""" + for extension in self: + try: + extension.obj.handle_port(context, data) + # TODO(QoS) add agent extensions exception and catch them here + except AttributeError: + LOG.exception( + _LE("Agent Extension '%(name)s' failed " + "while handling port update"), + {'name': extension.name} + ) + + def delete_port(self, context, data): + """Notify all agent extensions to delete port.""" + for extension in self: + try: + extension.obj.delete_port(context, data) + # TODO(QoS) add agent extensions exception and catch them here + # instead of AttributeError + except AttributeError: + LOG.exception( + _LE("Agent Extension '%(name)s' failed " + "while handling port deletion"), + {'name': extension.name} + ) diff --git a/neutron/agent/l2/extensions/qos.py b/neutron/agent/l2/extensions/qos.py new file mode 100644 index 00000000000..13e94cb290d --- /dev/null +++ b/neutron/agent/l2/extensions/qos.py @@ -0,0 +1,149 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import collections + +from oslo_concurrency import lockutils +import six + +from neutron.agent.l2 import agent_extension +from neutron.api.rpc.callbacks.consumer import registry +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron import manager + + +@six.add_metaclass(abc.ABCMeta) +class QosAgentDriver(object): + """Defines stable abstract interface for QoS Agent Driver. + + QoS Agent driver defines the interface to be implemented by Agent + for applying QoS Rules on a port. + """ + + @abc.abstractmethod + def initialize(self): + """Perform QoS agent driver initialization. + """ + + @abc.abstractmethod + def create(self, port, qos_policy): + """Apply QoS rules on port for the first time. + + :param port: port object. + :param qos_policy: the QoS policy to be applied on port. + """ + #TODO(QoS) we may want to provide default implementations of calling + #delete and then update + + @abc.abstractmethod + def update(self, port, qos_policy): + """Apply QoS rules on port. + + :param port: port object. + :param qos_policy: the QoS policy to be applied on port. + """ + + @abc.abstractmethod + def delete(self, port, qos_policy): + """Remove QoS rules from port. + + :param port: port object. + :param qos_policy: the QoS policy to be removed from port. + """ + + +class QosAgentExtension(agent_extension.AgentCoreResourceExtension): + SUPPORTED_RESOURCES = [resources.QOS_POLICY] + + def initialize(self, connection, driver_type): + """Perform Agent Extension initialization. 
+ + """ + self.resource_rpc = resources_rpc.ResourcesPullRpcApi() + self.qos_driver = manager.NeutronManager.load_class_for_provider( + 'neutron.qos.agent_drivers', driver_type)() + self.qos_driver.initialize() + + # we cannot use a dict of sets here because port dicts are not hashable + self.qos_policy_ports = collections.defaultdict(dict) + self.known_ports = set() + + registry.subscribe(self._handle_notification, resources.QOS_POLICY) + self._register_rpc_consumers(connection) + + def _register_rpc_consumers(self, connection): + endpoints = [resources_rpc.ResourcesPushRpcCallback()] + for resource_type in self.SUPPORTED_RESOURCES: + # we assume that neutron-server always broadcasts the latest + # version known to the agent + topic = resources_rpc.resource_type_versioned_topic(resource_type) + connection.create_consumer(topic, endpoints, fanout=True) + + @lockutils.synchronized('qos-port') + def _handle_notification(self, qos_policy, event_type): + # server does not allow to remove a policy that is attached to any + # port, so we ignore DELETED events. Also, if we receive a CREATED + # event for a policy, it means that there are no ports so far that are + # attached to it. That's why we are interested in UPDATED events only + if event_type == events.UPDATED: + self._process_update_policy(qos_policy) + + @lockutils.synchronized('qos-port') + def handle_port(self, context, port): + """Handle agent QoS extension for port. + + This method applies a new policy to a port using the QoS driver. + Update events are handled in _handle_notification. + """ + port_id = port['port_id'] + qos_policy_id = port.get('qos_policy_id') + if qos_policy_id is None: + self._process_reset_port(port) + return + + #Note(moshele) check if we have seen this port + #and it has the same policy we do nothing. 
+ if (port_id in self.known_ports and + port_id in self.qos_policy_ports[qos_policy_id]): + return + + self.qos_policy_ports[qos_policy_id][port_id] = port + self.known_ports.add(port_id) + qos_policy = self.resource_rpc.pull( + context, resources.QOS_POLICY, qos_policy_id) + self.qos_driver.create(port, qos_policy) + + def delete_port(self, context, port): + self._process_reset_port(port) + + def _process_update_policy(self, qos_policy): + for port_id, port in self.qos_policy_ports[qos_policy.id].items(): + # TODO(QoS): for now, just reflush the rules on the port. Later, we + # may want to apply the difference between the rules lists only. + self.qos_driver.delete(port, None) + self.qos_driver.update(port, qos_policy) + + def _process_reset_port(self, port): + port_id = port['port_id'] + if port_id in self.known_ports: + self.known_ports.remove(port_id) + for qos_policy_id, port_dict in self.qos_policy_ports.items(): + if port_id in port_dict: + del port_dict[port_id] + self.qos_driver.delete(port, None) + return diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 3bfcee9e496..99921846c3e 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -36,6 +36,7 @@ from neutron.agent.l3 import router_info as rinf from neutron.agent.l3 import router_processing_queue as queue from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib +from neutron.agent.linux import pd from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.callbacks import events @@ -78,6 +79,7 @@ class L3PluginApi(object): 1.4 - Added L3 HA update_router_state. 
This method was reworked in to update_ha_routers_states 1.5 - Added update_ha_routers_states + 1.6 - Added process_prefix_update """ @@ -131,6 +133,12 @@ class L3PluginApi(object): return cctxt.call(context, 'update_ha_routers_states', host=self.host, states=states) + def process_prefix_update(self, context, prefix_update): + """Process prefix update whenever prefixes get changed.""" + cctxt = self.client.prepare(version='1.6') + return cctxt.call(context, 'process_prefix_update', + subnets=prefix_update) + class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, ha.AgentMixin, @@ -218,6 +226,12 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, self.target_ex_net_id = None self.use_ipv6 = ipv6_utils.is_enabled() + self.pd = pd.PrefixDelegation(self.context, self.process_monitor, + self.driver, + self.plugin_rpc.process_prefix_update, + self.create_pd_router_update, + self.conf) + def _check_config_params(self): """Check items in configuration files. @@ -440,6 +454,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, for rp, update in self._queue.each_update_to_next_router(): LOG.debug("Starting router update for %s, action %s, priority %s", update.id, update.action, update.priority) + if update.action == queue.PD_UPDATE: + self.pd.process_prefix_update() + continue router = update.router if update.action != queue.DELETE_ROUTER and not router: try: @@ -574,6 +591,14 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, # When L3 agent is ready, we immediately do a full sync self.periodic_sync_routers_task(self.context) + def create_pd_router_update(self): + router_id = None + update = queue.RouterUpdate(router_id, + queue.PRIORITY_PD_UPDATE, + timestamp=timeutils.utcnow(), + action=queue.PD_UPDATE) + self._queue.add(update) + class L3NATAgentWithStateReport(L3NATAgent): @@ -646,6 +671,8 @@ class L3NATAgentWithStateReport(L3NATAgent): # When L3 agent is ready, we immediately do a full sync 
self.periodic_sync_routers_task(self.context) + self.pd.after_start() + def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.fullsync = True diff --git a/neutron/agent/l3/config.py b/neutron/agent/l3/config.py index edb5c5c90f1..dfb72bf1d5d 100644 --- a/neutron/agent/l3/config.py +++ b/neutron/agent/l3/config.py @@ -74,6 +74,13 @@ OPTS = [ "next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated " "to the network and not through this parameter. ")), + cfg.StrOpt('prefix_delegation_driver', + default='dibbler', + help=_('Driver used for ipv6 prefix delegation. This needs to ' + 'be an entry point defined in the ' + 'neutron.agent.linux.pd_drivers namespace. See ' + 'setup.cfg for entry points included with the neutron ' + 'source.')), cfg.BoolOpt('enable_metadata_proxy', default=True, help=_("Allow running metadata proxy.")), cfg.BoolOpt('router_delete_namespaces', default=True, diff --git a/neutron/agent/l3/dvr_edge_router.py b/neutron/agent/l3/dvr_edge_router.py index b68af5cdecf..a610bdb18a9 100644 --- a/neutron/agent/l3/dvr_edge_router.py +++ b/neutron/agent/l3/dvr_edge_router.py @@ -40,17 +40,27 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): if not self._is_this_snat_host(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) + if self.snat_namespace: + LOG.debug("SNAT was rescheduled to host %s. 
Clearing snat " + "namespace.", self.router.get('gw_port_host')) + return self.external_gateway_removed( + ex_gw_port, interface_name) return - self._external_gateway_added(ex_gw_port, - interface_name, - self.snat_namespace.name, - preserve_ips=[]) + if not self.snat_namespace: + # SNAT might be rescheduled to this agent; need to process like + # newly created gateway + return self.external_gateway_added(ex_gw_port, interface_name) + else: + self._external_gateway_added(ex_gw_port, + interface_name, + self.snat_namespace.name, + preserve_ips=[]) def external_gateway_removed(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port, interface_name) - if not self._is_this_snat_host(): + if not self._is_this_snat_host() and not self.snat_namespace: # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) return @@ -75,7 +85,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) - interface_name = self.get_snat_int_device_name(sn_port['id']) + interface_name = self._get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], @@ -100,7 +110,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): if not is_this_snat_host: return - snat_interface = self.get_snat_int_device_name(sn_port['id']) + snat_interface = self._get_snat_int_device_name(sn_port['id']) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): @@ -109,11 +119,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): def _create_dvr_gateway(self, ex_gw_port, gw_interface_name): """Create SNAT namespace.""" - snat_ns = self.create_snat_namespace() + snat_ns = self._create_snat_namespace() # connect snat_ports to br_int from SNAT namespace for port in self.get_snat_interfaces(): # create interface_name - 
interface_name = self.get_snat_int_device_name(port['id']) + interface_name = self._get_snat_int_device_name(port['id']) self._internal_network_added( snat_ns.name, port['network_id'], port['id'], port['fixed_ips'], @@ -127,7 +137,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): # kicks the FW Agent to add rules for the snat namespace self.agent.process_router_add(self) - def create_snat_namespace(self): + def _create_snat_namespace(self): # TODO(mlavalle): in the near future, this method should contain the # code in the L3 agent that creates a gateway for a dvr. The first step # is to move the creation of the snat namespace here @@ -138,7 +148,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): self.snat_namespace.create() return self.snat_namespace - def get_snat_int_device_name(self, port_id): + def _get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] @@ -166,3 +176,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): self._add_snat_rules(ex_gw_port, self.snat_iptables_manager, interface_name) + + def update_routing_table(self, operation, route, namespace=None): + ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) + super(DvrEdgeRouter, self).update_routing_table(operation, route, + namespace=ns_name) diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index 90e24d129d9..7b5894d1942 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -14,13 +14,13 @@ import os -from oslo_log import log as logging - +from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import utils as common_utils +from oslo_log import log as logging LOG = logging.getLogger(__name__) @@ -49,7 +49,10 
@@ class FipNamespace(namespaces.Namespace): self.use_ipv6 = use_ipv6 self.agent_gateway_port = None self._subscribers = set() - self._rule_priorities = set(range(FIP_PR_START, FIP_PR_END)) + path = os.path.join(agent_conf.state_path, 'fip-priorities') + self._rule_priorities = frpa.FipRulePriorityAllocator(path, + FIP_PR_START, + FIP_PR_END) self._iptables_manager = iptables_manager.IptablesManager( namespace=self.get_name(), use_ipv6=self.use_ipv6) @@ -85,14 +88,15 @@ class FipNamespace(namespaces.Namespace): self._subscribers.discard(router_id) return not self.has_subscribers() - def allocate_rule_priority(self): - return self._rule_priorities.pop() + def allocate_rule_priority(self, floating_ip): + return self._rule_priorities.allocate(floating_ip) - def deallocate_rule_priority(self, rule_pr): - self._rule_priorities.add(rule_pr) + def deallocate_rule_priority(self, floating_ip): + self._rule_priorities.release(floating_ip) def _gateway_added(self, ex_gw_port, interface_name): """Add Floating IP gateway port.""" + LOG.debug("add gateway interface(%s)", interface_name) ns_name = self.get_name() self.driver.plug(ex_gw_port['network_id'], ex_gw_port['id'], @@ -126,6 +130,7 @@ class FipNamespace(namespaces.Namespace): def create(self): # TODO(Carl) Get this functionality from mlavelle's namespace baseclass + LOG.debug("add fip-namespace(%s)", self.name) ip_wrapper_root = ip_lib.IPWrapper() ip_wrapper_root.netns.execute(['sysctl', '-w', @@ -172,7 +177,6 @@ class FipNamespace(namespaces.Namespace): """ self.agent_gateway_port = agent_gateway_port - # add fip-namespace and agent_gateway_port self.create() iface_name = self.get_ext_device_name(agent_gateway_port['id']) @@ -186,6 +190,7 @@ class FipNamespace(namespaces.Namespace): def create_rtr_2_fip_link(self, ri): """Create interface between router and Floating IP namespace.""" + LOG.debug("Create FIP link interfaces for router %s", ri.router_id) rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id) 
fip_2_rtr_name = self.get_int_device_name(ri.router_id) fip_ns_name = self.get_name() @@ -217,7 +222,7 @@ class FipNamespace(namespaces.Namespace): device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name) device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) #setup the NAT rules and chains - ri._handle_fip_nat_rules(rtr_2_fip_name, 'add_rules') + ri._handle_fip_nat_rules(rtr_2_fip_name) def scan_fip_ports(self, ri): # don't scan if not dvr or count is not None @@ -232,4 +237,8 @@ class FipNamespace(namespaces.Namespace): existing_cidrs = [addr['cidr'] for addr in device.addr.list()] fip_cidrs = [c for c in existing_cidrs if common_utils.is_cidr_host(c)] + for fip_cidr in fip_cidrs: + fip_ip = fip_cidr.split('/')[0] + rule_pr = self._rule_priorities.allocate(fip_ip) + ri.floating_ips_dict[fip_ip] = rule_pr ri.dist_fip_count = len(fip_cidrs) diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index e14fc2d172a..e0a1059aef7 100644 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -47,7 +47,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): floating_ips = super(DvrLocalRouter, self).get_floating_ips() return [i for i in floating_ips if i['host'] == self.host] - def _handle_fip_nat_rules(self, interface_name, action): + def _handle_fip_nat_rules(self, interface_name): """Configures NAT rules for Floating IPs for DVR. Remove all the rules. This is safe because if @@ -61,20 +61,20 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - # And add them back if the action is add_rules - if action == 'add_rules' and interface_name: - rule = ('POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! 
' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name}) - self.iptables_manager.ipv4['nat'].add_rule(*rule) + # And add the NAT rule back + rule = ('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name}) + self.iptables_manager.ipv4['nat'].add_rule(*rule) + self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): """Add floating IP to FIP namespace.""" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] - rule_pr = self.fip_ns.allocate_rule_priority() + rule_pr = self.fip_ns.allocate_rule_priority(floating_ip) self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) @@ -113,7 +113,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): ip_rule.rule.delete(ip=floating_ip, table=dvr_fip_ns.FIP_RT_TBL, priority=rule_pr) - self.fip_ns.deallocate_rule_priority(rule_pr) + self.fip_ns.deallocate_rule_priority(floating_ip) #TODO(rajeev): Handle else case - exception/log? 
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) @@ -265,7 +265,8 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): if is_add: exc = _LE('DVR: error adding redirection logic') else: - exc = _LE('DVR: removed snat failed') + exc = _LE('DVR: snat remove failed to clear the rule ' + 'and device') LOG.exception(exc) def _snat_redirect_add(self, gateway, sn_port, sn_int): @@ -373,8 +374,9 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): floating_ips = self.get_floating_ips() fip_agent_port = self.get_floating_agent_gw_interface( ex_gw_port['network_id']) - LOG.debug("FloatingIP agent gateway port received from the plugin: " - "%s", fip_agent_port) + if fip_agent_port: + LOG.debug("FloatingIP agent gateway port received from the " + "plugin: %s", fip_agent_port) is_first = False if floating_ips: is_first = self.fip_ns.subscribe(self.router_id) diff --git a/neutron/agent/l3/dvr_router_base.py b/neutron/agent/l3/dvr_router_base.py index 0c872c4c345..c8381aefc63 100644 --- a/neutron/agent/l3/dvr_router_base.py +++ b/neutron/agent/l3/dvr_router_base.py @@ -39,4 +39,8 @@ class DvrRouterBase(router.RouterInfo): if match_port: return match_port[0] else: - LOG.error(_LE('DVR: no map match_port found!')) + LOG.error(_LE('DVR: SNAT port not found in the list ' + '%(snat_list)s for the given router ' + ' internal port %(int_p)s'), { + 'snat_list': snat_ports, + 'int_p': int_port}) diff --git a/neutron/agent/l3/fip_rule_priority_allocator.py b/neutron/agent/l3/fip_rule_priority_allocator.py new file mode 100644 index 00000000000..016f12cd317 --- /dev/null +++ b/neutron/agent/l3/fip_rule_priority_allocator.py @@ -0,0 +1,53 @@ +# Copyright 2015 IBM Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.agent.l3.item_allocator import ItemAllocator + + +class FipPriority(object): + def __init__(self, index): + self.index = index + + def __repr__(self): + return str(self.index) + + def __hash__(self): + return hash(self.__repr__()) + + def __eq__(self, other): + if isinstance(other, FipPriority): + return (self.index == other.index) + else: + return False + + +class FipRulePriorityAllocator(ItemAllocator): + """Manages allocation of floating ips rule priorities. + IP rule priorities assigned to DVR floating IPs need + to be preserved over L3 agent restarts. + This class provides an allocator which saves the prirorities + to a datastore which will survive L3 agent restarts. 
+ """ + def __init__(self, data_store_path, priority_rule_start, + priority_rule_end): + """Create the necessary pool and create the item allocator + using ',' as the delimiter and FipRulePriorityAllocator as the + class type + """ + pool = set(FipPriority(str(s)) for s in range(priority_rule_start, + priority_rule_end)) + + super(FipRulePriorityAllocator, self).__init__(data_store_path, + FipPriority, + pool) diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py index e7b7b5020af..33d750d300d 100644 --- a/neutron/agent/l3/ha_router.py +++ b/neutron/agent/l3/ha_router.py @@ -333,6 +333,16 @@ class HaRouter(router.RouterInfo): self.ha_state = state callback(self.router_id, state) + @staticmethod + def _gateway_ports_equal(port1, port2): + def _get_filtered_dict(d, ignore): + return {k: v for k, v in d.items() if k not in ignore} + + keys_to_ignore = set(['binding:host_id']) + port1_filtered = _get_filtered_dict(port1, keys_to_ignore) + port2_filtered = _get_filtered_dict(port2, keys_to_ignore) + return port1_filtered == port2_filtered + def external_gateway_added(self, ex_gw_port, interface_name): self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name) self._add_gateway_vip(ex_gw_port, interface_name) diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index 8b25f0a6a33..1c29d793b1f 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -15,7 +15,6 @@ import netaddr from oslo_log import log as logging -import six from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib @@ -23,6 +22,7 @@ from neutron.agent.linux import iptables_manager from neutron.agent.linux import ra from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.i18n import _LW @@ -110,12 +110,17 @@ class RouterInfo(object): def 
get_external_device_interface_name(self, ex_gw_port): return self.get_external_device_name(ex_gw_port['id']) - def _update_routing_table(self, operation, route): + def _update_routing_table(self, operation, route, namespace): cmd = ['ip', 'route', operation, 'to', route['destination'], 'via', route['nexthop']] - ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name) + ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(cmd, check_exit_code=False) + def update_routing_table(self, operation, route, namespace=None): + if namespace is None: + namespace = self.ns_name + self._update_routing_table(operation, route, namespace) + def routes_updated(self): new_routes = self.router['routes'] @@ -129,10 +134,10 @@ class RouterInfo(object): if route['destination'] == del_route['destination']: removes.remove(del_route) #replace success even if there is no existing route - self._update_routing_table('replace', route) + self.update_routing_table('replace', route) for route in removes: LOG.debug("Removed route entry is '%s'", route) - self._update_routing_table('delete', route) + self.update_routing_table('delete', route) self.routes = new_routes def get_ex_gw_port(self): @@ -239,6 +244,8 @@ class RouterInfo(object): ip_cidr for ip_cidr in existing_cidrs - new_cidrs if common_utils.is_cidr_host(ip_cidr)) for ip_cidr in fips_to_remove: + LOG.debug("Removing floating ip %s from interface %s in " + "namespace %s", ip_cidr, interface_name, self.ns_name) self.remove_floating_ip(device, ip_cidr) return fip_statuses @@ -266,9 +273,28 @@ class RouterInfo(object): if self.router_namespace: self.router_namespace.delete() + def _internal_network_updated(self, port, subnet_id, prefix, old_prefix, + updated_cidrs): + interface_name = self.get_internal_device_name(port['id']) + if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX: + fixed_ips = port['fixed_ips'] + for fixed_ip in fixed_ips: + if fixed_ip['subnet_id'] == subnet_id: + v6addr = 
common_utils.ip_to_cidr(fixed_ip['ip_address'], + fixed_ip.get('prefixlen')) + if v6addr not in updated_cidrs: + self.driver.add_ipv6_addr(interface_name, v6addr, + self.ns_name) + else: + self.driver.delete_ipv6_addr_with_prefix(interface_name, + old_prefix, + self.ns_name) + def _internal_network_added(self, ns_name, network_id, port_id, fixed_ips, mac_address, interface_name, prefix): + LOG.debug("adding internal network: prefix(%s), port(%s)", + prefix, port_id) self.driver.plug(network_id, port_id, interface_name, mac_address, namespace=ns_name, prefix=prefix) @@ -300,7 +326,8 @@ class RouterInfo(object): def internal_network_removed(self, port): interface_name = self.get_internal_device_name(port['id']) - + LOG.debug("removing internal network: port(%s) interface(%s)", + port['id'], interface_name) if ip_lib.device_exists(interface_name, namespace=self.ns_name): self.driver.unplug(interface_name, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) @@ -326,7 +353,8 @@ class RouterInfo(object): def _port_has_ipv6_subnet(port): if 'subnets' in port: for subnet in port['subnets']: - if netaddr.IPNetwork(subnet['cidr']).version == 6: + if (netaddr.IPNetwork(subnet['cidr']).version == 6 and + subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX): return True def enable_radvd(self, internal_ports=None): @@ -344,7 +372,7 @@ class RouterInfo(object): self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs, namespace=self.ns_name) - def _process_internal_ports(self): + def _process_internal_ports(self, pd): existing_port_ids = set(p['id'] for p in self.internal_ports) internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) @@ -361,14 +389,26 @@ class RouterInfo(object): enable_ra = False for p in new_ports: self.internal_network_added(p) + LOG.debug("appending port %s to internal_ports cache", p) self.internal_ports.append(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) + for subnet in p['subnets']: + if ipv6_utils.is_ipv6_pd_enabled(subnet): + 
interface_name = self.get_internal_device_name(p['id']) + pd.enable_subnet(self.router_id, subnet['id'], + subnet['cidr'], + interface_name, p['mac_address']) for p in old_ports: self.internal_network_removed(p) + LOG.debug("removing port %s from internal_ports cache", p) self.internal_ports.remove(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) + for subnet in p['subnets']: + if ipv6_utils.is_ipv6_pd_enabled(subnet): + pd.disable_subnet(self.router_id, subnet['id']) + updated_cidrs = [] if updated_ports: for index, p in enumerate(internal_ports): if not updated_ports.get(p['id']): @@ -376,9 +416,26 @@ class RouterInfo(object): self.internal_ports[index] = updated_ports[p['id']] interface_name = self.get_internal_device_name(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) + LOG.debug("updating internal network for port %s", p) + updated_cidrs += ip_cidrs self.internal_network_updated(interface_name, ip_cidrs) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) + # Check if there is any pd prefix update + for p in internal_ports: + if p['id'] in (set(current_port_ids) & set(existing_port_ids)): + for subnet in p.get('subnets', []): + if ipv6_utils.is_ipv6_pd_enabled(subnet): + old_prefix = pd.update_subnet(self.router_id, + subnet['id'], + subnet['cidr']) + if old_prefix: + self._internal_network_updated(p, subnet['id'], + subnet['cidr'], + old_prefix, + updated_cidrs) + enable_ra = True + # Enable RA if enable_ra: self.enable_radvd(internal_ports) @@ -392,6 +449,7 @@ class RouterInfo(object): for stale_dev in stale_devs: LOG.debug('Deleting stale internal router device: %s', stale_dev) + pd.remove_stale_ri_ifname(self.router_id, stale_dev) self.driver.unplug(stale_dev, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) @@ -433,6 +491,8 @@ class RouterInfo(object): def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): + LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)", + ex_gw_port, 
interface_name, ns_name) self._plug_external_gateway(ex_gw_port, interface_name, ns_name) # Build up the interface and gateway IP addresses that @@ -474,12 +534,18 @@ class RouterInfo(object): ex_gw_port, interface_name, self.ns_name, preserve_ips) def external_gateway_removed(self, ex_gw_port, interface_name): + LOG.debug("External gateway removed: port(%s), interface(%s)", + ex_gw_port, interface_name) self.driver.unplug(interface_name, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, prefix=EXTERNAL_DEV_PREFIX) - def _process_external_gateway(self, ex_gw_port): + @staticmethod + def _gateway_ports_equal(port1, port2): + return port1 == port2 + + def _process_external_gateway(self, ex_gw_port, pd): # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or self.ex_gw_port and self.ex_gw_port['id']) @@ -488,22 +554,14 @@ class RouterInfo(object): if ex_gw_port_id: interface_name = self.get_external_device_name(ex_gw_port_id) if ex_gw_port: - def _gateway_ports_equal(port1, port2): - def _get_filtered_dict(d, ignore): - return dict((k, v) for k, v in six.iteritems(d) - if k not in ignore) - - keys_to_ignore = set(['binding:host_id']) - port1_filtered = _get_filtered_dict(port1, keys_to_ignore) - port2_filtered = _get_filtered_dict(port2, keys_to_ignore) - return port1_filtered == port2_filtered - if not self.ex_gw_port: self.external_gateway_added(ex_gw_port, interface_name) - elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port): + pd.add_gw_interface(self.router['id'], interface_name) + elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port): self.external_gateway_updated(ex_gw_port, interface_name) elif not ex_gw_port and self.ex_gw_port: self.external_gateway_removed(self.ex_gw_port, interface_name) + pd.remove_gw_interface(self.router['id']) existing_devices = self._get_existing_devices() stale_devs = [dev for dev in existing_devices @@ -511,6 +569,7 @@ class 
RouterInfo(object): and dev != interface_name] for stale_dev in stale_devs: LOG.debug('Deleting stale external router device: %s', stale_dev) + pd.remove_gw_interface(self.router['id']) self.driver.unplug(stale_dev, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, @@ -587,7 +646,7 @@ class RouterInfo(object): try: with self.iptables_manager.defer_apply(): ex_gw_port = self.get_ex_gw_port() - self._process_external_gateway(ex_gw_port) + self._process_external_gateway(ex_gw_port, agent.pd) if not ex_gw_port: return @@ -618,7 +677,9 @@ class RouterInfo(object): :param agent: Passes the agent in order to send RPC messages. """ - self._process_internal_ports() + LOG.debug("process router updates") + self._process_internal_ports(agent.pd) + agent.pd.sync_router(self.router['id']) self.process_external(agent) # Process static routes for router self.routes_updated() diff --git a/neutron/agent/l3/router_processing_queue.py b/neutron/agent/l3/router_processing_queue.py index a46177005dc..a0b3fa1d67a 100644 --- a/neutron/agent/l3/router_processing_queue.py +++ b/neutron/agent/l3/router_processing_queue.py @@ -21,7 +21,9 @@ from oslo_utils import timeutils # Lower value is higher priority PRIORITY_RPC = 0 PRIORITY_SYNC_ROUTERS_TASK = 1 +PRIORITY_PD_UPDATE = 2 DELETE_ROUTER = 1 +PD_UPDATE = 2 class RouterUpdate(object): diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 0ac27b241a3..373668f5c0c 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -510,6 +510,11 @@ class Dnsmasq(DhcpLocalProcess): for port in self.network.ports: fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips, v6_nets) + # Confirm whether Neutron server supports dns_name attribute in the + # ports API + dns_assignment = getattr(port, 'dns_assignment', None) + if dns_assignment: + dns_ip_map = {d.ip_address: d for d in dns_assignment} for alloc in fixed_ips: # Note(scollins) Only create entries that are # associated with the 
subnet being managed by this @@ -523,11 +528,18 @@ class Dnsmasq(DhcpLocalProcess): yield (port, alloc, hostname, fqdn) continue - hostname = 'host-%s' % alloc.ip_address.replace( - '.', '-').replace(':', '-') - fqdn = hostname - if self.conf.dhcp_domain: - fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain) + # If dns_name attribute is supported by ports API, return the + # dns_assignment generated by the Neutron server. Otherwise, + # generate hostname and fqdn locally (previous behaviour) + if dns_assignment: + hostname = dns_ip_map[alloc.ip_address].hostname + fqdn = dns_ip_map[alloc.ip_address].fqdn + else: + hostname = 'host-%s' % alloc.ip_address.replace( + '.', '-').replace(':', '-') + fqdn = hostname + if self.conf.dhcp_domain: + fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain) yield (port, alloc, hostname, fqdn) def _get_port_extra_dhcp_opts(self, port): @@ -761,9 +773,10 @@ class Dnsmasq(DhcpLocalProcess): # Add host routes for isolated network segments - if (isolated_subnets[subnet.id] and + if (self.conf.force_metadata or + (isolated_subnets[subnet.id] and self.conf.enable_isolated_metadata and - subnet.ip_version == 4): + subnet.ip_version == 4)): subnet_dhcp_ip = subnet_to_interface_ip[subnet.id] host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) @@ -900,7 +913,7 @@ class Dnsmasq(DhcpLocalProcess): A subnet is considered non-isolated if there is a port connected to the subnet, and the port's ip address matches that of the subnet's - gateway. The port must be owned by a nuetron router. + gateway. The port must be owned by a neutron router. """ isolated_subnets = collections.defaultdict(lambda: True) subnets = dict((subnet.id, subnet) for subnet in network.subnets) @@ -919,7 +932,8 @@ class Dnsmasq(DhcpLocalProcess): """Determine whether the metadata proxy is needed for a network This method returns True for truly isolated networks (ie: not attached - to a router), when the enable_isolated_metadata flag is True. 
+ to a router) when enable_isolated_metadata is True, or for all the + networks when the force_metadata flags is True. This method also returns True when enable_metadata_network is True, and the network passed as a parameter has a subnet in the link-local @@ -928,6 +942,9 @@ class Dnsmasq(DhcpLocalProcess): providing access to the metadata service via logical routers built with 3rd party backends. """ + if conf.force_metadata: + return True + if conf.enable_metadata_network and conf.enable_isolated_metadata: # check if the network has a metadata subnet meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR) @@ -996,77 +1013,111 @@ class DeviceManager(object): device.route.delete_gateway(gateway) - def setup_dhcp_port(self, network): - """Create/update DHCP port for the host if needed and return port.""" + def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets): + """Set up the existing DHCP port, if there is one.""" - device_id = self.get_device_id(network) - subnets = {subnet.id: subnet for subnet in network.subnets - if subnet.enable_dhcp} + # To avoid pylint thinking that port might be undefined after + # the following loop... + port = None - dhcp_port = None + # Look for an existing DHCP for this network. for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: - dhcp_enabled_subnet_ids = set(subnets) - port_fixed_ips = [] - for fixed_ip in port.fixed_ips: - if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: - port_fixed_ips.append( - {'subnet_id': fixed_ip.subnet_id, - 'ip_address': fixed_ip.ip_address}) - - port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) - # If there is a new dhcp enabled subnet or a port that is no - # longer on a dhcp enabled subnet, we need to call update. 
- if dhcp_enabled_subnet_ids != port_subnet_ids: - port_fixed_ips.extend( - dict(subnet_id=s) - for s in dhcp_enabled_subnet_ids - port_subnet_ids) - dhcp_port = self.plugin.update_dhcp_port( - port.id, {'port': {'network_id': network.id, - 'fixed_ips': port_fixed_ips}}) - if not dhcp_port: - raise exceptions.Conflict() - else: - dhcp_port = port - # break since we found port that matches device_id break + else: + return None - # check for a reserved DHCP port - if dhcp_port is None: - LOG.debug('DHCP port %(device_id)s on network %(network_id)s' - ' does not yet exist. Checking for a reserved port.', - {'device_id': device_id, 'network_id': network.id}) - for port in network.ports: - port_device_id = getattr(port, 'device_id', None) - if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: - dhcp_port = self.plugin.update_dhcp_port( - port.id, {'port': {'network_id': network.id, - 'device_id': device_id}}) - if dhcp_port: - break + # Compare what the subnets should be against what is already + # on the port. + dhcp_enabled_subnet_ids = set(dhcp_subnets) + port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) - # DHCP port has not yet been created. - if dhcp_port is None: - LOG.debug('DHCP port %(device_id)s on network %(network_id)s' - ' does not yet exist.', {'device_id': device_id, - 'network_id': network.id}) - port_dict = dict( - name='', - admin_state_up=True, - device_id=device_id, - network_id=network.id, - tenant_id=network.tenant_id, - fixed_ips=[dict(subnet_id=s) for s in subnets]) - dhcp_port = self.plugin.create_dhcp_port({'port': port_dict}) + # If those differ, we need to call update. + if dhcp_enabled_subnet_ids != port_subnet_ids: + # Collect the subnets and fixed IPs that the port already + # has, for subnets that are still in the DHCP-enabled set. 
+ wanted_fixed_ips = [] + for fixed_ip in port.fixed_ips: + if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: + wanted_fixed_ips.append( + {'subnet_id': fixed_ip.subnet_id, + 'ip_address': fixed_ip.ip_address}) - if not dhcp_port: + # Add subnet IDs for new DHCP-enabled subnets. + wanted_fixed_ips.extend( + dict(subnet_id=s) + for s in dhcp_enabled_subnet_ids - port_subnet_ids) + + # Update the port to have the calculated subnets and fixed + # IPs. The Neutron server will allocate a fresh IP for + # each subnet that doesn't already have one. + port = self.plugin.update_dhcp_port( + port.id, + {'port': {'network_id': network.id, + 'fixed_ips': wanted_fixed_ips}}) + if not port: + raise exceptions.Conflict() + + return port + + def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets): + """Setup the reserved DHCP port, if there is one.""" + LOG.debug('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist. Checking for a reserved port.', + {'device_id': device_id, 'network_id': network.id}) + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: + port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'device_id': device_id}}) + if port: + return port + + def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets): + """Create and set up new DHCP port for the specified network.""" + LOG.debug('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist. 
Creating new one.', + {'device_id': device_id, 'network_id': network.id}) + port_dict = dict( + name='', + admin_state_up=True, + device_id=device_id, + network_id=network.id, + tenant_id=network.tenant_id, + fixed_ips=[dict(subnet_id=s) for s in dhcp_subnets]) + return self.plugin.create_dhcp_port({'port': port_dict}) + + def setup_dhcp_port(self, network): + """Create/update DHCP port for the host if needed and return port.""" + + # The ID that the DHCP port will have (or already has). + device_id = self.get_device_id(network) + + # Get the set of DHCP-enabled subnets on this network. + dhcp_subnets = {subnet.id: subnet for subnet in network.subnets + if subnet.enable_dhcp} + + # There are 3 cases: either the DHCP port already exists (but + # might need to be updated for a changed set of subnets); or + # some other code has already prepared a 'reserved' DHCP port, + # and we just need to adopt that; or we need to create a new + # DHCP port. Try each of those in turn until we have a DHCP + # port. + for setup_method in (self._setup_existing_dhcp_port, + self._setup_reserved_dhcp_port, + self._setup_new_dhcp_port): + dhcp_port = setup_method(network, device_id, dhcp_subnets) + if dhcp_port: + break + else: raise exceptions.Conflict() # Convert subnet_id to subnet dict fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, ip_address=fixed_ip.ip_address, - subnet=subnets[fixed_ip.subnet_id]) + subnet=dhcp_subnets[fixed_ip.subnet_id]) for fixed_ip in dhcp_port.fixed_ips] ips = [DictModel(item) if isinstance(item, dict) else item diff --git a/neutron/agent/linux/dibbler.py b/neutron/agent/linux/dibbler.py new file mode 100644 index 00000000000..3a97f620ef1 --- /dev/null +++ b/neutron/agent/linux/dibbler.py @@ -0,0 +1,181 @@ +# Copyright 2015 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import jinja2 +import os +from oslo_config import cfg +import shutil +import six + +from neutron.agent.linux import external_process +from neutron.agent.linux import pd +from neutron.agent.linux import pd_driver +from neutron.agent.linux import utils +from neutron.common import constants +from oslo_log import log as logging + + +LOG = logging.getLogger(__name__) + +PD_SERVICE_NAME = 'dibbler' +CONFIG_TEMPLATE = jinja2.Template(""" +# Config for dibbler-client. + +# Use enterprise number based duid +duid-type duid-en {{ enterprise_number }} {{ va_id }} + +# 8 (Debug) is most verbose. 
7 (Info) is usually the best option +log-level 8 + +# No automatic downlink address assignment +downlink-prefix-ifaces "none" + +# Use script to notify l3_agent of assigned prefix +script {{ script_path }} + +# Ask for prefix over the external gateway interface +iface {{ interface_name }} { +# Bind to generated LLA +bind-to-address {{ bind_address }} +# ask for address + pd 1 +} +""") + +# The first line must be #!/usr/bin/env bash +SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash + +exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }} +""") + + +class PDDibbler(pd_driver.PDDriverBase): + def __init__(self, router_id, subnet_id, ri_ifname): + super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname) + self.requestor_id = "%s:%s:%s" % (self.router_id, + self.subnet_id, + self.ri_ifname) + self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs, + self.requestor_id) + self.prefix_path = "%s/prefix" % self.dibbler_client_working_area + self.pid_path = "%s/client.pid" % self.dibbler_client_working_area + self.converted_subnet_id = self.subnet_id.replace('-', '') + + def _is_dibbler_client_running(self): + return utils.get_value_from_file(self.pid_path) + + def _generate_dibbler_conf(self, ex_gw_ifname, lla): + dcwa = self.dibbler_client_working_area + script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True) + buf = six.StringIO() + buf.write('%s' % SCRIPT_TEMPLATE.render( + prefix_path=self.prefix_path, + l3_agent_pid=os.getpid())) + utils.replace_file(script_path, buf.getvalue()) + os.chmod(script_path, 0o744) + + dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False) + buf = six.StringIO() + buf.write('%s' % CONFIG_TEMPLATE.render( + enterprise_number=cfg.CONF.vendor_pen, + va_id='0x%s' % self.converted_subnet_id, + script_path='"%s/notify.sh"' % dcwa, + interface_name='"%s"' % ex_gw_ifname, + bind_address='%s' % lla)) + + utils.replace_file(dibbler_conf, buf.getvalue()) + return dcwa + + def 
_spawn_dibbler(self, pmon, router_ns, dibbler_conf): + def callback(pid_file): + dibbler_cmd = ['dibbler-client', + 'start', + '-w', '%s' % dibbler_conf] + return dibbler_cmd + + pm = external_process.ProcessManager( + uuid=self.requestor_id, + default_cmd_callback=callback, + namespace=router_ns, + service=PD_SERVICE_NAME, + conf=cfg.CONF, + pid_file=self.pid_path) + pm.enable(reload_cfg=False) + pmon.register(uuid=self.requestor_id, + service_name=PD_SERVICE_NAME, + monitored_process=pm) + + def enable(self, pmon, router_ns, ex_gw_ifname, lla): + LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s", + self.router_id, self.subnet_id, self.ri_ifname) + if not self._is_dibbler_client_running(): + dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla) + self._spawn_dibbler(pmon, router_ns, dibbler_conf) + LOG.debug("dibbler client enabled for router %s subnet %s" + " ri_ifname %s", + self.router_id, self.subnet_id, self.ri_ifname) + + def disable(self, pmon, router_ns): + LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s", + self.router_id, self.subnet_id, self.ri_ifname) + dcwa = self.dibbler_client_working_area + + def callback(pid_file): + dibbler_cmd = ['dibbler-client', + 'stop', + '-w', '%s' % dcwa] + return dibbler_cmd + + pmon.unregister(uuid=self.requestor_id, + service_name=PD_SERVICE_NAME) + pm = external_process.ProcessManager( + uuid=self.requestor_id, + namespace=router_ns, + service=PD_SERVICE_NAME, + conf=cfg.CONF, + pid_file=self.pid_path) + pm.disable(get_stop_command=callback) + shutil.rmtree(dcwa, ignore_errors=True) + LOG.debug("dibbler client disabled for router %s subnet %s " + "ri_ifname %s", + self.router_id, self.subnet_id, self.ri_ifname) + + def get_prefix(self): + prefix = utils.get_value_from_file(self.prefix_path) + if not prefix: + prefix = constants.PROVISIONAL_IPV6_PD_PREFIX + return prefix + + @staticmethod + def get_sync_data(): + try: + requestor_ids = os.listdir(cfg.CONF.pd_confs) + except OSError: 
+ return [] + + sync_data = [] + requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2) + for router_id, subnet_id, ri_ifname in requestors: + pd_info = pd.PDInfo() + pd_info.router_id = router_id + pd_info.subnet_id = subnet_id + pd_info.ri_ifname = ri_ifname + pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname) + pd_info.client_started = ( + pd_info.driver._is_dibbler_client_running()) + pd_info.prefix = pd_info.driver.get_prefix() + sync_data.append(pd_info) + + return sync_data diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py index 4cf287218df..2bccdf67c75 100644 --- a/neutron/agent/linux/external_process.py +++ b/neutron/agent/linux/external_process.py @@ -96,15 +96,20 @@ class ProcessManager(MonitoredProcess): def reload_cfg(self): self.disable('HUP') - def disable(self, sig='9'): + def disable(self, sig='9', get_stop_command=None): pid = self.pid if self.active: - cmd = ['kill', '-%s' % (sig), pid] - utils.execute(cmd, run_as_root=True) - # In the case of shutting down, remove the pid file - if sig == '9': - fileutils.delete_if_exists(self.get_pid_file_name()) + if get_stop_command: + cmd = get_stop_command(self.get_pid_file_name()) + ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) + ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env) + else: + cmd = ['kill', '-%s' % (sig), pid] + utils.execute(cmd, run_as_root=True) + # In the case of shutting down, remove the pid file + if sig == '9': + fileutils.delete_if_exists(self.get_pid_file_name()) elif pid: LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring ' 'signal %(signal)s', {'uuid': self.uuid, 'pid': pid, diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index 9207503e7ac..e57620675fe 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -25,6 +25,7 @@ from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common 
import constants as n_const from neutron.common import exceptions +from neutron.common import ipv6_utils from neutron.i18n import _LE, _LI @@ -51,6 +52,17 @@ class LinuxInterfaceDriver(object): def __init__(self, conf): self.conf = conf + if self.conf.network_device_mtu: + self._validate_network_device_mtu() + + def _validate_network_device_mtu(self): + if (ipv6_utils.is_enabled() and + self.conf.network_device_mtu < n_const.IPV6_MIN_MTU): + LOG.error(_LE("IPv6 protocol requires a minimum MTU of " + "%(min_mtu)s, while the configured value is " + "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU, + 'current_mtu': self.conf.network_device_mtu}) + raise SystemExit(1) def init_l3(self, device_name, ip_cidrs, namespace=None, preserve_ips=[], gateway_ips=None, @@ -116,6 +128,8 @@ class LinuxInterfaceDriver(object): associated to removed ips extra_subnets: An iterable of cidrs to add as routes without address """ + LOG.debug("init_router_port: device_name(%s), namespace(%s)", + device_name, namespace) self.init_l3(device_name=device_name, ip_cidrs=ip_cidrs, namespace=namespace, @@ -134,10 +148,41 @@ class LinuxInterfaceDriver(object): device.route.list_onlink_routes(n_const.IP_VERSION_4) + device.route.list_onlink_routes(n_const.IP_VERSION_6)) for route in new_onlink_routes - existing_onlink_routes: + LOG.debug("adding onlink route(%s)", route) device.route.add_onlink_route(route) for route in existing_onlink_routes - new_onlink_routes: + LOG.debug("deleting onlink route(%s)", route) device.route.delete_onlink_route(route) + def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'): + device = ip_lib.IPDevice(device_name, + namespace=namespace) + net = netaddr.IPNetwork(v6addr) + device.addr.add(str(net), scope) + + def delete_ipv6_addr(self, device_name, v6addr, namespace): + device = ip_lib.IPDevice(device_name, + namespace=namespace) + device.delete_addr_and_conntrack_state(v6addr) + + def delete_ipv6_addr_with_prefix(self, device_name, prefix, 
namespace): + """Delete the first listed IPv6 address that falls within a given + prefix. + """ + device = ip_lib.IPDevice(device_name, namespace=namespace) + net = netaddr.IPNetwork(prefix) + for address in device.addr.list(scope='global', filters=['permanent']): + ip_address = netaddr.IPNetwork(address['cidr']) + if ip_address in net: + device.delete_addr_and_conntrack_state(address['cidr']) + break + + def get_ipv6_llas(self, device_name, namespace): + device = ip_lib.IPDevice(device_name, + namespace=namespace) + + return device.addr.list(scope='link', ip_version=6) + def check_bridge_exists(self, bridge): if not ip_lib.device_exists(bridge): raise exceptions.BridgeDoesNotExist(bridge=bridge) diff --git a/neutron/agent/linux/ip_conntrack.py b/neutron/agent/linux/ip_conntrack.py index 97c94e0f62c..3e988ee3918 100644 --- a/neutron/agent/linux/ip_conntrack.py +++ b/neutron/agent/linux/ip_conntrack.py @@ -23,7 +23,8 @@ LOG = logging.getLogger(__name__) class IpConntrackManager(object): """Smart wrapper for ip conntrack.""" - def __init__(self, execute=None, namespace=None): + def __init__(self, zone_lookup_func, execute=None, namespace=None): + self.get_device_zone = zone_lookup_func self.execute = execute or linux_utils.execute self.namespace = namespace @@ -48,9 +49,7 @@ class IpConntrackManager(object): cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace) ethertype = rule.get('ethertype') for device_info in device_info_list: - zone_id = device_info.get('zone_id') - if not zone_id: - continue + zone_id = self.get_device_zone(device_info['device']) ips = device_info.get('fixed_ips', []) for ip in ips: net = netaddr.IPNetwork(ip) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index cadbd019fb1..7c4b4e37af9 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -152,6 +152,11 @@ class IPWrapper(SubProcessBase): """Delete a virtual interface between two namespaces.""" self._as_root([], 'link', ('del', 
name)) + def add_dummy(self, name): + """Create a Linux dummy interface with the given name.""" + self._as_root([], 'link', ('add', name, 'type', 'dummy')) + return IPDevice(name, namespace=self.namespace) + def ensure_namespace(self, name): if not self.netns.exists(name): ip = self.netns.add(name) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 9684e331390..339b9370fbf 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -14,6 +14,8 @@ # under the License. import collections +import re + import netaddr from oslo_config import cfg from oslo_log import log as logging @@ -41,7 +43,10 @@ DIRECTION_IP_PREFIX = {firewall.INGRESS_DIRECTION: 'source_ip_prefix', firewall.EGRESS_DIRECTION: 'dest_ip_prefix'} IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src', firewall.EGRESS_DIRECTION: 'dst'} +# length of all device prefixes (e.g. qvo, tap, qvb) +LINUX_DEV_PREFIX_LEN = 3 LINUX_DEV_LEN = 14 +MAX_CONNTRACK_ZONES = 65535 comment_rule = iptables_manager.comment_rule @@ -57,7 +62,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver): # TODO(majopela, shihanzhang): refactor out ipset to a separate # driver composed over this one self.ipset = ipset_manager.IpsetManager(namespace=namespace) - self.ipconntrack = ip_conntrack.IpConntrackManager(namespace=namespace) + self.ipconntrack = ip_conntrack.IpConntrackManager( + self.get_device_zone, namespace=namespace) + self._populate_initial_zone_map() # list of port which has security group self.filtered_ports = {} self.unfiltered_ports = {} @@ -638,11 +645,10 @@ class IptablesFirewallDriver(firewall.FirewallDriver): filtered_ports) for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove): - self._clear_sg_members(ip_version, remote_sg_ids) if self.enable_ipset: self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids) - self._remove_unused_sg_members() + self._remove_sg_members(remote_sgs_to_remove) # Remove 
unused security group rules for remove_group_id in self._determine_sg_rules_to_remove( @@ -690,23 +696,17 @@ class IptablesFirewallDriver(firewall.FirewallDriver): port_group_ids.update(port.get('security_groups', [])) return port_group_ids - def _clear_sg_members(self, ip_version, remote_sg_ids): - """Clear our internal cache of sg members matching the parameters.""" - for remote_sg_id in remote_sg_ids: - if self.sg_members[remote_sg_id][ip_version]: - self.sg_members[remote_sg_id][ip_version] = [] - def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids): """Remove system ipsets matching the provided parameters.""" for remote_sg_id in remote_sg_ids: self.ipset.destroy(remote_sg_id, ip_version) - def _remove_unused_sg_members(self): - """Remove sg_member entries where no IPv4 or IPv6 is associated.""" - for sg_id in list(self.sg_members.keys()): - sg_has_members = (self.sg_members[sg_id][constants.IPv4] or - self.sg_members[sg_id][constants.IPv6]) - if not sg_has_members: + def _remove_sg_members(self, remote_sgs_to_remove): + """Remove sg_member entries.""" + ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4) + ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6) + for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set): + if sg_id in self.sg_members: del self.sg_members[sg_id] def _find_deleted_sg_rules(self, sg_id): @@ -743,7 +743,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): sec_group_change = False device_info = self.filtered_ports.get(device) pre_device_info = self._pre_defer_filtered_ports.get(device) - if not (device_info or pre_device_info): + if not device_info or not pre_device_info: continue for sg_id in pre_device_info.get('security_groups', []): if sg_id not in device_info.get('security_groups', []): @@ -795,6 +795,68 @@ class IptablesFirewallDriver(firewall.FirewallDriver): self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None + def _populate_initial_zone_map(self): + """Setup the 
map between devices and zones based on current rules.""" + self._device_zone_map = {} + rules = self.iptables.get_rules_for_table('raw') + for rule in rules: + match = re.match(r'.* --physdev-in (?P[a-zA-Z0-9\-]+)' + r'.* -j CT --zone (?P\d+).*', rule) + if match: + # strip off any prefix that the interface is using + short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:] + self._device_zone_map[short_port_id] = int(match.group('zone')) + LOG.debug("Populated conntrack zone map: %s", self._device_zone_map) + + def get_device_zone(self, port_id): + # we have to key the device_zone_map based on the fragment of the port + # UUID that shows up in the interface name. This is because the initial + # map is populated strictly based on interface names that we don't know + # the full UUID of. + short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)] + try: + return self._device_zone_map[short_port_id] + except KeyError: + self._free_zones_from_removed_ports() + return self._generate_device_zone(short_port_id) + + def _free_zones_from_removed_ports(self): + """Clears any entries from the zone map of removed ports.""" + existing_ports = [ + port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)] + for port in (list(self.filtered_ports.values()) + + list(self.unfiltered_ports.values())) + ] + removed = set(self._device_zone_map) - set(existing_ports) + for dev in removed: + self._device_zone_map.pop(dev, None) + + def _generate_device_zone(self, short_port_id): + """Generates a unique conntrack zone for the passed in ID.""" + zone = self._find_open_zone() + self._device_zone_map[short_port_id] = zone + LOG.debug("Assigned CT zone %(z)s to port %(dev)s.", + {'z': zone, 'dev': short_port_id}) + return self._device_zone_map[short_port_id] + + def _find_open_zone(self): + # call set to dedup because old ports may be mapped to the same zone. 
+ zones_in_use = sorted(set(self._device_zone_map.values())) + if not zones_in_use: + return 1 + # attempt to increment onto the highest used zone first. if we hit the + # end, go back and look for any gaps left by removed devices. + last = zones_in_use[-1] + if last < MAX_CONNTRACK_ZONES: + return last + 1 + for index, used in enumerate(zones_in_use): + if used - index != 1: + # gap found, let's use it! + return index + 1 + # conntrack zones exhausted :( :( + raise RuntimeError("iptables conntrack zones exhausted. " + "iptables rules cannot be applied.") + class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX @@ -815,20 +877,18 @@ class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): else: device = self._get_device_name(port) jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % ( - device, port['zone_id']) + device, self.get_device_zone(port['device'])) return jump_rule def _add_raw_chain_rules(self, port, direction): - if port['zone_id']: - jump_rule = self._get_jump_rule(port, direction) - self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule) - self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule) + jump_rule = self._get_jump_rule(port, direction) + self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule) + self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule) def _remove_raw_chain_rules(self, port, direction): - if port['zone_id']: - jump_rule = self._get_jump_rule(port, direction) - self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule) - self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule) + jump_rule = self._get_jump_rule(port, direction) + self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule) + self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule) def _add_chain(self, port, direction): super(OVSHybridIptablesFirewallDriver, self)._add_chain(port, diff --git a/neutron/agent/linux/iptables_manager.py 
b/neutron/agent/linux/iptables_manager.py index a65e769c0b6..d72cdd58727 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -426,6 +426,13 @@ class IptablesManager(object): with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True): return self._apply_synchronized() + def get_rules_for_table(self, table): + """Runs iptables-save on a table and returns the results.""" + args = ['iptables-save', '-t', table] + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + return self.execute(args, run_as_root=True).split('\n') + def _apply_synchronized(self): """Apply the current in-memory set of iptables rules. diff --git a/neutron/agent/linux/pd.py b/neutron/agent/linux/pd.py new file mode 100644 index 00000000000..cfed4936f1b --- /dev/null +++ b/neutron/agent/linux/pd.py @@ -0,0 +1,356 @@ +# Copyright 2015 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import eventlet +import functools +import signal +import six + +from stevedore import driver + +from oslo_config import cfg +from oslo_log import log as logging + +from neutron.agent.linux import utils as linux_utils +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources +from neutron.common import constants as l3_constants +from neutron.common import ipv6_utils +from neutron.common import utils + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('pd_dhcp_driver', + default='dibbler', + help=_('Service to handle DHCPv6 Prefix delegation.')), +] + +cfg.CONF.register_opts(OPTS) + + +class PrefixDelegation(object): + def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb, + agent_conf): + self.context = context + self.pmon = pmon + self.intf_driver = intf_driver + self.notifier = notifier + self.routers = {} + self.pd_update_cb = pd_update_cb + self.agent_conf = agent_conf + self.pd_dhcp_driver = driver.DriverManager( + namespace='neutron.agent.linux.pd_drivers', + name=agent_conf.prefix_delegation_driver, + ).driver + registry.subscribe(add_router, + resources.ROUTER, + events.BEFORE_CREATE) + registry.subscribe(remove_router, + resources.ROUTER, + events.AFTER_DELETE) + self._get_sync_data() + + @utils.synchronized("l3-agent-pd") + def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac): + router = self.routers.get(router_id) + if router is None: + return + + pd_info = router['subnets'].get(subnet_id) + if not pd_info: + pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac) + router['subnets'][subnet_id] = pd_info + + pd_info.bind_lla = self._get_lla(mac) + if pd_info.sync: + pd_info.mac = mac + pd_info.old_prefix = prefix + else: + self._add_lla(router, pd_info.get_bind_lla_with_mask()) + + def _delete_pd(self, router, pd_info): + self._delete_lla(router, pd_info.get_bind_lla_with_mask()) + if pd_info.client_started: + pd_info.driver.disable(self.pmon, 
router['ns_name']) + + @utils.synchronized("l3-agent-pd") + def disable_subnet(self, router_id, subnet_id): + prefix_update = {} + router = self.routers.get(router_id) + if not router: + return + pd_info = router['subnets'].get(subnet_id) + if not pd_info: + return + self._delete_pd(router, pd_info) + prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX + del router['subnets'][subnet_id] + LOG.debug("Update server with prefixes: %s", prefix_update) + self.notifier(self.context, prefix_update) + + @utils.synchronized("l3-agent-pd") + def update_subnet(self, router_id, subnet_id, prefix): + router = self.routers.get(router_id) + if router is not None: + pd_info = router['subnets'].get(subnet_id) + if pd_info and pd_info.old_prefix != prefix: + old_prefix = pd_info.old_prefix + pd_info.old_prefix = prefix + return old_prefix + + @utils.synchronized("l3-agent-pd") + def add_gw_interface(self, router_id, gw_ifname): + router = self.routers.get(router_id) + prefix_update = {} + if not router: + return + router['gw_interface'] = gw_ifname + for subnet_id, pd_info in six.iteritems(router['subnets']): + # gateway is added after internal router ports. 
+ # If a PD is being synced, and if the prefix is available, + # send update if prefix out of sync; If not available, + # start the PD client + bind_lla_with_mask = pd_info.get_bind_lla_with_mask() + if pd_info.sync: + pd_info.sync = False + if pd_info.client_started: + if pd_info.prefix != pd_info.old_prefix: + prefix_update['subnet_id'] = pd_info.prefix + else: + self._delete_lla(router, bind_lla_with_mask) + self._add_lla(router, bind_lla_with_mask) + else: + self._add_lla(router, bind_lla_with_mask) + if prefix_update: + LOG.debug("Update server with prefixes: %s", prefix_update) + self.notifier(self.context, prefix_update) + + def delete_router_pd(self, router): + prefix_update = {} + for subnet_id, pd_info in six.iteritems(router['subnets']): + self._delete_lla(router, pd_info.get_bind_lla_with_mask()) + if pd_info.client_started: + pd_info.driver.disable(self.pmon, router['ns_name']) + pd_info.prefix = None + pd_info.client_started = False + prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX + prefix_update[subnet_id] = prefix + if prefix_update: + LOG.debug("Update server with prefixes: %s", prefix_update) + self.notifier(self.context, prefix_update) + + @utils.synchronized("l3-agent-pd") + def remove_gw_interface(self, router_id): + router = self.routers.get(router_id) + if router is not None: + router['gw_interface'] = None + self.delete_router_pd(router) + + @utils.synchronized("l3-agent-pd") + def sync_router(self, router_id): + router = self.routers.get(router_id) + if router is not None and router['gw_interface'] is None: + self.delete_router_pd(router) + + @utils.synchronized("l3-agent-pd") + def remove_stale_ri_ifname(self, router_id, stale_ifname): + router = self.routers.get(router_id) + if router is not None: + for subnet_id, pd_info in router['subnets'].items(): + if pd_info.ri_ifname == stale_ifname: + self._delete_pd(router, pd_info) + del router['subnets'][subnet_id] + + @staticmethod + def _get_lla(mac): + lla = 
ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX, + mac) + return lla + + def _get_llas(self, gw_ifname, ns_name): + try: + return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name) + except RuntimeError: + # The error message was printed as part of the driver call + # This could happen if the gw_ifname was removed + # simply return and exit the thread + return + + def _add_lla(self, router, lla_with_mask): + if router['gw_interface']: + self.intf_driver.add_ipv6_addr(router['gw_interface'], + lla_with_mask, + router['ns_name'], + 'link') + # There is a delay before the LLA becomes active. + # This is because the kernal runs DAD to make sure LLA uniqueness + # Spawn a thread to wait for the interface to be ready + self._spawn_lla_thread(router['gw_interface'], + router['ns_name'], + lla_with_mask) + + def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask): + eventlet.spawn_n(self._ensure_lla_task, + gw_ifname, + ns_name, + lla_with_mask) + + def _delete_lla(self, router, lla_with_mask): + if lla_with_mask and router['gw_interface']: + try: + self.intf_driver.delete_ipv6_addr(router['gw_interface'], + lla_with_mask, + router['ns_name']) + except RuntimeError: + # Ignore error if the lla doesn't exist + pass + + def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask): + # It would be insane for taking so long unless DAD test failed + # In that case, the subnet would never be assigned a prefix. 
+ linux_utils.wait_until_true(functools.partial(self._lla_available, + gw_ifname, + ns_name, + lla_with_mask), + timeout=l3_constants.LLA_TASK_TIMEOUT, + sleep=2) + + def _lla_available(self, gw_ifname, ns_name, lla_with_mask): + llas = self._get_llas(gw_ifname, ns_name) + if self._is_lla_active(lla_with_mask, llas): + LOG.debug("LLA %s is active now" % lla_with_mask) + self.pd_update_cb() + return True + + @staticmethod + def _is_lla_active(lla_with_mask, llas): + for lla in llas: + if lla_with_mask == lla['cidr']: + return not lla['tentative'] + return False + + @utils.synchronized("l3-agent-pd") + def process_prefix_update(self): + LOG.debug("Processing IPv6 PD Prefix Update") + + prefix_update = {} + for router_id, router in six.iteritems(self.routers): + if not router['gw_interface']: + continue + + llas = None + for subnet_id, pd_info in six.iteritems(router['subnets']): + if pd_info.client_started: + prefix = pd_info.driver.get_prefix() + if prefix != pd_info.prefix: + pd_info.prefix = prefix + prefix_update[subnet_id] = prefix + else: + if not llas: + llas = self._get_llas(router['gw_interface'], + router['ns_name']) + + if self._is_lla_active(pd_info.get_bind_lla_with_mask(), + llas): + if not pd_info.driver: + pd_info.driver = self.pd_dhcp_driver( + router_id, subnet_id, pd_info.ri_ifname) + pd_info.driver.enable(self.pmon, router['ns_name'], + router['gw_interface'], + pd_info.bind_lla) + pd_info.client_started = True + + if prefix_update: + LOG.debug("Update server with prefixes: %s", prefix_update) + self.notifier(self.context, prefix_update) + + def after_start(self): + LOG.debug('SIGHUP signal handler set') + signal.signal(signal.SIGHUP, self._handle_sighup) + + def _handle_sighup(self, signum, frame): + # The external DHCPv6 client uses SIGHUP to notify agent + # of prefix changes. 
+ self.pd_update_cb() + + def _get_sync_data(self): + sync_data = self.pd_dhcp_driver.get_sync_data() + for pd_info in sync_data: + router_id = pd_info.router_id + if not self.routers.get(router_id): + self.routers[router_id] = {'gw_interface': None, + 'ns_name': None, + 'subnets': {}} + new_pd_info = PDInfo(pd_info=pd_info) + subnets = self.routers[router_id]['subnets'] + subnets[pd_info.subnet_id] = new_pd_info + + +@utils.synchronized("l3-agent-pd") +def remove_router(resource, event, l3_agent, **kwargs): + router_id = kwargs['router'].router_id + router = l3_agent.pd.routers.get(router_id) + l3_agent.pd.delete_router_pd(router) + del l3_agent.pd.routers[router_id]['subnets'] + del l3_agent.pd.routers[router_id] + + +def get_router_entry(ns_name): + return {'gw_interface': None, + 'ns_name': ns_name, + 'subnets': {}} + + +@utils.synchronized("l3-agent-pd") +def add_router(resource, event, l3_agent, **kwargs): + added_router = kwargs['router'] + router = l3_agent.pd.routers.get(added_router.router_id) + if not router: + l3_agent.pd.routers[added_router.router_id] = ( + get_router_entry(added_router.ns_name)) + else: + # This will happen during l3 agent restart + router['ns_name'] = added_router.ns_name + + +class PDInfo(object): + """A class to simplify storing and passing of information relevant to + Prefix Delegation operations for a given subnet. 
+ """ + def __init__(self, pd_info=None, ri_ifname=None, mac=None): + if pd_info is None: + self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX + self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX + self.ri_ifname = ri_ifname + self.mac = mac + self.bind_lla = None + self.sync = False + self.driver = None + self.client_started = False + else: + self.prefix = pd_info.prefix + self.old_prefix = None + self.ri_ifname = pd_info.ri_ifname + self.mac = None + self.bind_lla = None + self.sync = True + self.driver = pd_info.driver + self.client_started = pd_info.client_started + + def get_bind_lla_with_mask(self): + bind_lla_with_mask = '%s/64' % self.bind_lla + return bind_lla_with_mask diff --git a/neutron/agent/linux/pd_driver.py b/neutron/agent/linux/pd_driver.py new file mode 100644 index 00000000000..8f11e817ce6 --- /dev/null +++ b/neutron/agent/linux/pd_driver.py @@ -0,0 +1,65 @@ +# Copyright 2015 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import six + +from oslo_config import cfg + +OPTS = [ + cfg.StrOpt('pd_confs', + default='$state_path/pd', + help=_('Location to store IPv6 PD files.')), + cfg.StrOpt('vendor_pen', + default='8888', + help=_("A decimal value as Vendor's Registered Private " + "Enterprise Number as required by RFC3315 DUID-EN.")), +] + +cfg.CONF.register_opts(OPTS) + + +@six.add_metaclass(abc.ABCMeta) +class PDDriverBase(object): + + def __init__(self, router_id, subnet_id, ri_ifname): + self.router_id = router_id + self.subnet_id = subnet_id + self.ri_ifname = ri_ifname + + @abc.abstractmethod + def enable(self, pmon, router_ns, ex_gw_ifname, lla): + """Enable IPv6 Prefix Delegation for this PDDriver on the given + external interface, with the given link local address + """ + + @abc.abstractmethod + def disable(self, pmon, router_ns): + """Disable IPv6 Prefix Delegation for this PDDriver + """ + + @abc.abstractmethod + def get_prefix(self): + """Get the current assigned prefix for this PDDriver from the PD agent. 
+ If no prefix is currently assigned, return + constants.PROVISIONAL_IPV6_PD_PREFIX + """ + + @staticmethod + @abc.abstractmethod + def get_sync_data(): + """Get the latest router_id, subnet_id, and ri_ifname from the PD agent + so that the PDDriver can be kept up to date + """ diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index 30d3f5cc0ff..96e179b03bf 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -33,6 +33,7 @@ from oslo_log import log as logging from oslo_log import loggers from oslo_rootwrap import client from oslo_utils import excutils +import six from six.moves import http_client as httplib from neutron.agent.common import config @@ -82,7 +83,6 @@ def create_process(cmd, run_as_root=False, addl_env=None): cmd = list(map(str, addl_env_args(addl_env) + cmd)) if run_as_root: cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd - LOG.debug("Running command: %s", cmd) obj = utils.subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, @@ -98,7 +98,6 @@ def execute_rootwrap_daemon(cmd, process_input, addl_env): # In practice, no neutron code should be trying to execute something that # would throw those errors, and if it does it should be fixed as opposed to # just logging the execution error. 
- LOG.debug("Running command (rootwrap daemon): %s", cmd) client = RootwrapDaemonHelper.get_client() return client.execute(cmd, process_input) @@ -107,29 +106,46 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False): try: + if (process_input is None or + isinstance(process_input, six.binary_type)): + _process_input = process_input + else: + _process_input = process_input.encode('utf-8') if run_as_root and cfg.CONF.AGENT.root_helper_daemon: returncode, _stdout, _stderr = ( execute_rootwrap_daemon(cmd, process_input, addl_env)) else: obj, cmd = create_process(cmd, run_as_root=run_as_root, addl_env=addl_env) - _stdout, _stderr = obj.communicate(process_input) + _stdout, _stderr = obj.communicate(_process_input) returncode = obj.returncode obj.stdin.close() + if six.PY3: + if isinstance(_stdout, bytes): + try: + _stdout = _stdout.decode(encoding='utf-8') + except UnicodeError: + pass + if isinstance(_stderr, bytes): + try: + _stderr = _stderr.decode(encoding='utf-8') + except UnicodeError: + pass - m = _("\nCommand: {cmd}\nExit code: {code}\nStdin: {stdin}\n" - "Stdout: {stdout}\nStderr: {stderr}").format( + m = _("\nCommand: {cmd}\nExit code: {code}\n").format( cmd=cmd, - code=returncode, - stdin=process_input or '', - stdout=_stdout, - stderr=_stderr) + code=returncode) extra_ok_codes = extra_ok_codes or [] if returncode and returncode in extra_ok_codes: returncode = None if returncode and log_fail_as_error: + m += ("Stdin: {stdin}\n" + "Stdout: {stdout}\nStderr: {stderr}").format( + stdin=process_input or '', + stdout=_stdout, + stderr=_stderr) LOG.error(m) else: LOG.debug(m) @@ -149,13 +165,15 @@ def get_interface_mac(interface): MAC_START = 18 MAC_END = 24 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - info = fcntl.ioctl(s.fileno(), 0x8927, - struct.pack('256s', interface[:constants.DEVICE_NAME_MAX_LEN])) + dev = 
interface[:constants.DEVICE_NAME_MAX_LEN] + if isinstance(dev, six.text_type): + dev = dev.encode('utf-8') + info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev)) return ''.join(['%02x:' % ord(char) for char in info[MAC_START:MAC_END]])[:-1] -def replace_file(file_name, data): +def replace_file(file_name, data, file_mode=0o644): """Replaces the contents of file_name with data in a safe manner. First write to a temp file and then rename. Since POSIX renames are @@ -168,7 +186,7 @@ def replace_file(file_name, data): tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) tmp_file.write(data) tmp_file.close() - os.chmod(tmp_file.name, 0o644) + os.chmod(tmp_file.name, file_mode) os.rename(tmp_file.name, file_name) diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index 60a571f087c..c458c6d04e6 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -269,9 +269,12 @@ class MetadataProxyHandler(object): raise Exception(_('Unexpected response code: %s') % resp.status) def _sign_instance_id(self, instance_id): - return hmac.new(self.conf.metadata_proxy_shared_secret, - instance_id, - hashlib.sha256).hexdigest() + secret = self.conf.metadata_proxy_shared_secret + if isinstance(secret, six.text_type): + secret = secret.encode('utf-8') + if isinstance(instance_id, six.text_type): + instance_id = instance_id.encode('utf-8') + return hmac.new(secret, instance_id, hashlib.sha256).hexdigest() class UnixDomainMetadataProxy(object): diff --git a/neutron/agent/metadata/namespace_proxy.py b/neutron/agent/metadata/namespace_proxy.py index d68cb2493a5..5cdde8c67fc 100644 --- a/neutron/agent/metadata/namespace_proxy.py +++ b/neutron/agent/metadata/namespace_proxy.py @@ -92,7 +92,7 @@ class NetworkMetadataProxyHandler(object): response = webob.Response() response.status = resp.status response.headers['Content-Type'] = resp['content-type'] - response.body = content + response.body = 
wsgi.encode_body(content) return response elif resp.status == 400: return webob.exc.HTTPBadRequest() diff --git a/neutron/agent/ovsdb/api.py b/neutron/agent/ovsdb/api.py index 58fb135f552..7dc88d02df9 100644 --- a/neutron/agent/ovsdb/api.py +++ b/neutron/agent/ovsdb/api.py @@ -95,14 +95,16 @@ class API(object): """ @abc.abstractmethod - def add_br(self, name, may_exist=True): + def add_br(self, name, may_exist=True, datapath_type=None): """Create an command to add an OVS bridge - :param name: The name of the bridge - :type name: string - :param may_exist: Do not fail if bridge already exists - :type may_exist: bool - :returns: :class:`Command` with no result + :param name: The name of the bridge + :type name: string + :param may_exist: Do not fail if bridge already exists + :type may_exist: bool + :param datapath_type: The datapath_type of the bridge + :type datapath_type: string + :returns: :class:`Command` with no result """ @abc.abstractmethod @@ -161,6 +163,29 @@ class API(object): :returns: :class:`Command` with field value result """ + @abc.abstractmethod + def db_create(self, table, **col_values): + """Create a command to create new record + + :param table: The OVS table containing the record to be created + :type table: string + :param col_values: The columns and their associated values + to be set after create + :type col_values: Dictionary of columns id's and values + :returns: :class:`Command` with no result + """ + + @abc.abstractmethod + def db_destroy(self, table, record): + """Create a command to destroy a record + + :param table: The OVS table containing the record to be destroyed + :type table: string + :param record: The record id (name/uuid) to be destroyed + :type record: uuid/string + :returns: :class:`Command` with no result + """ + @abc.abstractmethod def db_set(self, table, record, *col_values): """Create a command to set fields in a record diff --git a/neutron/agent/ovsdb/impl_idl.py b/neutron/agent/ovsdb/impl_idl.py index 
4edb407c366..c4459b94e86 100644 --- a/neutron/agent/ovsdb/impl_idl.py +++ b/neutron/agent/ovsdb/impl_idl.py @@ -144,8 +144,8 @@ class OvsdbIdl(api.API): self.context.vsctl_timeout, check_error, log_errors) - def add_br(self, name, may_exist=True): - return cmd.AddBridgeCommand(self, name, may_exist) + def add_br(self, name, may_exist=True, datapath_type=None): + return cmd.AddBridgeCommand(self, name, may_exist, datapath_type) def del_br(self, name, if_exists=True): return cmd.DelBridgeCommand(self, name, if_exists) @@ -168,6 +168,12 @@ class OvsdbIdl(api.API): def br_set_external_id(self, name, field, value): return cmd.BrSetExternalIdCommand(self, name, field, value) + def db_create(self, table, **col_values): + return cmd.DbCreateCommand(self, table, **col_values) + + def db_destroy(self, table, record): + return cmd.DbDestroyCommand(self, table, record) + def db_set(self, table, record, *col_values): return cmd.DbSetCommand(self, table, record, *col_values) diff --git a/neutron/agent/ovsdb/impl_vsctl.py b/neutron/agent/ovsdb/impl_vsctl.py index aa00922979f..306f5e48669 100644 --- a/neutron/agent/ovsdb/impl_vsctl.py +++ b/neutron/agent/ovsdb/impl_vsctl.py @@ -160,9 +160,13 @@ class OvsdbVsctl(ovsdb.API): def transaction(self, check_error=False, log_errors=True, **kwargs): return Transaction(self.context, check_error, log_errors, **kwargs) - def add_br(self, name, may_exist=True): + def add_br(self, name, may_exist=True, datapath_type=None): opts = ['--may-exist'] if may_exist else None - return BaseCommand(self.context, 'add-br', opts, [name]) + params = [name] + if datapath_type: + params += ['--', 'set', 'Bridge', name, + 'datapath_type=%s' % datapath_type] + return BaseCommand(self.context, 'add-br', opts, params) def del_br(self, name, if_exists=True): opts = ['--if-exists'] if if_exists else None @@ -184,6 +188,15 @@ class OvsdbVsctl(ovsdb.API): return BaseCommand(self.context, 'br-get-external-id', args=[name, field]) + def db_create(self, table, 
**col_values): + args = [table] + args += _set_colval_args(*col_values.items()) + return BaseCommand(self.context, 'create', args=args) + + def db_destroy(self, table, record): + args = [table, record] + return BaseCommand(self.context, 'destroy', args=args) + def db_set(self, table, record, *col_values): args = [table, record] args += _set_colval_args(*col_values) @@ -259,8 +272,11 @@ def _set_colval_args(*col_values): col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()] elif (isinstance(val, collections.Sequence) and not isinstance(val, six.string_types)): - args.append( - "%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val)))) + if len(val) == 0: + args.append("%s%s%s" % (col, op, "[]")) + else: + args.append( + "%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val)))) else: args.append("%s%s%s" % (col, op, ovsdb.py_to_val(val))) return args diff --git a/neutron/agent/ovsdb/native/commands.py b/neutron/agent/ovsdb/native/commands.py index 0ae9dd9c296..beb185a5815 100644 --- a/neutron/agent/ovsdb/native/commands.py +++ b/neutron/agent/ovsdb/native/commands.py @@ -50,10 +50,11 @@ class BaseCommand(api.Command): class AddBridgeCommand(BaseCommand): - def __init__(self, api, name, may_exist): + def __init__(self, api, name, may_exist, datapath_type): super(AddBridgeCommand, self).__init__(api) self.name = name self.may_exist = may_exist + self.datapath_type = datapath_type def run_idl(self, txn): if self.may_exist: @@ -63,6 +64,8 @@ class AddBridgeCommand(BaseCommand): return row = txn.insert(self.api._tables['Bridge']) row.name = self.name + if self.datapath_type: + row.datapath_type = self.datapath_type self.api._ovs.verify('bridges') self.api._ovs.bridges = self.api._ovs.bridges + [row] @@ -148,6 +151,30 @@ class BrSetExternalIdCommand(BaseCommand): br.external_ids = external_ids +class DbCreateCommand(BaseCommand): + def __init__(self, api, table, **columns): + super(DbCreateCommand, self).__init__(api) + self.table = table + self.columns = columns + + 
def run_idl(self, txn): + row = txn.insert(self.api._tables[self.table]) + for col, val in self.columns.items(): + setattr(row, col, val) + self.result = row + + +class DbDestroyCommand(BaseCommand): + def __init__(self, api, table, record): + super(DbDestroyCommand, self).__init__(api) + self.table = table + self.record = record + + def run_idl(self, txn): + record = idlutils.row_by_record(self.api.idl, self.table, self.record) + record.delete() + + class DbSetCommand(BaseCommand): def __init__(self, api, table, record, *col_values): super(DbSetCommand, self).__init__(api) diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py index ec1ad6b2c9f..a0ac9ed3c6f 100644 --- a/neutron/agent/securitygroups_rpc.py +++ b/neutron/agent/securitygroups_rpc.py @@ -110,23 +110,6 @@ class SecurityGroupAgentRpc(object): self.global_refresh_firewall = False self._use_enhanced_rpc = None - def set_local_zone(self, device): - """Set local zone id for device - - In order to separate conntrack in different networks, a local zone - id is needed to generate related iptables rules. This routine sets - zone id to device according to the network it belongs to. For OVS - agent, vlan id of each network can be used as zone id. 
- - :param device: dictionary of device information, get network id by - device['network_id'], and set zone id by device['zone_id'] - """ - net_id = device['network_id'] - zone_id = None - if self.local_vlan_map and net_id in self.local_vlan_map: - zone_id = self.local_vlan_map[net_id].vlan - device['zone_id'] = zone_id - @property def use_enhanced_rpc(self): if self._use_enhanced_rpc is None: @@ -176,7 +159,6 @@ class SecurityGroupAgentRpc(object): with self.firewall.defer_apply(): for device in devices.values(): - self.set_local_zone(device) self.firewall.prepare_port_filter(device) if self.use_enhanced_rpc: LOG.debug("Update security group information for ports %s", @@ -267,7 +249,6 @@ class SecurityGroupAgentRpc(object): with self.firewall.defer_apply(): for device in devices.values(): LOG.debug("Update port filter for %s", device['device']) - self.set_local_zone(device) self.firewall.update_port_filter(device) if self.use_enhanced_rpc: LOG.debug("Update security group information for ports %s", diff --git a/neutron/agent/windows/utils.py b/neutron/agent/windows/utils.py index 5221534a63b..bcbccd3bcd1 100644 --- a/neutron/agent/windows/utils.py +++ b/neutron/agent/windows/utils.py @@ -18,6 +18,7 @@ import os from eventlet.green import subprocess from eventlet import greenthread from oslo_log import log as logging +import six from neutron.common import utils @@ -45,12 +46,29 @@ def create_process(cmd, addl_env=None): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, - extra_ok_codes=None, run_as_root=False): + extra_ok_codes=None, run_as_root=False, do_decode=True): try: + if (process_input is None or + isinstance(process_input, six.binary_type)): + _process_input = process_input + else: + _process_input = process_input.encode('utf-8') obj, cmd = create_process(cmd, addl_env=addl_env) - _stdout, _stderr = obj.communicate(process_input) + _stdout, _stderr = obj.communicate(_process_input) 
obj.stdin.close() + if six.PY3: + if isinstance(_stdout, bytes): + try: + _stdout = _stdout.decode(encoding='utf-8') + except UnicodeError: + pass + if isinstance(_stderr, bytes): + try: + _stderr = _stderr.decode(encoding='utf-8') + except UnicodeError: + pass + m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n" "Stdout: %(stdout)s\nStderr: %(stderr)s") % \ {'cmd': cmd, diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index 595c592bd72..5b7032092cc 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -273,7 +273,17 @@ class SortingEmulatedHelper(SortingHelper): def sort(self, items): def cmp_func(obj1, obj2): for key, direction in self.sort_dict: - ret = (obj1[key] > obj2[key]) - (obj1[key] < obj2[key]) + o1 = obj1[key] + o2 = obj2[key] + + if o1 is None and o2 is None: + ret = 0 + elif o1 is None and o2 is not None: + ret = -1 + elif o1 is not None and o2 is None: + ret = 1 + else: + ret = (o1 > o2) - (o1 < o2) if ret: return ret * (1 if direction else -1) return 0 diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index 27b8a0d4dcb..407f702c073 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -17,7 +17,6 @@ import abc import collections import imp -import itertools import os from oslo_config import cfg @@ -560,10 +559,7 @@ class PluginAwareExtensionManager(ExtensionManager): def _plugins_support(self, extension): alias = extension.get_alias() - supports_extension = any((hasattr(plugin, - "supported_extension_aliases") and - alias in plugin.supported_extension_aliases) - for plugin in self.plugins.values()) + supports_extension = alias in self.get_supported_extension_aliases() if not supports_extension: LOG.warn(_LW("Extension %s not supported by any of loaded " "plugins"), @@ -588,11 +584,25 @@ class PluginAwareExtensionManager(ExtensionManager): manager.NeutronManager.get_service_plugins()) return cls._instance + def get_supported_extension_aliases(self): + """Gets 
extension aliases supported by all plugins.""" + aliases = set() + for plugin in self.plugins.values(): + # we also check all classes that the plugins inherit to see if they + # directly provide support for an extension + for item in [plugin] + plugin.__class__.mro(): + try: + aliases |= set( + getattr(item, "supported_extension_aliases", [])) + except TypeError: + # we land here if a class has an @property decorator for + # supported extension aliases. They only work on objects. + pass + return aliases + def check_if_plugin_extensions_loaded(self): """Check if an extension supported by a plugin has been loaded.""" - plugin_extensions = set(itertools.chain.from_iterable([ - getattr(plugin, "supported_extension_aliases", []) - for plugin in self.plugins.values()])) + plugin_extensions = self.get_supported_extension_aliases() missing_aliases = plugin_extensions - set(self.extensions) if missing_aliases: raise exceptions.ExtensionsNotFound( diff --git a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py index c0a7160c02b..fb5b3d0467c 100644 --- a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py @@ -100,7 +100,7 @@ class L3AgentNotifyAPI(object): cctxt.cast(context, method, payload=dvr_arptable) def _notification(self, context, method, router_ids, operation, - shuffle_agents): + shuffle_agents, schedule_routers=True): """Notify all the agents that are hosting the routers.""" plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) @@ -112,7 +112,8 @@ class L3AgentNotifyAPI(object): plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): adminContext = (context.is_admin and context or context.elevated()) - plugin.schedule_routers(adminContext, router_ids) + if schedule_routers: + plugin.schedule_routers(adminContext, router_ids) self._agent_notification( context, method, router_ids, operation, shuffle_agents) else: @@ -138,10 
+139,10 @@ class L3AgentNotifyAPI(object): self._notification_fanout(context, 'router_deleted', router_id) def routers_updated(self, context, router_ids, operation=None, data=None, - shuffle_agents=False): + shuffle_agents=False, schedule_routers=True): if router_ids: self._notification(context, 'routers_updated', router_ids, - operation, shuffle_agents) + operation, shuffle_agents, schedule_routers) def add_arp_entry(self, context, router_id, arp_table, operation=None): self._agent_notification_arp(context, 'add_arp_entry', router_id, diff --git a/neutron/plugins/ibm/common/__init__.py b/neutron/api/rpc/callbacks/__init__.py similarity index 100% rename from neutron/plugins/ibm/common/__init__.py rename to neutron/api/rpc/callbacks/__init__.py diff --git a/neutron/plugins/ml2/drivers/cisco/ncs/__init__.py b/neutron/api/rpc/callbacks/consumer/__init__.py similarity index 100% rename from neutron/plugins/ml2/drivers/cisco/ncs/__init__.py rename to neutron/api/rpc/callbacks/consumer/__init__.py diff --git a/neutron/api/rpc/callbacks/consumer/registry.py b/neutron/api/rpc/callbacks/consumer/registry.py new file mode 100644 index 00000000000..3f6c5754f05 --- /dev/null +++ b/neutron/api/rpc/callbacks/consumer/registry.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log as logging + +from neutron.api.rpc.callbacks import resource_manager + + +LOG = logging.getLogger(__name__) + + +#TODO(ajo): consider adding locking to _get_manager, it's +# safe for eventlet, but not for normal threading. +def _get_manager(): + return resource_manager.ConsumerResourceCallbacksManager() + + +def subscribe(callback, resource_type): + _get_manager().register(callback, resource_type) + + +def unsubscribe(callback, resource_type): + _get_manager().unregister(callback, resource_type) + + +def push(resource_type, resource, event_type): + """Push resource events into all registered callbacks for the type.""" + + callbacks = _get_manager().get_callbacks(resource_type) + for callback in callbacks: + callback(resource, event_type) + + +def clear(): + _get_manager().clear() diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py b/neutron/api/rpc/callbacks/events.py similarity index 66% rename from neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py rename to neutron/api/rpc/callbacks/events.py index fac9503df8c..485a1bc801e 100644 --- a/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py +++ b/neutron/api/rpc/callbacks/events.py @@ -1,6 +1,3 @@ -# Copyright 2013 OpenStack Foundation -# All rights reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,12 +10,12 @@ # License for the specific language governing permissions and limitations # under the License. -""" -ML2 Mechanism Driver for Cisco Nexus platforms. 
-""" +CREATED = 'created' +UPDATED = 'updated' +DELETED = 'deleted' -from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus - - -class CiscoNexusMechanismDriver(mech_cisco_nexus.CiscoNexusMechanismDriver): - pass +VALID = ( + CREATED, + UPDATED, + DELETED +) diff --git a/neutron/plugins/ibm/common/exceptions.py b/neutron/api/rpc/callbacks/exceptions.py similarity index 61% rename from neutron/plugins/ibm/common/exceptions.py rename to neutron/api/rpc/callbacks/exceptions.py index 26298bae131..9e17474db08 100644 --- a/neutron/plugins/ibm/common/exceptions.py +++ b/neutron/api/rpc/callbacks/exceptions.py @@ -1,7 +1,3 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -17,10 +13,13 @@ from neutron.common import exceptions -class SdnveException(exceptions.NeutronException): - message = _("An unexpected error occurred in the SDN-VE Plugin. 
" - "Here is the error message: %(msg)s") +class CallbackWrongResourceType(exceptions.NeutronException): + message = _('Callback for %(resource_type)s returned wrong resource type') -class BadInputException(exceptions.BadRequest): - message = _("The input does not contain nececessary info: %(msg)s") +class CallbackNotFound(exceptions.NeutronException): + message = _('Callback for %(resource_type)s not found') + + +class CallbacksMaxLimitReached(exceptions.NeutronException): + message = _("Cannot add multiple callbacks for %(resource_type)s") diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py b/neutron/api/rpc/callbacks/producer/__init__.py similarity index 100% rename from neutron/plugins/ml2/drivers/cisco/nexus/__init__.py rename to neutron/api/rpc/callbacks/producer/__init__.py diff --git a/neutron/api/rpc/callbacks/producer/registry.py b/neutron/api/rpc/callbacks/producer/registry.py new file mode 100644 index 00000000000..b19a8bfd501 --- /dev/null +++ b/neutron/api/rpc/callbacks/producer/registry.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from neutron.api.rpc.callbacks import exceptions +from neutron.api.rpc.callbacks import resource_manager +from neutron.objects import base + + +LOG = logging.getLogger(__name__) + + +# TODO(ajo): consider adding locking: it's safe for eventlet but not +# for other types of threading. 
+def _get_manager(): + return resource_manager.ProducerResourceCallbacksManager() + + +def provide(callback, resource_type): + """Register a callback as a producer for the resource type. + + This callback will be used to produce resources of corresponding type for + interested parties. + """ + _get_manager().register(callback, resource_type) + + +def unprovide(callback, resource_type): + """Unregister a callback for corresponding resource type.""" + _get_manager().unregister(callback, resource_type) + + +def clear(): + """Clear all callbacks.""" + _get_manager().clear() + + +def pull(resource_type, resource_id, **kwargs): + """Get resource object that corresponds to resource id. + + The function will return an object that is provided by resource producer. + + :returns: NeutronObject + """ + callback = _get_manager().get_callback(resource_type) + obj = callback(resource_type, resource_id, **kwargs) + if obj: + if (not isinstance(obj, base.NeutronObject) or + resource_type != obj.obj_name()): + raise exceptions.CallbackWrongResourceType( + resource_type=resource_type) + return obj diff --git a/neutron/api/rpc/callbacks/resource_manager.py b/neutron/api/rpc/callbacks/resource_manager.py new file mode 100644 index 00000000000..63f89803358 --- /dev/null +++ b/neutron/api/rpc/callbacks/resource_manager.py @@ -0,0 +1,139 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import collections + +from oslo_log import log as logging +import six + +from neutron.api.rpc.callbacks import exceptions as rpc_exc +from neutron.api.rpc.callbacks import resources +from neutron.callbacks import exceptions + +LOG = logging.getLogger(__name__) + +# TODO(QoS): split the registry/resources_rpc modules into two separate things: +# one for pull and one for push APIs + + +def _validate_resource_type(resource_type): + if not resources.is_valid_resource_type(resource_type): + raise exceptions.Invalid(element='resource', value=resource_type) + + +@six.add_metaclass(abc.ABCMeta) +class ResourceCallbacksManager(object): + """A callback system that allows information providers in a loose manner. + """ + + # This hook is to allow tests to get new objects for the class + _singleton = True + + def __new__(cls, *args, **kwargs): + if not cls._singleton: + return super(ResourceCallbacksManager, cls).__new__(cls) + + if not hasattr(cls, '_instance'): + cls._instance = super(ResourceCallbacksManager, cls).__new__(cls) + return cls._instance + + @abc.abstractmethod + def _add_callback(self, callback, resource_type): + pass + + @abc.abstractmethod + def _delete_callback(self, callback, resource_type): + pass + + def register(self, callback, resource_type): + """Register a callback for a resource type. + + :param callback: the callback. It must raise or return NeutronObject. + :param resource_type: must be a valid resource type. + """ + LOG.debug("Registering callback for %s", resource_type) + _validate_resource_type(resource_type) + self._add_callback(callback, resource_type) + + def unregister(self, callback, resource_type): + """Unregister callback from the registry. + + :param callback: the callback. + :param resource_type: must be a valid resource type. 
+ """ + LOG.debug("Unregistering callback for %s", resource_type) + _validate_resource_type(resource_type) + self._delete_callback(callback, resource_type) + + @abc.abstractmethod + def clear(self): + """Brings the manager to a clean state.""" + + def get_subscribed_types(self): + return list(self._callbacks.keys()) + + +class ProducerResourceCallbacksManager(ResourceCallbacksManager): + + _callbacks = dict() + + def _add_callback(self, callback, resource_type): + if resource_type in self._callbacks: + raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type) + self._callbacks[resource_type] = callback + + def _delete_callback(self, callback, resource_type): + try: + del self._callbacks[resource_type] + except KeyError: + raise rpc_exc.CallbackNotFound(resource_type=resource_type) + + def clear(self): + self._callbacks = dict() + + def get_callback(self, resource_type): + _validate_resource_type(resource_type) + try: + return self._callbacks[resource_type] + except KeyError: + raise rpc_exc.CallbackNotFound(resource_type=resource_type) + + +class ConsumerResourceCallbacksManager(ResourceCallbacksManager): + + _callbacks = collections.defaultdict(set) + + def _add_callback(self, callback, resource_type): + self._callbacks[resource_type].add(callback) + + def _delete_callback(self, callback, resource_type): + try: + self._callbacks[resource_type].remove(callback) + if not self._callbacks[resource_type]: + del self._callbacks[resource_type] + except KeyError: + raise rpc_exc.CallbackNotFound(resource_type=resource_type) + + def clear(self): + self._callbacks = collections.defaultdict(set) + + def get_callbacks(self, resource_type): + """Return the callback if found, None otherwise. + + :param resource_type: must be a valid resource type. 
+ """ + _validate_resource_type(resource_type) + callbacks = self._callbacks[resource_type] + if not callbacks: + raise rpc_exc.CallbackNotFound(resource_type=resource_type) + return callbacks diff --git a/neutron/api/rpc/callbacks/resources.py b/neutron/api/rpc/callbacks/resources.py new file mode 100644 index 00000000000..bde7aed9a7e --- /dev/null +++ b/neutron/api/rpc/callbacks/resources.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.objects.qos import policy + + +_QOS_POLICY_CLS = policy.QosPolicy + +_VALID_CLS = ( + _QOS_POLICY_CLS, +) + +_VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS] + + +# Supported types +QOS_POLICY = _QOS_POLICY_CLS.obj_name() + + +_TYPE_TO_CLS_MAP = { + QOS_POLICY: _QOS_POLICY_CLS, +} + + +def get_resource_type(resource_cls): + if not resource_cls: + return None + + if not hasattr(resource_cls, 'obj_name'): + return None + + return resource_cls.obj_name() + + +def is_valid_resource_type(resource_type): + return resource_type in _VALID_TYPES + + +def get_resource_cls(resource_type): + return _TYPE_TO_CLS_MAP.get(resource_type) diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py index bba9f2341fa..9eb23f8eb79 100644 --- a/neutron/api/rpc/handlers/dhcp_rpc.py +++ b/neutron/api/rpc/handlers/dhcp_rpc.py @@ -30,8 +30,10 @@ from neutron.db import api as db_api from neutron.extensions import portbindings from neutron.i18n import _LW from neutron import manager +from 
neutron.plugins.common import utils as p_utils from neutron.quota import resource_registry + LOG = logging.getLogger(__name__) @@ -77,7 +79,7 @@ class DhcpRpcCallback(object): """Perform port operations taking care of concurrency issues.""" try: if action == 'create_port': - return plugin.create_port(context, port) + return p_utils.create_port(plugin, context, port) elif action == 'update_port': return plugin.update_port(context, port['id'], port) else: diff --git a/neutron/api/rpc/handlers/dvr_rpc.py b/neutron/api/rpc/handlers/dvr_rpc.py index 8b6574707e8..02909b6f70a 100644 --- a/neutron/api/rpc/handlers/dvr_rpc.py +++ b/neutron/api/rpc/handlers/dvr_rpc.py @@ -32,6 +32,9 @@ class DVRServerRpcApi(object): can be found below: DVRServerRpcCallback. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ + # 1.0 Initial Version + # 1.1 Support for passing 'fixed_ips' in get_subnet_for_dvr function. + # Passing 'subnet" will be deprecated in the next release. def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0', @@ -55,9 +58,10 @@ class DVRServerRpcApi(object): host=host, subnet=subnet) @log_helpers.log_method_call - def get_subnet_for_dvr(self, context, subnet): + def get_subnet_for_dvr(self, context, subnet, fixed_ips): cctxt = self.client.prepare() - return cctxt.call(context, 'get_subnet_for_dvr', subnet=subnet) + return cctxt.call( + context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips) class DVRServerRpcCallback(object): @@ -70,8 +74,10 @@ class DVRServerRpcCallback(object): # History # 1.0 Initial version + # 1.1 Support for passing the 'fixed_ips" in get_subnet_for_dvr. + # Passing subnet will be deprecated in the next release. 
- target = oslo_messaging.Target(version='1.0', + target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_DVR) @property @@ -96,8 +102,10 @@ class DVRServerRpcCallback(object): host, subnet) def get_subnet_for_dvr(self, context, **kwargs): + fixed_ips = kwargs.get('fixed_ips') subnet = kwargs.get('subnet') - return self.plugin.get_subnet_for_dvr(context, subnet) + return self.plugin.get_subnet_for_dvr( + context, subnet, fixed_ips=fixed_ips) class DVRAgentRpcApiMixin(object): diff --git a/neutron/api/rpc/handlers/l3_rpc.py b/neutron/api/rpc/handlers/l3_rpc.py index 176ddb22974..6c2ca53f2ab 100644 --- a/neutron/api/rpc/handlers/l3_rpc.py +++ b/neutron/api/rpc/handlers/l3_rpc.py @@ -98,13 +98,16 @@ class L3RpcCallback(object): LOG.debug("Checking router: %(id)s for host: %(host)s", {'id': router['id'], 'host': host}) if router.get('gw_port') and router.get('distributed'): + # '' is used to effectively clear binding of a gw port if not + # bound (snat is not hosted on any l3 agent) + gw_port_host = router.get('gw_port_host') or '' self._ensure_host_set_on_port(context, - router.get('gw_port_host'), + gw_port_host, router.get('gw_port'), router['id']) for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []): self._ensure_host_set_on_port(context, - router.get('gw_port_host'), + gw_port_host, p, router['id']) else: self._ensure_host_set_on_port( @@ -143,6 +146,8 @@ class L3RpcCallback(object): context, port['id'], {'port': {portbindings.HOST_ID: host}}) + # updating port's host to pass actual info to l3 agent + port[portbindings.HOST_ID] = host except exceptions.PortNotFound: LOG.debug("Port %(port)s not found while updating " "agent binding for router %(router)s.", @@ -269,6 +274,10 @@ class L3RpcCallback(object): def process_prefix_update(self, context, **kwargs): subnets = kwargs.get('subnets') + updated_subnets = [] for subnet_id, prefix in subnets.items(): - self.plugin.update_subnet(context, subnet_id, - {'subnet': {'cidr': prefix}}) + 
updated_subnets.append(self.plugin.update_subnet( + context, + subnet_id, + {'subnet': {'cidr': prefix}})) + return updated_subnets diff --git a/neutron/api/rpc/handlers/resources_rpc.py b/neutron/api/rpc/handlers/resources_rpc.py new file mode 100755 index 00000000000..55344a81104 --- /dev/null +++ b/neutron/api/rpc/handlers/resources_rpc.py @@ -0,0 +1,174 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +import oslo_messaging + +from neutron.api.rpc.callbacks.consumer import registry as cons_registry +from neutron.api.rpc.callbacks.producer import registry as prod_registry +from neutron.api.rpc.callbacks import resources +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.objects import base as obj_base + + +LOG = logging.getLogger(__name__) + + +class ResourcesRpcError(exceptions.NeutronException): + pass + + +class InvalidResourceTypeClass(ResourcesRpcError): + message = _("Invalid resource type %(resource_type)s") + + +class ResourceNotFound(ResourcesRpcError): + message = _("Resource %(resource_id)s of type %(resource_type)s " + "not found") + + +def _validate_resource_type(resource_type): + if not resources.is_valid_resource_type(resource_type): + raise 
InvalidResourceTypeClass(resource_type=resource_type) + + +def resource_type_versioned_topic(resource_type): + _validate_resource_type(resource_type) + cls = resources.get_resource_cls(resource_type) + return topics.RESOURCE_TOPIC_PATTERN % {'resource_type': resource_type, + 'version': cls.VERSION} + + +class ResourcesPullRpcApi(object): + """Agent-side RPC (stub) for agent-to-plugin interaction. + + This class implements the client side of an rpc interface. The server side + can be found below: ResourcesPullRpcCallback. For more information on + this RPC interface, see doc/source/devref/rpc_callbacks.rst. + """ + + def __new__(cls): + # make it a singleton + if not hasattr(cls, '_instance'): + cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls) + target = oslo_messaging.Target( + topic=topics.PLUGIN, version='1.0', + namespace=constants.RPC_NAMESPACE_RESOURCES) + cls._instance.client = n_rpc.get_client(target) + return cls._instance + + @log_helpers.log_method_call + def pull(self, context, resource_type, resource_id): + _validate_resource_type(resource_type) + + # we've already validated the resource type, so we are pretty sure the + # class is there => no need to validate it specifically + resource_type_cls = resources.get_resource_cls(resource_type) + + cctxt = self.client.prepare() + primitive = cctxt.call(context, 'pull', + resource_type=resource_type, + version=resource_type_cls.VERSION, resource_id=resource_id) + + if primitive is None: + raise ResourceNotFound(resource_type=resource_type, + resource_id=resource_id) + + return resource_type_cls.clean_obj_from_primitive(primitive) + + +class ResourcesPullRpcCallback(object): + """Plugin-side RPC (implementation) for agent-to-plugin interaction. + + This class implements the server side of an rpc interface. The client side + can be found above: ResourcesPullRpcApi. For more information on + this RPC interface, see doc/source/devref/rpc_callbacks.rst. 
+ """ + + # History + # 1.0 Initial version + + target = oslo_messaging.Target( + version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) + + def pull(self, context, resource_type, version, resource_id): + obj = prod_registry.pull(resource_type, resource_id, context=context) + if obj: + #TODO(QoS): Remove in the future with new version of + # versionedobjects containing + # https://review.openstack.org/#/c/207998/ + if version == obj.VERSION: + version = None + return obj.obj_to_primitive(target_version=version) + + +class ResourcesPushRpcApi(object): + """Plugin-side RPC for plugin-to-agents interaction. + + This interface is designed to push versioned object updates to interested + agents using fanout topics. + + This class implements the caller side of an rpc interface. The receiver + side can be found below: ResourcesPushRpcCallback. + """ + + def __init__(self): + target = oslo_messaging.Target( + version='1.0', + namespace=constants.RPC_NAMESPACE_RESOURCES) + self.client = n_rpc.get_client(target) + + def _prepare_object_fanout_context(self, obj): + """Prepare fanout context, one topic per object type.""" + obj_topic = resource_type_versioned_topic(obj.obj_name()) + return self.client.prepare(fanout=True, topic=obj_topic) + + @log_helpers.log_method_call + def push(self, context, resource, event_type): + resource_type = resources.get_resource_type(resource) + _validate_resource_type(resource_type) + cctxt = self._prepare_object_fanout_context(resource) + #TODO(QoS): Push notifications for every known version once we have + # multiple of those + dehydrated_resource = resource.obj_to_primitive() + cctxt.cast(context, 'push', + resource=dehydrated_resource, + event_type=event_type) + + +class ResourcesPushRpcCallback(object): + """Agent-side RPC for plugin-to-agents interaction. + + This class implements the receiver for notification about versioned objects + resource updates used by neutron.api.rpc.callbacks. 
You can find the + caller side in ResourcesPushRpcApi. + """ + # History + # 1.0 Initial version + + target = oslo_messaging.Target(version='1.0', + namespace=constants.RPC_NAMESPACE_RESOURCES) + + def push(self, context, resource, event_type): + resource_obj = obj_base.NeutronObject.clean_obj_from_primitive( + resource) + LOG.debug("Resources notification (%(event_type)s): %(resource)s", + {'event_type': event_type, 'resource': repr(resource_obj)}) + resource_type = resources.get_resource_type(resource_obj) + cons_registry.push(resource_type, resource_obj, event_type) diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py index ff0165be431..c2ffe7c462b 100644 --- a/neutron/api/v2/attributes.py +++ b/neutron/api/v2/attributes.py @@ -19,6 +19,7 @@ import netaddr from oslo_log import log as logging from oslo_utils import uuidutils import six +import webob.exc from neutron.common import constants from neutron.common import exceptions as n_exc @@ -170,6 +171,10 @@ def _validate_mac_address(data, valid_values=None): valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) except Exception: valid_mac = False + + if valid_mac: + valid_mac = not netaddr.EUI(data) in map(netaddr.EUI, + constants.INVALID_MAC_ADDRESSES) # TODO(arosen): The code in this file should be refactored # so it catches the correct exceptions. _validate_no_whitespace # raises AttributeError if data is None. 
@@ -825,7 +830,7 @@ RESOURCE_ATTRIBUTE_MAP = { 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, + 'validate': {'type:string': TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'prefixes': {'allow_post': True, @@ -884,3 +889,65 @@ PLURALS = {NETWORKS: NETWORK, 'allocation_pools': 'allocation_pool', 'fixed_ips': 'fixed_ip', 'extensions': 'extension'} + + +def fill_default_value(attr_info, res_dict, + exc_cls=ValueError, + check_allow_post=True): + for attr, attr_vals in six.iteritems(attr_info): + if attr_vals['allow_post']: + if ('default' not in attr_vals and + attr not in res_dict): + msg = _("Failed to parse request. Required " + "attribute '%s' not specified") % attr + raise exc_cls(msg) + res_dict[attr] = res_dict.get(attr, + attr_vals.get('default')) + elif check_allow_post: + if attr in res_dict: + msg = _("Attribute '%s' not allowed in POST") % attr + raise exc_cls(msg) + + +def convert_value(attr_info, res_dict, exc_cls=ValueError): + for attr, attr_vals in six.iteritems(attr_info): + if (attr not in res_dict or + res_dict[attr] is ATTR_NOT_SPECIFIED): + continue + # Convert values if necessary + if 'convert_to' in attr_vals: + res_dict[attr] = attr_vals['convert_to'](res_dict[attr]) + # Check that configured values are correct + if 'validate' not in attr_vals: + continue + for rule in attr_vals['validate']: + res = validators[rule](res_dict[attr], attr_vals['validate'][rule]) + if res: + msg_dict = dict(attr=attr, reason=res) + msg = _("Invalid input for %(attr)s. 
" + "Reason: %(reason)s.") % msg_dict + raise exc_cls(msg) + + +def populate_tenant_id(context, res_dict, attr_info, is_create): + if (('tenant_id' in res_dict and + res_dict['tenant_id'] != context.tenant_id and + not context.is_admin)): + msg = _("Specifying 'tenant_id' other than authenticated " + "tenant in request requires admin privileges") + raise webob.exc.HTTPBadRequest(msg) + + if is_create and 'tenant_id' not in res_dict: + if context.tenant_id: + res_dict['tenant_id'] = context.tenant_id + elif 'tenant_id' in attr_info: + msg = _("Running without keystone AuthN requires " + "that tenant_id is specified") + raise webob.exc.HTTPBadRequest(msg) + + +def verify_attributes(res_dict, attr_info): + extra_keys = set(res_dict.keys()) - set(attr_info.keys()) + if extra_keys: + msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys) + raise webob.exc.HTTPBadRequest(msg) diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index cd591b4f9ea..69a88d230b2 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -194,7 +194,12 @@ class Controller(object): policy.init() # Fetch the resource and verify if the user can access it try: - resource = self._item(request, id, True) + parent_id = kwargs.get(self._parent_id_name) + resource = self._item(request, + id, + do_authz=True, + field_list=None, + parent_id=parent_id) except oslo_policy.PolicyNotAuthorized: msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) @@ -596,23 +601,6 @@ class Controller(object): self._send_nova_notification(action, orig_object_copy, result) return result - @staticmethod - def _populate_tenant_id(context, res_dict, attr_info, is_create): - if (('tenant_id' in res_dict and - res_dict['tenant_id'] != context.tenant_id and - not context.is_admin)): - msg = _("Specifying 'tenant_id' other than authenticated " - "tenant in request requires admin privileges") - raise webob.exc.HTTPBadRequest(msg) - - if is_create and 'tenant_id' not in res_dict: - 
if context.tenant_id: - res_dict['tenant_id'] = context.tenant_id - elif 'tenant_id' in attr_info: - msg = _("Running without keystone AuthN requires " - "that tenant_id is specified") - raise webob.exc.HTTPBadRequest(msg) - @staticmethod def prepare_request_body(context, body, is_create, resource, attr_info, allow_bulk=False): @@ -652,56 +640,21 @@ class Controller(object): msg = _("Unable to find '%s' in request body") % resource raise webob.exc.HTTPBadRequest(msg) - Controller._populate_tenant_id(context, res_dict, attr_info, is_create) - Controller._verify_attributes(res_dict, attr_info) + attributes.populate_tenant_id(context, res_dict, attr_info, is_create) + attributes.verify_attributes(res_dict, attr_info) if is_create: # POST - for attr, attr_vals in six.iteritems(attr_info): - if attr_vals['allow_post']: - if ('default' not in attr_vals and - attr not in res_dict): - msg = _("Failed to parse request. Required " - "attribute '%s' not specified") % attr - raise webob.exc.HTTPBadRequest(msg) - res_dict[attr] = res_dict.get(attr, - attr_vals.get('default')) - else: - if attr in res_dict: - msg = _("Attribute '%s' not allowed in POST") % attr - raise webob.exc.HTTPBadRequest(msg) + attributes.fill_default_value(attr_info, res_dict, + webob.exc.HTTPBadRequest) else: # PUT for attr, attr_vals in six.iteritems(attr_info): if attr in res_dict and not attr_vals['allow_put']: msg = _("Cannot update read-only attribute %s") % attr raise webob.exc.HTTPBadRequest(msg) - for attr, attr_vals in six.iteritems(attr_info): - if (attr not in res_dict or - res_dict[attr] is attributes.ATTR_NOT_SPECIFIED): - continue - # Convert values if necessary - if 'convert_to' in attr_vals: - res_dict[attr] = attr_vals['convert_to'](res_dict[attr]) - # Check that configured values are correct - if 'validate' not in attr_vals: - continue - for rule in attr_vals['validate']: - res = attributes.validators[rule](res_dict[attr], - attr_vals['validate'][rule]) - if res: - msg_dict = 
dict(attr=attr, reason=res) - msg = _("Invalid input for %(attr)s. " - "Reason: %(reason)s.") % msg_dict - raise webob.exc.HTTPBadRequest(msg) + attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest) return body - @staticmethod - def _verify_attributes(res_dict, attr_info): - extra_keys = set(res_dict.keys()) - set(attr_info.keys()) - if extra_keys: - msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys) - raise webob.exc.HTTPBadRequest(msg) - def _validate_network_tenant_ownership(self, request, resource_item): # TODO(salvatore-orlando): consider whether this check can be folded # in the policy engine diff --git a/neutron/api/versions.py b/neutron/api/versions.py index 3d9cfec6c38..857dd6eca28 100644 --- a/neutron/api/versions.py +++ b/neutron/api/versions.py @@ -57,6 +57,6 @@ class Versions(object): response = webob.Response() response.content_type = content_type - response.body = body + response.body = wsgi.encode_body(body) return response diff --git a/neutron/callbacks/events.py b/neutron/callbacks/events.py index 2abc57ce128..7dfd83d5e8e 100644 --- a/neutron/callbacks/events.py +++ b/neutron/callbacks/events.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. +# String literals representing core events. 
BEFORE_CREATE = 'before_create' BEFORE_READ = 'before_read' BEFORE_UPDATE = 'before_update' @@ -27,18 +28,3 @@ ABORT_DELETE = 'abort_delete' ABORT = 'abort_' BEFORE = 'before_' - -VALID = ( - BEFORE_CREATE, - BEFORE_READ, - BEFORE_UPDATE, - BEFORE_DELETE, - AFTER_CREATE, - AFTER_READ, - AFTER_UPDATE, - AFTER_DELETE, - ABORT_CREATE, - ABORT_READ, - ABORT_UPDATE, - ABORT_DELETE, -) diff --git a/neutron/callbacks/manager.py b/neutron/callbacks/manager.py index 4927ff337f6..c5b97e9af73 100644 --- a/neutron/callbacks/manager.py +++ b/neutron/callbacks/manager.py @@ -17,7 +17,6 @@ from oslo_utils import reflection from neutron.callbacks import events from neutron.callbacks import exceptions -from neutron.callbacks import resources from neutron.i18n import _LE LOG = logging.getLogger(__name__) @@ -40,13 +39,15 @@ class CallbacksManager(object): """ LOG.debug("Subscribe: %(callback)s %(resource)s %(event)s", {'callback': callback, 'resource': resource, 'event': event}) - if resource not in resources.VALID: - raise exceptions.Invalid(element='resource', value=resource) - if event not in events.VALID: - raise exceptions.Invalid(element='event', value=event) callback_id = _get_id(callback) - self._callbacks[resource][event][callback_id] = callback + try: + self._callbacks[resource][event][callback_id] = callback + except KeyError: + # Initialize the registry for unknown resources and/or events + # prior to enlisting the callback. + self._callbacks[resource][event] = {} + self._callbacks[resource][event][callback_id] = callback # We keep a copy of callbacks to speed the unsubscribe operation. 
if callback_id not in self._index: self._index[callback_id] = collections.defaultdict(set) @@ -125,9 +126,6 @@ class CallbacksManager(object): """Brings the manager to a clean slate.""" self._callbacks = collections.defaultdict(dict) self._index = collections.defaultdict(dict) - for resource in resources.VALID: - for event in events.VALID: - self._callbacks[resource][event] = collections.defaultdict() def _notify_loop(self, resource, event, trigger, **kwargs): """The notification loop.""" @@ -135,8 +133,9 @@ class CallbacksManager(object): {'resource': resource, 'event': event}) errors = [] + callbacks = self._callbacks[resource].get(event, {}).items() # TODO(armax): consider using a GreenPile - for callback_id, callback in self._callbacks[resource][event].items(): + for callback_id, callback in callbacks: try: LOG.debug("Calling callback %s", callback_id) callback(resource, event, trigger, **kwargs) diff --git a/neutron/callbacks/resources.py b/neutron/callbacks/resources.py index d796faf4960..1544fe5a4b3 100644 --- a/neutron/callbacks/resources.py +++ b/neutron/callbacks/resources.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. +# String literals representing core resources. PORT = 'port' ROUTER = 'router' ROUTER_GATEWAY = 'router_gateway' @@ -17,13 +18,3 @@ ROUTER_INTERFACE = 'router_interface' SECURITY_GROUP = 'security_group' SECURITY_GROUP_RULE = 'security_group_rule' SUBNET = 'subnet' - -VALID = ( - PORT, - ROUTER, - ROUTER_GATEWAY, - ROUTER_INTERFACE, - SECURITY_GROUP, - SECURITY_GROUP_RULE, - SUBNET, -) diff --git a/neutron/cmd/pd_notify.py b/neutron/cmd/pd_notify.py new file mode 100644 index 00000000000..02f5fdcfe63 --- /dev/null +++ b/neutron/cmd/pd_notify.py @@ -0,0 +1,38 @@ +# Copyright (c) 2015 Cisco Systems. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import signal +import sys + +from neutron.common import utils + + +def main(): + """Expected arguments: + sys.argv[1] - The add/update/delete operation performed by the PD agent + sys.argv[2] - The file where the new prefix should be written + sys.argv[3] - The process ID of the L3 agent to be notified of this change + """ + operation = sys.argv[1] + prefix_fname = sys.argv[2] + agent_pid = sys.argv[3] + prefix = os.getenv('PREFIX1', "::") + + if operation == "add" or operation == "update": + utils.replace_file(prefix_fname, "%s/64" % prefix) + elif operation == "delete": + utils.replace_file(prefix_fname, "::/64") + os.kill(int(agent_pid), signal.SIGHUP) diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py index 37f0947f2db..819d00c23e2 100644 --- a/neutron/cmd/sanity/checks.py +++ b/neutron/cmd/sanity/checks.py @@ -42,6 +42,7 @@ LOG = logging.getLogger(__name__) MINIMUM_DNSMASQ_VERSION = 2.67 +MINIMUM_DIBBLER_VERSION = '1.0.1' def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'): @@ -51,6 +52,13 @@ def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'): return port != ovs_lib.INVALID_OFPORT +def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'): + name = "genevetest-" + utils.get_random_string(6) + with ovs_lib.OVSBridge(name) as br: + port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_GENEVE) + return port != ovs_lib.INVALID_OFPORT + + def iproute2_vxlan_supported(): ip = ip_lib.IPWrapper() name = "vxlantest-" + utils.get_random_string(4) @@ -127,22 +135,24 @@ def 
arp_header_match_supported(): def vf_management_supported(): + is_supported = True required_caps = ( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE, - ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK) + ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, + ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) try: vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section() for cap in required_caps: if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported( vf_section, cap): + is_supported = False LOG.debug("ip link command does not support " "vf capability '%(cap)s'", cap) - return False except ip_link_support.UnsupportedIpLinkCommand: LOG.exception(_LE("Unexpected exception while checking supported " "ip link command")) return False - return True + return is_supported def netns_read_requires_helper(): @@ -321,3 +331,19 @@ def ebtables_supported(): LOG.debug("Exception while checking for installed ebtables. " "Exception: %s", e) return False + + +def get_minimal_dibbler_version_supported(): + return MINIMUM_DIBBLER_VERSION + + +def dibbler_version_supported(): + try: + cmd = ['dibbler-client', + 'help'] + out = agent_utils.execute(cmd) + return '-w' in out + except (OSError, RuntimeError, IndexError, ValueError) as e: + LOG.debug("Exception while checking minimal dibbler version. " + "Exception: %s", e) + return False diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py index 90895e2340f..2188f3771a7 100644 --- a/neutron/cmd/sanity_check.py +++ b/neutron/cmd/sanity_check.py @@ -56,6 +56,15 @@ def check_ovs_vxlan(): return result +def check_ovs_geneve(): + result = checks.ovs_geneve_supported() + if not result: + LOG.error(_LE('Check for Open vSwitch Geneve support failed. 
' + 'Please ensure that the version of openvswitch ' + 'and kernel being used has Geneve support.')) + return result + + def check_iproute2_vxlan(): result = checks.iproute2_vxlan_supported() if not result: @@ -116,6 +125,15 @@ def check_keepalived_ipv6_support(): return result +def check_dibbler_version(): + result = checks.dibbler_version_supported() + if not result: + LOG.error(_LE('The installed version of dibbler-client is too old. ' + 'Please update to at least version %s.'), + checks.get_minimal_dibbler_version_supported()) + return result + + def check_nova_notify(): result = checks.nova_notify_supported() if not result: @@ -172,6 +190,8 @@ def check_ebtables(): OPTS = [ BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False, help=_('Check for OVS vxlan support')), + BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False, + help=_('Check for OVS Geneve support')), BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False, help=_('Check for iproute2 vxlan support')), BoolOptCallback('ovs_patch', check_ovs_patch, default=False, @@ -194,6 +214,8 @@ OPTS = [ help=_('Check ebtables installation')), BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support, help=_('Check keepalived IPv6 support')), + BoolOptCallback('dibbler_version', check_dibbler_version, + help=_('Check minimal dibbler version')), ] @@ -205,6 +227,8 @@ def enable_tests_from_config(): if 'vxlan' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_override('ovs_vxlan', True) + if 'geneve' in cfg.CONF.AGENT.tunnel_types: + cfg.CONF.set_override('ovs_geneve', True) if ('vxlan' in cfg.CONF.ml2.type_drivers or cfg.CONF.VXLAN.enable_vxlan): cfg.CONF.set_override('iproute2_vxlan', True) diff --git a/neutron/cmd/usage_audit.py b/neutron/cmd/usage_audit.py index a1efa7e1e47..72fdaf127e1 100644 --- a/neutron/cmd/usage_audit.py +++ b/neutron/cmd/usage_audit.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright (c) 2012 New Dream Network, LLC (DreamHost) # All Rights 
Reserved. # diff --git a/neutron/common/config.py b/neutron/common/config.py index c8e4eebf52c..b6ba1a716e7 100644 --- a/neutron/common/config.py +++ b/neutron/common/config.py @@ -82,6 +82,9 @@ core_opts = [ deprecated_name='dhcp_lease_time', help=_("DHCP lease duration (in seconds). Use -1 to tell " "dnsmasq to use infinite lease times.")), + cfg.StrOpt('dns_domain', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), cfg.BoolOpt('dhcp_agent_notification', default=True, help=_("Allow sending resource operation" " notification to DHCP agent")), @@ -152,13 +155,18 @@ cfg.CONF.register_cli_opts(core_cli_opts) # Ensure that the control exchange is set correctly oslo_messaging.set_transport_defaults(control_exchange='neutron') -_SQL_CONNECTION_DEFAULT = 'sqlite://' -# Update the default QueuePool parameters. These can be tweaked by the -# configuration variables - max_pool_size, max_overflow and pool_timeout -db_options.set_defaults(cfg.CONF, - connection=_SQL_CONNECTION_DEFAULT, - sqlite_db='', max_pool_size=10, - max_overflow=20, pool_timeout=10) + + +def set_db_defaults(): + # Update the default QueuePool parameters. 
These can be tweaked by the + # conf variables - max_pool_size, max_overflow and pool_timeout + db_options.set_defaults( + cfg.CONF, + connection='sqlite://', + sqlite_db='', max_pool_size=10, + max_overflow=20, pool_timeout=10) + +set_db_defaults() NOVA_CONF_SECTION = 'nova' diff --git a/neutron/common/constants.py b/neutron/common/constants.py index 9e61d587756..9a4ada150a5 100644 --- a/neutron/common/constants.py +++ b/neutron/common/constants.py @@ -77,6 +77,8 @@ IP_VERSION_6 = 6 IPv4_BITS = 32 IPv6_BITS = 128 +INVALID_MAC_ADDRESSES = ['00:00:00:00:00:00', 'FF:FF:FF:FF:FF:FF'] + IPv4_ANY = '0.0.0.0/0' IPv6_ANY = '::/0' IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY} @@ -88,15 +90,12 @@ FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0') AGENT_TYPE_DHCP = 'DHCP agent' AGENT_TYPE_OVS = 'Open vSwitch agent' AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent' -AGENT_TYPE_HYPERV = 'HyperV agent' AGENT_TYPE_NEC = 'NEC plugin agent' AGENT_TYPE_OFA = 'OFA driver agent' AGENT_TYPE_L3 = 'L3 agent' AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent' -AGENT_TYPE_MLNX = 'Mellanox plugin agent' AGENT_TYPE_METERING = 'Metering agent' AGENT_TYPE_METADATA = 'Metadata agent' -AGENT_TYPE_SDNVE = 'IBM SDN-VE agent' AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent' L2_AGENT_TOPIC = 'N/A' @@ -146,6 +145,9 @@ IPV6_PD_POOL_ID = 'prefix_delegation' # Special provisional prefix for IPv6 Prefix Delegation PROVISIONAL_IPV6_PD_PREFIX = '::/64' +# Timeout in seconds for getting an IPv6 LLA +LLA_TASK_TIMEOUT = 40 + # Linux interface max length DEVICE_NAME_MAX_LEN = 15 @@ -183,9 +185,12 @@ RPC_NAMESPACE_SECGROUP = None RPC_NAMESPACE_DVR = None # RPC interface for reporting state back to the plugin RPC_NAMESPACE_STATE = None +# RPC interface for agent to plugin resources API +RPC_NAMESPACE_RESOURCES = None # Default network MTU value when not configured DEFAULT_NETWORK_MTU = 0 +IPV6_MIN_MTU = 1280 ROUTER_MARK_MASK = "0xffff" diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py index 
c6bc97868ca..3c05b05e909 100644 --- a/neutron/common/exceptions.py +++ b/neutron/common/exceptions.py @@ -77,6 +77,10 @@ class AdminRequired(NotAuthorized): message = _("User does not have admin privileges: %(reason)s") +class ObjectNotFound(NotFound): + message = _("Object %(id)s not found.") + + class NetworkNotFound(NotFound): message = _("Network %(net_id)s could not be found") @@ -93,11 +97,30 @@ class PortNotFound(NotFound): message = _("Port %(port_id)s could not be found") +class QosPolicyNotFound(NotFound): + message = _("QoS policy %(policy_id)s could not be found") + + +class QosRuleNotFound(NotFound): + message = _("QoS rule %(rule_id)s for policy %(policy_id)s " + "could not be found") + + class PortNotFoundOnNetwork(NotFound): message = _("Port %(port_id)s could not be found " "on network %(net_id)s") +class PortQosBindingNotFound(NotFound): + message = _("QoS binding for port %(port_id)s and policy %(policy_id)s " + "could not be found") + + +class NetworkQosBindingNotFound(NotFound): + message = _("QoS binding for network %(net_id)s and policy %(policy_id)s " + "could not be found") + + class PolicyFileNotFound(NotFound): message = _("Policy configuration policy.json could not be found") @@ -118,6 +141,11 @@ class InUse(NeutronException): message = _("The resource is inuse") +class QosPolicyInUse(InUse): + message = _("QoS Policy %(policy_id)s is used by " + "%(object_type)s %(object_id)s.") + + class NetworkInUse(InUse): message = _("Unable to complete operation on network %(net_id)s. 
" "There are one or more ports still in use on the network.") @@ -489,3 +517,7 @@ class DeviceNotFoundError(NeutronException): class NetworkSubnetPoolAffinityError(BadRequest): message = _("Subnets hosted on the same network must be allocated from " "the same subnet pool") + + +class ObjectActionError(NeutronException): + message = _('Object action %(action)s failed because: %(reason)s') diff --git a/neutron/common/topics.py b/neutron/common/topics.py index 9bb1956e7e8..d0cc55a57e3 100644 --- a/neutron/common/topics.py +++ b/neutron/common/topics.py @@ -19,6 +19,7 @@ PORT = 'port' SECURITY_GROUP = 'security_group' L2POPULATION = 'l2population' DVR = 'dvr' +RESOURCES = 'resources' CREATE = 'create' DELETE = 'delete' @@ -37,6 +38,8 @@ DHCP_AGENT = 'dhcp_agent' METERING_AGENT = 'metering_agent' LOADBALANCER_AGENT = 'n-lbaas_agent' +RESOURCE_TOPIC_PATTERN = "neutron-vo-%(resource_type)s-%(version)s" + def get_topic_name(prefix, table, operation, host=None): """Create a topic name. diff --git a/neutron/common/utils.py b/neutron/common/utils.py index 94607e644d0..2eb31a8365e 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -19,16 +19,17 @@ """Utilities and helper functions.""" import datetime +import decimal import errno import functools import hashlib -import logging as std_logging import multiprocessing import netaddr import os import random import signal import socket +import tempfile import uuid from eventlet.green import subprocess @@ -263,7 +264,7 @@ def str2dict(string): def dict2tuple(d): - items = d.items() + items = list(d.items()) items.sort() return tuple(items) @@ -282,7 +283,7 @@ def is_extension_supported(plugin, ext_alias): def log_opt_values(log): - cfg.CONF.log_opt_values(log, std_logging.DEBUG) + cfg.CONF.log_opt_values(log, logging.DEBUG) def get_random_mac(base_mac): @@ -438,3 +439,32 @@ class DelayedStringRenderer(object): def __str__(self): return str(self.function(*self.args, **self.kwargs)) + + +def camelize(s): + return 
''.join(s.replace('_', ' ').title().split()) + + +def round_val(val): + # we rely on decimal module since it behaves consistently across Python + # versions (2.x vs. 3.x) + return int(decimal.Decimal(val).quantize(decimal.Decimal('1'), + rounding=decimal.ROUND_HALF_UP)) + + +def replace_file(file_name, data): + """Replaces the contents of file_name with data in a safe manner. + + First write to a temp file and then rename. Since POSIX renames are + atomic, the file is unlikely to be corrupted by competing writes. + + We create the tempfile on the same device to ensure that it can be renamed. + """ + + base_dir = os.path.dirname(os.path.abspath(file_name)) + with tempfile.NamedTemporaryFile('w+', + dir=base_dir, + delete=False) as tmp_file: + tmp_file.write(data) + os.chmod(tmp_file.name, 0o644) + os.rename(tmp_file.name, file_name) diff --git a/neutron/plugins/ml2/drivers/cisco/ucsm/__init__.py b/neutron/core_extensions/__init__.py similarity index 100% rename from neutron/plugins/ml2/drivers/cisco/ucsm/__init__.py rename to neutron/core_extensions/__init__.py diff --git a/neutron/core_extensions/base.py b/neutron/core_extensions/base.py new file mode 100644 index 00000000000..67cbf87e357 --- /dev/null +++ b/neutron/core_extensions/base.py @@ -0,0 +1,48 @@ +# Copyright (c) 2015 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +import six + + +NETWORK = 'network' +PORT = 'port' + + +CORE_RESOURCES = [NETWORK, PORT] + + +@six.add_metaclass(abc.ABCMeta) +class CoreResourceExtension(object): + + @abc.abstractmethod + def process_fields(self, context, resource_type, + requested_resource, actual_resource): + """Process extension fields. + + :param context: neutron api request context + :param resource_type: core resource type (one of CORE_RESOURCES) + :param requested_resource: resource dict that contains extension fields + :param actual_resource: actual resource dict known to plugin + """ + + @abc.abstractmethod + def extract_fields(self, resource_type, resource): + """Extract extension fields. + + :param resource_type: core resource type (one of CORE_RESOURCES) + :param resource: resource dict that contains extension fields + """ diff --git a/neutron/core_extensions/qos.py b/neutron/core_extensions/qos.py new file mode 100644 index 00000000000..72fb898836c --- /dev/null +++ b/neutron/core_extensions/qos.py @@ -0,0 +1,82 @@ +# Copyright (c) 2015 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import exceptions as n_exc +from neutron.core_extensions import base +from neutron.db import api as db_api +from neutron import manager +from neutron.objects.qos import policy as policy_object +from neutron.plugins.common import constants as plugin_constants +from neutron.services.qos import qos_consts + + +class QosCoreResourceExtension(base.CoreResourceExtension): + + @property + def plugin_loaded(self): + if not hasattr(self, '_plugin_loaded'): + service_plugins = manager.NeutronManager.get_service_plugins() + self._plugin_loaded = plugin_constants.QOS in service_plugins + return self._plugin_loaded + + def _get_policy_obj(self, context, policy_id): + obj = policy_object.QosPolicy.get_by_id(context, policy_id) + if obj is None: + raise n_exc.QosPolicyNotFound(policy_id=policy_id) + return obj + + def _update_port_policy(self, context, port, port_changes): + old_policy = policy_object.QosPolicy.get_port_policy( + context, port['id']) + if old_policy: + old_policy.detach_port(port['id']) + + qos_policy_id = port_changes.get(qos_consts.QOS_POLICY_ID) + if qos_policy_id is not None: + policy = self._get_policy_obj(context, qos_policy_id) + policy.attach_port(port['id']) + port[qos_consts.QOS_POLICY_ID] = qos_policy_id + + def _update_network_policy(self, context, network, network_changes): + old_policy = policy_object.QosPolicy.get_network_policy( + context, network['id']) + if old_policy: + old_policy.detach_network(network['id']) + + qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID) + if qos_policy_id is not None: + policy = self._get_policy_obj(context, qos_policy_id) + policy.attach_network(network['id']) + network[qos_consts.QOS_POLICY_ID] = qos_policy_id + + def _exec(self, method_name, context, kwargs): + with db_api.autonested_transaction(context.session): + return getattr(self, method_name)(context=context, **kwargs) + + def process_fields(self, context, resource_type, + requested_resource, actual_resource): + if 
(qos_consts.QOS_POLICY_ID in requested_resource and + self.plugin_loaded): + self._exec('_update_%s_policy' % resource_type, context, + {resource_type: actual_resource, + "%s_changes" % resource_type: requested_resource}) + + def extract_fields(self, resource_type, resource): + if not self.plugin_loaded: + return {} + + binding = resource['qos_policy_binding'] + qos_policy_id = binding['policy_id'] if binding else None + return {qos_consts.QOS_POLICY_ID: qos_policy_id} diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 702f2e497d1..9417d5e3c37 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -56,6 +56,11 @@ AGENT_OPTS = [ 'dhcp_load_type can be configured to represent the ' 'choice for the resource being balanced. ' 'Example: dhcp_load_type=networks')), + cfg.BoolOpt('enable_new_agents', default=True, + help=_("Agent starts with admin_state_up=False when " + "enable_new_agents=False. In the case, user's " + "resources will not be scheduled automatically to the " + "agent until admin changes admin_state_up to True.")), ] cfg.CONF.register_opts(AGENT_OPTS) @@ -236,7 +241,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase): res['created_at'] = current_time res['started_at'] = current_time res['heartbeat_timestamp'] = current_time - res['admin_state_up'] = True + res['admin_state_up'] = cfg.CONF.enable_new_agents agent_db = Agent(**res) greenthread.sleep(0) context.session.add(agent_db) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 212c6eb55a7..153a420b9c0 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -273,10 +273,11 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler try: dead_bindings = [b for b in self._filter_bindings(context, down_bindings)] - dead_agents = set([b.dhcp_agent_id for b in dead_bindings]) agents = self.get_agents_db( context, {'agent_type': [constants.AGENT_TYPE_DHCP]}) - if len(agents) == len(dead_agents): + active_agents = 
[agent for agent in agents if + self.is_eligible_agent(context, True, agent)] + if not active_agents: LOG.warn(_LW("No DHCP agents available, " "skipping rescheduling")) return diff --git a/neutron/db/api.py b/neutron/db/api.py index dec09bd3572..53ef51b957b 100644 --- a/neutron/db/api.py +++ b/neutron/db/api.py @@ -14,14 +14,15 @@ # under the License. import contextlib -import six from oslo_config import cfg from oslo_db import api as oslo_db_api -from oslo_db import exception as os_db_exception from oslo_db.sqlalchemy import session +from oslo_utils import uuidutils from sqlalchemy import exc -from sqlalchemy import orm + +from neutron.common import exceptions as n_exc +from neutron.db import common_db_mixin _FACADE = None @@ -72,19 +73,46 @@ def autonested_transaction(sess): yield tx -class convert_db_exception_to_retry(object): - """Converts other types of DB exceptions into RetryRequests.""" +# Common database operation implementations +def get_object(context, model, **kwargs): + with context.session.begin(subtransactions=True): + return (common_db_mixin.model_query(context, model) + .filter_by(**kwargs) + .first()) - def __init__(self, stale_data=False): - self.to_catch = () - if stale_data: - self.to_catch += (orm.exc.StaleDataError, ) - def __call__(self, f): - @six.wraps(f) - def wrapper(*args, **kwargs): - try: - return f(*args, **kwargs) - except self.to_catch as e: - raise os_db_exception.RetryRequest(e) - return wrapper +def get_objects(context, model, **kwargs): + with context.session.begin(subtransactions=True): + return (common_db_mixin.model_query(context, model) + .filter_by(**kwargs) + .all()) + + +def create_object(context, model, values): + with context.session.begin(subtransactions=True): + if 'id' not in values: + values['id'] = uuidutils.generate_uuid() + db_obj = model(**values) + context.session.add(db_obj) + return db_obj.__dict__ + + +def _safe_get_object(context, model, id): + db_obj = get_object(context, model, id=id) + if db_obj is 
None: + raise n_exc.ObjectNotFound(id=id) + return db_obj + + +def update_object(context, model, id, values): + with context.session.begin(subtransactions=True): + db_obj = _safe_get_object(context, model, id) + db_obj.update(values) + db_obj.save(session=context.session) + return db_obj.__dict__ + + +def delete_object(context, model, id): + with context.session.begin(subtransactions=True): + db_obj = _safe_get_object(context, model, id) + context.session.delete(db_obj) diff --git a/neutron/db/common_db_mixin.py b/neutron/db/common_db_mixin.py index 3b31c61df1a..d7eedd53d4b 100644 --- a/neutron/db/common_db_mixin.py +++ b/neutron/db/common_db_mixin.py @@ -96,6 +96,34 @@ class CommonDbMixin(object): return model_query_scope(context, model) def _model_query(self, context, model): + if isinstance(model, UnionModel): + return self._union_model_query(context, model) + else: + return self._single_model_query(context, model) + + def _union_model_query(self, context, model): + # A union query is a query that combines multiple sets of data + # together and represents them as one. So if a UnionModel was + # passed in, we generate the query for each model with the + # appropriate filters and then combine them together with the + # .union operator. This allows any subsequent users of the query + # to handle it like a normal query (e.g. add pagination/sorting/etc) + first_query = None + remaining_queries = [] + for name, component_model in model.model_map.items(): + query = self._single_model_query(context, component_model) + if model.column_type_name: + query.add_columns( + sql.expression.column('"%s"' % name, is_literal=True). 
+ label(model.column_type_name) + ) + if first_query is None: + first_query = query + else: + remaining_queries.append(query) + return first_query.union(*remaining_queries) + + def _single_model_query(self, context, model): query = context.session.query(model) # define basic filter condition for model query query_filter = None @@ -260,3 +288,14 @@ class CommonDbMixin(object): columns = [c.name for c in model.__table__.columns] return dict((k, v) for (k, v) in six.iteritems(data) if k in columns) + + +class UnionModel(object): + """Collection of models that _model_query can query as a single table.""" + + def __init__(self, model_map, column_type_name=None): + # model_map is a dictionary of models keyed by an arbitrary name. + # If column_type_name is specified, the resulting records will have a + # column with that name which identifies the source of each record + self.model_map = model_map + self.column_type_name = column_type_name diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index 4b66b145f8f..70ee8c1e9a6 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -29,6 +29,40 @@ from neutron.db import models_v2 LOG = logging.getLogger(__name__) +def convert_result_to_dict(f): + @functools.wraps(f) + def inner(*args, **kwargs): + result = f(*args, **kwargs) + + if result is None: + return None + elif isinstance(result, list): + return [r.to_dict() for r in result] + else: + return result.to_dict() + return inner + + +def filter_fields(f): + @functools.wraps(f) + def inner_filter(*args, **kwargs): + result = f(*args, **kwargs) + fields = kwargs.get('fields') + if not fields: + try: + pos = f.__code__.co_varnames.index('fields') + fields = args[pos] + except (IndexError, ValueError): + return result + + do_filter = lambda d: {k: v for k, v in d.items() if k in fields} + if isinstance(result, list): + return [do_filter(obj) for obj in result] + else: + return do_filter(result) + return 
inner_filter + + class DbBasePluginCommon(common_db_mixin.CommonDbMixin): """Stores getters and helper methods for db_base_plugin_v2 @@ -134,6 +168,13 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} + if "dns_name" in port: + res["dns_name"] = port["dns_name"] + if "dns_assignment" in port: + res["dns_assignment"] = [{"ip_address": a["ip_address"], + "hostname": a["hostname"], + "fqdn": a["fqdn"]} + for a in port["dns_assignment"]] # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 29ecf806525..ca0c73015fe 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -40,6 +40,7 @@ from neutron.db import db_base_plugin_common from neutron.db import ipam_non_pluggable_backend from neutron.db import ipam_pluggable_backend from neutron.db import models_v2 +from neutron.db import rbac_db_mixin as rbac_mixin from neutron.db import rbac_db_models as rbac_db from neutron.db import sqlalchemyutils from neutron.extensions import l3 @@ -62,6 +63,9 @@ LOG = logging.getLogger(__name__) # IP allocations being cleaned up by cascade. AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] +DNS_DOMAIN_DEFAULT = 'openstacklocal.' +FQDN_MAX_LEN = 255 + def _check_subnet_not_used(context, subnet_id): try: @@ -73,7 +77,8 @@ def _check_subnet_not_used(context, subnet_id): class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, - neutron_plugin_base_v2.NeutronPluginBaseV2): + neutron_plugin_base_v2.NeutronPluginBaseV2, + rbac_mixin.RbacPluginMixin): """V2 Neutron plugin interface implementation using SQLAlchemy models. 
Whenever a non-read call happens the plugin will call an event handler @@ -102,6 +107,79 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, self.nova_notifier.send_port_status) event.listen(models_v2.Port.status, 'set', self.nova_notifier.record_port_status_changed) + for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE, + events.BEFORE_DELETE): + registry.subscribe(self.validate_network_rbac_policy_change, + rbac_mixin.RBAC_POLICY, e) + + def validate_network_rbac_policy_change(self, resource, event, trigger, + context, object_type, policy, + **kwargs): + """Validates network RBAC policy changes. + + On creation, verify that the creator is an admin or that it owns the + network it is sharing. + + On update and delete, make sure the tenant losing access does not have + resources that depend on that access. + """ + if object_type != 'network': + # we only care about network policies + return + # The object a policy targets cannot be changed so we can look + # at the original network for the update event as well. 
+ net = self._get_network(context, policy['object_id']) + if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE): + # we still have to verify that the caller owns the network because + # _get_network will succeed on a shared network + if not context.is_admin and net['tenant_id'] != context.tenant_id: + msg = _("Only admins can manipulate policies on networks " + "they do not own.") + raise n_exc.InvalidInput(error_message=msg) + + tenant_to_check = None + if event == events.BEFORE_UPDATE: + new_tenant = kwargs['policy_update']['target_tenant'] + if policy['target_tenant'] != new_tenant: + tenant_to_check = policy['target_tenant'] + + if event == events.BEFORE_DELETE: + tenant_to_check = policy['target_tenant'] + + if tenant_to_check: + self.ensure_no_tenant_ports_on_network(net['id'], net['tenant_id'], + tenant_to_check) + + def ensure_no_tenant_ports_on_network(self, network_id, net_tenant_id, + tenant_id): + ctx_admin = ctx.get_admin_context() + rb_model = rbac_db.NetworkRBAC + other_rbac_entries = self._model_query(ctx_admin, rb_model).filter( + and_(rb_model.object_id == network_id, + rb_model.action == 'access_as_shared')) + ports = self._model_query(ctx_admin, models_v2.Port).filter( + models_v2.Port.network_id == network_id) + if tenant_id == '*': + # for the wildcard we need to get all of the rbac entries to + # see if any allow the remaining ports on the network. 
+ other_rbac_entries = other_rbac_entries.filter( + rb_model.target_tenant != tenant_id) + # any port with another RBAC entry covering it or one belonging to + # the same tenant as the network owner is ok + allowed_tenants = [entry['target_tenant'] + for entry in other_rbac_entries] + allowed_tenants.append(net_tenant_id) + ports = ports.filter( + ~models_v2.Port.tenant_id.in_(allowed_tenants)) + else: + # if there is a wildcard rule, we can return early because it + # allows any ports + query = other_rbac_entries.filter(rb_model.target_tenant == '*') + if query.count(): + return + ports = ports.filter(models_v2.Port.tenant_id == tenant_id) + if ports.count(): + raise n_exc.InvalidSharedSetting(network=network_id) def set_ipam_backend(self): if cfg.CONF.ipam_driver: @@ -471,7 +549,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, gw_ports = self._get_router_gw_ports_by_network(context, network['id']) router_ids = [p['device_id'] for p in gw_ports] - ctx_admin = ctx.get_admin_context() + ctx_admin = context.elevated() ext_subnets_dict = {s['id']: s for s in network['subnets']} for id in router_ids: router = l3plugin.get_router(ctx_admin, id) @@ -590,9 +668,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, def _update_allocation_pools(self, subnet): """Gets new allocation pools and formats them correctly""" - allocation_pools = self.ipam.generate_allocation_pools( - subnet['cidr'], - subnet['gateway_ip']) + allocation_pools = self.ipam.generate_pools(subnet['cidr'], + subnet['gateway_ip']) return [{'start': str(netaddr.IPAddress(p.first, subnet['ip_version'])), 'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))} @@ -619,13 +696,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip']) for p in db_subnet.allocation_pools] - range_pools = None - if s.get('allocation_pools') is not None: - # Convert allocation pools to IPRange to simplify future 
checks - range_pools = self.ipam.pools_to_ip_range(s['allocation_pools']) - self.ipam.validate_allocation_pools(range_pools, s['cidr']) - s['allocation_pools'] = range_pools - update_ports_needed = False if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s): # This is an ipv6 prefix delegation-enabled subnet being given an @@ -637,6 +707,13 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, s['gateway_ip'] = utils.get_first_host_ip(net, s['ip_version']) s['allocation_pools'] = self._update_allocation_pools(s) + range_pools = None + if s.get('allocation_pools') is not None: + # Convert allocation pools to IPRange to simplify future checks + range_pools = self.ipam.pools_to_ip_range(s['allocation_pools']) + self.ipam.validate_allocation_pools(range_pools, s['cidr']) + s['allocation_pools'] = range_pools + # If either gateway_ip or allocation_pools were specified gateway_ip = s.get('gateway_ip') if gateway_ip is not None or s.get('allocation_pools') is not None: @@ -769,6 +846,10 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, return self._get_collection_count(context, models_v2.Subnet, filters=filters) + def get_subnets_by_network(self, context, network_id): + return [self._make_subnet_dict(subnet_db) for subnet_db in + self._get_subnets_by_network(context, network_id)] + def _create_subnetpool_prefix(self, context, cidr, subnetpool_id): prefix_args = {'cidr': cidr, 'subnetpool_id': subnetpool_id} subnetpool_prefix = models_v2.SubnetPoolPrefix(**prefix_args) @@ -956,6 +1037,54 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, def create_port_bulk(self, context, ports): return self._create_bulk('port', context, ports) + def _get_dns_domain(self): + if not cfg.CONF.dns_domain: + return '' + if cfg.CONF.dns_domain.endswith('.'): + return cfg.CONF.dns_domain + return '%s.' 
% cfg.CONF.dns_domain + + def _get_request_dns_name(self, port): + dns_domain = self._get_dns_domain() + if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): + return port.get('dns_name', '') + return '' + + def _get_dns_names_for_port(self, context, network_id, ips, + request_dns_name): + filter = {'network_id': [network_id]} + subnets = self._get_subnets(context, filters=filter) + v6_subnets = {subnet['id']: subnet for subnet in subnets + if subnet['ip_version'] == 6} + dns_assignment = [] + dns_domain = self._get_dns_domain() + if request_dns_name: + request_fqdn = request_dns_name + if not request_dns_name.endswith('.'): + request_fqdn = '%s.%s' % (request_dns_name, dns_domain) + + for ip in ips: + subnet_id = ip['subnet_id'] + is_auto_address_subnet = ( + subnet_id in v6_subnets and + ipv6_utils.is_auto_address_subnet(v6_subnets[subnet_id])) + if is_auto_address_subnet: + continue + if request_dns_name: + hostname = request_dns_name + fqdn = request_fqdn + else: + hostname = 'host-%s' % ip['ip_address'].replace( + '.', '-').replace(':', '-') + fqdn = hostname + if dns_domain: + fqdn = '%s.%s' % (hostname, dns_domain) + dns_assignment.append({'ip_address': ip['ip_address'], + 'hostname': hostname, + 'fqdn': fqdn}) + + return dns_assignment + def _create_port_with_mac(self, context, network_id, port_data, mac_address): try: @@ -1003,6 +1132,9 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, status=p.get('status', constants.PORT_STATUS_ACTIVE), device_id=p['device_id'], device_owner=p['device_owner']) + if 'dns_name' in p: + request_dns_name = self._get_request_dns_name(p) + port_data['dns_name'] = request_dns_name with context.session.begin(subtransactions=True): # Ensure that the network exists. 
@@ -1016,8 +1148,16 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, db_port = self._create_port_with_mac( context, network_id, port_data, p['mac_address']) - self.ipam.allocate_ips_for_port_and_store(context, port, port_id) + ips = self.ipam.allocate_ips_for_port_and_store(context, port, + port_id) + if 'dns_name' in p: + dns_assignment = [] + if ips: + dns_assignment = self._get_dns_names_for_port( + context, network_id, ips, request_dns_name) + if 'dns_name' in p: + db_port['dns_assignment'] = dns_assignment return self._make_port_dict(db_port, process_extensions=False) def _validate_port_for_update(self, context, db_port, new_port, new_mac): @@ -1036,20 +1176,45 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, self._check_mac_addr_update(context, db_port, new_mac, current_owner) + def _get_dns_names_for_updated_port(self, context, db_port, + original_ips, original_dns_name, + request_dns_name, changes): + if changes.original or changes.add or changes.remove: + return self._get_dns_names_for_port( + context, db_port['network_id'], changes.original + changes.add, + request_dns_name or original_dns_name) + if original_ips: + return self._get_dns_names_for_port( + context, db_port['network_id'], original_ips, + request_dns_name or original_dns_name) + return [] + def update_port(self, context, id, port): new_port = port['port'] with context.session.begin(subtransactions=True): port = self._get_port(context, id) + if 'dns-integration' in self.supported_extension_aliases: + original_ips = self._make_fixed_ip_dict(port['fixed_ips']) + original_dns_name = port.get('dns_name', '') + request_dns_name = self._get_request_dns_name(new_port) + if not request_dns_name: + new_port['dns_name'] = '' new_mac = new_port.get('mac_address') self._validate_port_for_update(context, port, new_port, new_mac) changes = self.ipam.update_port_with_ips(context, port, new_port, new_mac) + if 'dns-integration' in self.supported_extension_aliases: + 
dns_assignment = self._get_dns_names_for_updated_port( + context, port, original_ips, original_dns_name, + request_dns_name, changes) result = self._make_port_dict(port) # Keep up with fields that changed if changes.original or changes.add or changes.remove: result['fixed_ips'] = self._make_fixed_ip_dict( changes.original + changes.add) + if 'dns-integration' in self.supported_extension_aliases: + result['dns_assignment'] = dns_assignment return result def delete_port(self, context, id): @@ -1072,8 +1237,19 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, "The port has already been deleted.", port_id) + def _get_dns_name_for_port_get(self, context, port): + if port['fixed_ips']: + return self._get_dns_names_for_port( + context, port['network_id'], port['fixed_ips'], + port['dns_name']) + return [] + def get_port(self, context, id, fields=None): port = self._get_port(context, id) + if (('dns-integration' in self.supported_extension_aliases and + 'dns_name' in port)): + port['dns_assignment'] = self._get_dns_name_for_port_get(context, + port) return self._make_port_dict(port, fields) def _get_ports_query(self, context, filters=None, sorts=None, limit=None, @@ -1111,7 +1287,13 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) - items = [self._make_port_dict(c, fields) for c in query] + items = [] + for c in query: + if (('dns-integration' in self.supported_extension_aliases and + 'dns_name' in c)): + c['dns_assignment'] = self._get_dns_name_for_port_get(context, + c) + items.append(self._make_port_dict(c, fields)) if limit and page_reverse: items.reverse() return items @@ -1134,7 +1316,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, if device_id: if hasattr(self, 'get_router'): try: - ctx_admin = ctx.get_admin_context() + ctx_admin = context.elevated() router = self.get_router(ctx_admin, device_id) except l3.RouterNotFound: return 
@@ -1144,7 +1326,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, service_constants.L3_ROUTER_NAT)) if l3plugin: try: - ctx_admin = ctx.get_admin_context() + ctx_admin = context.elevated() router = l3plugin.get_router(ctx_admin, device_id) except l3.RouterNotFound: diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py index 0502aa0029f..b3983f0ddd6 100644 --- a/neutron/db/dvr_mac_db.py +++ b/neutron/db/dvr_mac_db.py @@ -162,15 +162,25 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): return ports_by_host @log_helpers.log_method_call - def get_subnet_for_dvr(self, context, subnet): + def get_subnet_for_dvr(self, context, subnet, fixed_ips=None): + if fixed_ips: + subnet_data = fixed_ips[0]['subnet_id'] + else: + subnet_data = subnet try: - subnet_info = self.plugin.get_subnet(context, subnet) + subnet_info = self.plugin.get_subnet( + context, subnet_data) except n_exc.SubnetNotFound: return {} else: # retrieve the gateway port on this subnet - filter = {'fixed_ips': {'subnet_id': [subnet], - 'ip_address': [subnet_info['gateway_ip']]}} + if fixed_ips: + filter = fixed_ips[0] + else: + filter = {'fixed_ips': {'subnet_id': [subnet], + 'ip_address': + [subnet_info['gateway_ip']]}} + internal_gateway_ports = self.plugin.get_ports( context, filters=filter) if not internal_gateway_ports: diff --git a/neutron/db/extraroute_db.py b/neutron/db/extraroute_db.py index 264bff16317..0bf0ae228a0 100644 --- a/neutron/db/extraroute_db.py +++ b/neutron/db/extraroute_db.py @@ -108,7 +108,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): ips = [] for port in ports: for ip in port['fixed_ips']: - cidrs.append(self._core_plugin._get_subnet( + cidrs.append(self._core_plugin.get_subnet( context, ip['subnet_id'])['cidr']) ips.append(ip['ip_address']) for route in routes: @@ -162,8 +162,8 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): super(ExtraRoute_dbonly_mixin, self)._confirm_router_interface_not_in_use( context, router_id, 
subnet_id) - subnet_db = self._core_plugin._get_subnet(context, subnet_id) - subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + subnet = self._core_plugin.get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet['cidr']) extra_routes = self._get_extra_routes_by_router_id(context, router_id) for route in extra_routes: if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index 43ef9800206..d6adf01fb25 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -158,9 +158,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): def _update_subnet_allocation_pools(self, context, subnet_id, s): context.session.query(models_v2.IPAllocationPool).filter_by( subnet_id=subnet_id).delete() - pools = ((netaddr.IPAddress(p.first, p.version).format(), + pools = [(netaddr.IPAddress(p.first, p.version).format(), netaddr.IPAddress(p.last, p.version).format()) - for p in s['allocation_pools']) + for p in s['allocation_pools']] new_pools = [models_v2.IPAllocationPool(first_ip=p[0], last_ip=p[1], subnet_id=subnet_id) diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index 87bf0d188a5..e935ca26c69 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -207,6 +207,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): subnet_id = ip['subnet_id'] self._store_ip_allocation(context, ip_address, network_id, subnet_id, port_id) + return ips def update_port_with_ips(self, context, db_port, new_port, new_mac): changes = self.Changes(add=[], original=[], remove=[]) diff --git a/neutron/db/ipam_pluggable_backend.py b/neutron/db/ipam_pluggable_backend.py index 17e1371c375..29e371e342e 100644 --- a/neutron/db/ipam_pluggable_backend.py +++ b/neutron/db/ipam_pluggable_backend.py @@ -160,6 +160,7 @@ class 
IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): IpamPluggableBackend._store_ip_allocation( context, ip_address, network_id, subnet_id, port_id) + return ips except Exception: with excutils.save_and_reraise_exception(): if ips: @@ -407,7 +408,7 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): def allocate_subnet(self, context, network, subnet, subnetpool_id): subnetpool = None - if subnetpool_id: + if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID: subnetpool = self._get_subnetpool(context, subnetpool_id) self._validate_ip_version_with_subnetpool(subnet, subnetpool) diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index 9c6413054fc..e5e19040ee5 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -103,6 +103,15 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, l3_attrs_db.RouterExtraAttributes.ha == sql.null()))) try: for binding in down_bindings: + agent_mode = self._get_agent_mode(binding.l3_agent) + if agent_mode == constants.L3_AGENT_MODE_DVR: + # rescheduling from l3 dvr agent on compute node doesn't + # make sense. Router will be removed from that agent once + # there are no dvr serviceable ports on that compute node + LOG.warn(_LW('L3 DVR agent on node %(host)s is down. 
' + 'Not rescheduling from agent in \'dvr\' ' + 'mode.'), {'host': binding.l3_agent.host}) + continue LOG.warn(_LW( "Rescheduling router %(router)s from agent %(agent)s " "because the agent did not report to the server in " @@ -124,6 +133,11 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, LOG.exception(_LE("Exception encountered during router " "rescheduling.")) + def _get_agent_mode(self, agent_db): + agent_conf = self.get_configuration_dict(agent_db) + return agent_conf.get(constants.L3_AGENT_MODE, + constants.L3_AGENT_MODE_LEGACY) + def validate_agent_router_combination(self, context, agent, router): """Validate if the router can be correctly assigned to the agent. @@ -134,10 +148,11 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, :raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR router from one DVR Agent to another. """ + if agent['agent_type'] != constants.AGENT_TYPE_L3: + raise l3agentscheduler.InvalidL3Agent(id=agent['id']) + is_distributed = router.get('distributed') - agent_conf = self.get_configuration_dict(agent) - agent_mode = agent_conf.get(constants.L3_AGENT_MODE, - constants.L3_AGENT_MODE_LEGACY) + agent_mode = self._get_agent_mode(agent) router_type = ( 'distributed' if is_distributed else 'centralized') @@ -155,13 +170,14 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, router_type=router_type, router_id=router['id'], agent_id=agent['id']) - is_wrong_type_or_unsuitable_agent = ( - agent['agent_type'] != constants.AGENT_TYPE_L3 or - not agentschedulers_db.services_available(agent['admin_state_up']) - or - not self.get_l3_agent_candidates(context, router, [agent], - ignore_admin_state=True)) - if is_wrong_type_or_unsuitable_agent: + is_suitable_agent = ( + agentschedulers_db.services_available(agent['admin_state_up']) and + (self.get_l3_agent_candidates(context, router, + [agent], + ignore_admin_state=True) or + self.get_snat_candidates(router, 
[agent])) + ) + if not is_suitable_agent: raise l3agentscheduler.InvalidL3Agent(id=agent['id']) def check_agent_router_scheduling_needed(self, context, agent, router): @@ -181,8 +197,6 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, if binding.l3_agent_id == agent_id: # router already bound to the agent we need return False - if router.get('distributed'): - return False if router.get('ha'): return True # legacy router case: router is already bound to some agent @@ -407,9 +421,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, # This optimization is valid assuming that the L3 # DVR_SNAT node will be the one hosting the DHCP # Agent. - agent_conf = self.get_configuration_dict(l3_agent) - agent_mode = agent_conf.get(constants.L3_AGENT_MODE, - constants.L3_AGENT_MODE_LEGACY) + agent_mode = self._get_agent_mode(l3_agent) for subnet_id in subnet_ids: subnet_dict = core_plugin.get_subnet(context, subnet_id) diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py index 14b1dc50dea..a803dcb6143 100644 --- a/neutron/db/l3_db.py +++ b/neutron/db/l3_db.py @@ -40,6 +40,7 @@ from neutron.extensions import l3 from neutron.i18n import _LI, _LE from neutron import manager from neutron.plugins.common import constants +from neutron.plugins.common import utils as p_utils LOG = logging.getLogger(__name__) @@ -173,11 +174,17 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, None) tenant_id = self._get_tenant_id_for_create(context, r) - with context.session.begin(subtransactions=True): - router_db = self._create_router_db(context, r, tenant_id) + router_db = self._create_router_db(context, r, tenant_id) + try: if gw_info: self._update_router_gw_info(context, router_db['id'], gw_info, router=router_db) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("An exception occurred while creating " + "the router: %s"), router) + 
self.delete_router(context, router_db.id) + return self._make_router_dict(router_db) def _update_router_db(self, context, router_id, data, gw_info): @@ -278,15 +285,15 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): def _create_router_gw_port(self, context, router, network_id, ext_ips): # Port has no 'tenant-id', as it is hidden from user - gw_port = self._core_plugin.create_port(context.elevated(), { - 'port': {'tenant_id': '', # intentionally not set + port_data = {'tenant_id': '', # intentionally not set 'network_id': network_id, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': ext_ips or attributes.ATTR_NOT_SPECIFIED, 'device_id': router['id'], 'device_owner': DEVICE_OWNER_ROUTER_GW, 'admin_state_up': True, - 'name': ''}}) + 'name': ''} + gw_port = p_utils.create_port(self._core_plugin, + context.elevated(), {'port': port_data}) if not gw_port['fixed_ips']: LOG.debug('No IPs available for external network %s', @@ -311,8 +318,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): msg = _("Network %s is not an external network") % network_id raise n_exc.BadRequest(resource='router', msg=msg) if ext_ips: - subnets = self._core_plugin._get_subnets_by_network(context, - network_id) + subnets = self._core_plugin.get_subnets_by_network(context, + network_id) for s in subnets: if not s['gateway_ip']: continue @@ -362,8 +369,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): new_network and (not router.gw_port or router.gw_port['network_id'] != new_network)) if new_valid_gw_port_attachment: - subnets = self._core_plugin._get_subnets_by_network(context, - new_network) + subnets = self._core_plugin.get_subnets_by_network(context, + new_network) for subnet in subnets: self._check_for_dup_router_subnet(context, router, new_network, subnet['id'], @@ -475,8 +482,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): if subnet_cidr == l3_constants.PROVISIONAL_IPV6_PD_PREFIX: continue sub_id = ip['subnet_id'] - cidr = 
self._core_plugin._get_subnet(context.elevated(), - sub_id)['cidr'] + cidr = self._core_plugin.get_subnet(context.elevated(), + sub_id)['cidr'] ipnet = netaddr.IPNetwork(cidr) match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr]) @@ -537,8 +544,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): fixed_ips = [ip for ip in port['fixed_ips']] subnets = [] for fixed_ip in fixed_ips: - subnet = self._core_plugin._get_subnet(context, - fixed_ip['subnet_id']) + subnet = self._core_plugin.get_subnet(context, + fixed_ip['subnet_id']) subnets.append(subnet) self._check_for_dup_router_subnet(context, router, port['network_id'], @@ -566,7 +573,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): return port def _add_interface_by_subnet(self, context, router, subnet_id, owner): - subnet = self._core_plugin._get_subnet(context, subnet_id) + subnet = self._core_plugin.get_subnet(context, subnet_id) if not subnet['gateway_ip']: msg = _('Subnet for router interface must have a gateway IP') raise n_exc.BadRequest(resource='router', msg=msg) @@ -596,16 +603,15 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): port['port_id'], {'port': {'fixed_ips': fixed_ips}}), [subnet], False - return self._core_plugin.create_port(context, { - 'port': - {'tenant_id': subnet['tenant_id'], - 'network_id': subnet['network_id'], - 'fixed_ips': [fixed_ip], - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'admin_state_up': True, - 'device_id': router.id, - 'device_owner': owner, - 'name': ''}}), [subnet], True + port_data = {'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'fixed_ips': [fixed_ip], + 'admin_state_up': True, + 'device_id': router.id, + 'device_owner': owner, + 'name': ''} + return p_utils.create_port(self._core_plugin, context, + {'port': port_data}), [subnet], True @staticmethod def _make_router_interface_info( @@ -650,8 +656,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): def 
_confirm_router_interface_not_in_use(self, context, router_id, subnet_id): - subnet_db = self._core_plugin._get_subnet(context, subnet_id) - subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + subnet = self._core_plugin.get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet['cidr']) fip_qry = context.session.query(FloatingIP) try: kwargs = {'context': context, 'subnet_id': subnet_id} @@ -687,7 +693,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): if subnet_id and subnet_id not in port_subnet_ids: raise n_exc.SubnetMismatchForPort( port_id=port_id, subnet_id=subnet_id) - subnets = [self._core_plugin._get_subnet(context, port_subnet_id) + subnets = [self._core_plugin.get_subnet(context, port_subnet_id) for port_subnet_id in port_subnet_ids] for port_subnet_id in port_subnet_ids: self._confirm_router_interface_not_in_use( @@ -700,7 +706,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): router_id, subnet_id, owner): self._confirm_router_interface_not_in_use( context, router_id, subnet_id) - subnet = self._core_plugin._get_subnet(context, subnet_id) + subnet = self._core_plugin.get_subnet(context, subnet_id) try: rport_qry = context.session.query(models_v2.Port).join(RouterPort) @@ -782,9 +788,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): def _get_router_for_floatingip(self, context, internal_port, internal_subnet_id, external_network_id): - subnet_db = self._core_plugin._get_subnet(context, - internal_subnet_id) - if not subnet_db['gateway_ip']: + subnet = self._core_plugin.get_subnet(context, internal_subnet_id) + if not subnet['gateway_ip']: msg = (_('Cannot add floating IP to port on subnet %s ' 'which has no gateway_ip') % internal_subnet_id) raise n_exc.BadRequest(resource='floatingip', msg=msg) @@ -823,7 +828,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): Retrieve information concerning the internal port where the floating IP should be associated to. 
""" - internal_port = self._core_plugin._get_port(context, fip['port_id']) + internal_port = self._core_plugin.get_port(context, fip['port_id']) if not internal_port['tenant_id'] == fip['tenant_id']: port_id = fip['port_id'] if 'id' in fip: @@ -956,14 +961,11 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): port = {'tenant_id': '', # tenant intentionally not set 'network_id': f_net_id, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': fip_id, 'device_owner': DEVICE_OWNER_FLOATINGIP, 'status': l3_constants.PORT_STATUS_NOTAPPLICABLE, 'name': ''} - if fip.get('floating_ip_address'): port['fixed_ips'] = [ {'ip_address': fip['floating_ip_address']}] @@ -971,9 +973,13 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): if fip.get('subnet_id'): port['fixed_ips'] = [ {'subnet_id': fip['subnet_id']}] - external_port = self._core_plugin.create_port(context.elevated(), - {'port': port}) + # 'status' in port dict could not be updated by default, use + # check_allow_post to stop the verification of system + external_port = p_utils.create_port(self._core_plugin, + context.elevated(), + {'port': port}, + check_allow_post=False) # Ensure IPv4 addresses are allocated on external port external_ipv4_ips = self._port_ipv4_fixed_ips(external_port) if not external_ipv4_ips: @@ -1085,23 +1091,23 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): deletion checks. 
""" try: - port_db = self._core_plugin._get_port(context, port_id) + port = self._core_plugin.get_port(context, port_id) except n_exc.PortNotFound: # non-existent ports don't need to be protected from deletion return - if port_db['device_owner'] in self.router_device_owners: + if port['device_owner'] in self.router_device_owners: # Raise port in use only if the port has IP addresses # Otherwise it's a stale port that can be removed - fixed_ips = port_db['fixed_ips'] + fixed_ips = port['fixed_ips'] if fixed_ips: - reason = _('has device owner %s') % port_db['device_owner'] - raise n_exc.ServicePortInUse(port_id=port_db['id'], + reason = _('has device owner %s') % port['device_owner'] + raise n_exc.ServicePortInUse(port_id=port['id'], reason=reason) else: LOG.debug("Port %(port_id)s has owner %(port_owner)s, but " "no IP address, so it can be deleted", - {'port_id': port_db['id'], - 'port_owner': port_db['device_owner']}) + {'port_id': port['id'], + 'port_owner': port['device_owner']}) def disassociate_floatingips(self, context, port_id): """Disassociate all floating IPs linked to specific port. diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py index f332a6db7dc..66f1e85bbb3 100644 --- a/neutron/db/l3_dvr_db.py +++ b/neutron/db/l3_dvr_db.py @@ -35,6 +35,7 @@ from neutron.extensions import portbindings from neutron.i18n import _LI from neutron import manager from neutron.plugins.common import constants +from neutron.plugins.common import utils as p_utils LOG = logging.getLogger(__name__) @@ -91,6 +92,12 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, "to centralized")) elif (not router_db.extra_attributes.distributed and router_res.get('distributed')): + # router should be disabled in order for upgrade + if router_db.admin_state_up: + msg = _('Cannot upgrade active router to distributed. 
Please ' + 'set router admin_state_up to False prior to upgrade.') + raise n_exc.BadRequest(resource='router', msg=msg) + # Notify advanced services of the imminent state transition # for the router. try: @@ -319,6 +326,28 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, return super(L3_NAT_with_dvr_db_mixin, self)._port_has_ipv6_address(port) + def _check_dvr_router_remove_required_and_notify_agent( + self, context, router, port, subnets): + if router.extra_attributes.distributed: + if router.gw_port and subnets[0]['id']: + self.delete_csnat_router_interface_ports( + context.elevated(), router, subnet_id=subnets[0]['id']) + plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + l3_agents = plugin.get_l3_agents_hosting_routers(context, + [router['id']]) + for l3_agent in l3_agents: + if not plugin.check_ports_exist_on_l3agent(context, l3_agent, + router['id']): + plugin.remove_router_from_l3_agent( + context, l3_agent['id'], router['id']) + router_interface_info = self._make_router_interface_info( + router['id'], port['tenant_id'], port['id'], subnets[0]['id'], + [subnet['id'] for subnet in subnets]) + self.notify_router_interface_action( + context, router_interface_info, 'remove') + return router_interface_info + def remove_router_interface(self, context, router_id, interface_info): remove_by_port, remove_by_subnet = ( self._validate_interface_info(interface_info, for_removal=True) @@ -331,32 +360,16 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, if remove_by_port: port, subnets = self._remove_interface_by_port( context, router_id, port_id, subnet_id, device_owner) + # remove_by_subnet is not used here, because the validation logic of # _validate_interface_info ensures that at least one of remote_by_* # is True. 
else: port, subnets = self._remove_interface_by_subnet( context, router_id, subnet_id, device_owner) - - if router.extra_attributes.distributed: - if router.gw_port: - self.delete_csnat_router_interface_ports( - context.elevated(), router, subnet_id=subnet_id) - plugin = manager.NeutronManager.get_service_plugins().get( - constants.L3_ROUTER_NAT) - l3_agents = plugin.get_l3_agents_hosting_routers(context, - [router_id]) - for l3_agent in l3_agents: - if not plugin.check_ports_exist_on_l3agent(context, l3_agent, - router_id): - plugin.remove_router_from_l3_agent( - context, l3_agent['id'], router_id) - - router_interface_info = self._make_router_interface_info( - router_id, port['tenant_id'], port['id'], subnets[0]['id'], - [subnet['id'] for subnet in subnets]) - self.notify_router_interface_action( - context, router_interface_info, 'remove') + router_interface_info = ( + self._check_dvr_router_remove_required_and_notify_agent( + context, router, port, subnets)) return router_interface_info def _get_snat_sync_interfaces(self, context, router_ids): @@ -557,17 +570,15 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, if not f_port: LOG.info(_LI('Agent Gateway port does not exist,' ' so create one: %s'), f_port) - agent_port = self._core_plugin.create_port( - context, - {'port': {'tenant_id': '', - 'network_id': network_id, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, - 'device_id': l3_agent_db['id'], - 'device_owner': DEVICE_OWNER_AGENT_GW, - 'binding:host_id': host, - 'admin_state_up': True, - 'name': ''}}) + port_data = {'tenant_id': '', + 'network_id': network_id, + 'device_id': l3_agent_db['id'], + 'device_owner': DEVICE_OWNER_AGENT_GW, + 'binding:host_id': host, + 'admin_state_up': True, + 'name': ''} + agent_port = p_utils.create_port(self._core_plugin, context, + {'port': port_data}) if agent_port: self._populate_subnets_for_ports(context, [agent_port]) return agent_port @@ -592,16 +603,15 @@ class 
L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, def _add_csnat_router_interface_port( self, context, router, network_id, subnet_id, do_pop=True): """Add SNAT interface to the specified router and subnet.""" - snat_port = self._core_plugin.create_port( - context, - {'port': {'tenant_id': '', - 'network_id': network_id, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'fixed_ips': [{'subnet_id': subnet_id}], - 'device_id': router.id, - 'device_owner': DEVICE_OWNER_DVR_SNAT, - 'admin_state_up': True, - 'name': ''}}) + port_data = {'tenant_id': '', + 'network_id': network_id, + 'fixed_ips': [{'subnet_id': subnet_id}], + 'device_id': router.id, + 'device_owner': DEVICE_OWNER_DVR_SNAT, + 'admin_state_up': True, + 'name': ''} + snat_port = p_utils.create_port(self._core_plugin, context, + {'port': port_data}) if not snat_port: msg = _("Unable to create the SNAT Interface Port") raise n_exc.BadRequest(resource='router', msg=msg) diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index 605a9356352..9b8ad89a458 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -31,6 +31,7 @@ from neutron.db import agents_db from neutron.db import l3_agentschedulers_db as l3agent_sch_db from neutron.db import model_base from neutron.db import models_v2 +from neutron.extensions import l3agentscheduler from neutron.i18n import _LI, _LW from neutron import manager from neutron.plugins.common import constants as service_constants @@ -51,8 +52,8 @@ class CentralizedSnatL3AgentBinding(model_base.BASEV2): sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) host_id = sa.Column(sa.String(255)) - csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) - + csnat_gw_port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete='CASCADE')) l3_agent = orm.relationship(agents_db.Agent) csnat_gw_port = orm.relationship(models_v2.Port) @@ -191,10 +192,10 @@ class 
L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): [n_const.DEVICE_OWNER_DVR_INTERFACE]} int_ports = self._core_plugin.get_ports( admin_context, filters=filter_rtr) - for prt in int_ports: + for port in int_ports: dvr_binding = (ml2_db. get_dvr_port_binding_by_host(context.session, - prt['id'], + port['id'], port_host)) if dvr_binding: # unbind this port from router @@ -249,46 +250,73 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): self.bind_snat_router(context, router_id, chosen_snat_agent) return chosen_snat_agent - def unbind_snat_servicenode(self, context, router_id): - """Unbind the snat router to the chosen l3 service agent.""" - vm_ports = [] + def unbind_snat(self, context, router_id, agent_id=None): + """Unbind snat from the chosen l3 service agent. + + Unbinds from any L3 agent hosting SNAT if passed agent_id is None + """ with context.session.begin(subtransactions=True): query = (context.session. query(CentralizedSnatL3AgentBinding). filter_by(router_id=router_id)) + if agent_id: + query = query.filter_by(l3_agent_id=agent_id) try: binding = query.one() except exc.NoResultFound: - LOG.debug('no snat router binding found for %s', router_id) + LOG.debug('no snat router binding found for router: %(' + 'router)s, agent: %(agent)s', + {'router': router_id, 'agent': agent_id or 'any'}) return - host = binding.l3_agent.host - subnet_ids = self.get_subnet_ids_on_router(context, router_id) - for subnet in subnet_ids: - vm_ports = ( - self._core_plugin.get_ports_on_host_by_subnet( - context, host, subnet)) - if vm_ports: - LOG.debug('One or more ports exist on the snat enabled ' - 'l3_agent host %(host)s and router_id %(id)s', - {'host': host, 'id': router_id}) - break agent_id = binding.l3_agent_id LOG.debug('Delete binding of the SNAT router %(router_id)s ' 'from agent %(id)s', {'router_id': router_id, 'id': agent_id}) context.session.delete(binding) - if not vm_ports: - query = (context.session. 
- query(l3agent_sch_db.RouterL3AgentBinding). - filter_by(router_id=router_id, - l3_agent_id=agent_id). - delete(synchronize_session=False)) - self.l3_rpc_notifier.router_removed_from_agent( - context, router_id, host) - LOG.debug('Removed binding for router %(router_id)s and ' - 'agent %(id)s', {'router_id': router_id, 'id': agent_id}) + return binding + + def unbind_router_servicenode(self, context, router_id, binding): + """Unbind the router from the chosen l3 service agent.""" + port_found = False + with context.session.begin(subtransactions=True): + host = binding.l3_agent.host + subnet_ids = self.get_subnet_ids_on_router(context, router_id) + for subnet in subnet_ids: + ports = ( + self._core_plugin.get_ports_on_host_by_subnet( + context, host, subnet)) + for port in ports: + if (n_utils.is_dvr_serviced(port['device_owner'])): + port_found = True + LOG.debug('One or more ports exist on the snat ' + 'enabled l3_agent host %(host)s and ' + 'router_id %(id)s', + {'host': host, 'id': router_id}) + break + agent_id = binding.l3_agent_id + + if not port_found: + context.session.query( + l3agent_sch_db.RouterL3AgentBinding).filter_by( + router_id=router_id, l3_agent_id=agent_id).delete( + synchronize_session=False) + + if not port_found: + self.l3_rpc_notifier.router_removed_from_agent( + context, router_id, host) + LOG.debug('Removed binding for router %(router_id)s and ' + 'agent %(agent_id)s', + {'router_id': router_id, 'agent_id': agent_id}) + return port_found + + def unbind_snat_servicenode(self, context, router_id): + """Unbind snat AND the router from the current agent.""" + with context.session.begin(subtransactions=True): + binding = self.unbind_snat(context, router_id) + if binding: + self.unbind_router_servicenode(context, router_id, binding) def get_snat_bindings(self, context, router_ids): """Retrieves the dvr snat bindings for a router.""" @@ -342,7 +370,10 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): return snat_candidates = 
self.get_snat_candidates(sync_router, active_l3_agents) - if snat_candidates: + if not snat_candidates: + LOG.warn(_LW('No candidates found for SNAT')) + return + else: try: chosen_agent = self.bind_snat_servicenode( context, router_id, snat_candidates) @@ -353,6 +384,43 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): context, router_id, chosen_agent) return chosen_agent + def reschedule_router(self, context, router_id, candidates=None): + """Reschedule router to new l3 agents + + Remove the router from l3 agents currently hosting it and + schedule it again + """ + router = self.get_router(context, router_id) + is_distributed = router.get('distributed', False) + if not is_distributed: + return super(L3_DVRsch_db_mixin, self).reschedule_router( + context, router_id, candidates) + + old_agents = self.list_l3_agents_hosting_router( + context, router_id)['agents'] + with context.session.begin(subtransactions=True): + for agent in old_agents: + self._unbind_router(context, router_id, agent['id']) + self.unbind_snat_servicenode(context, router_id) + + self.schedule_router(context, router_id, candidates=candidates) + new_agents = self.list_l3_agents_hosting_router( + context, router_id)['agents'] + if not new_agents: + raise l3agentscheduler.RouterReschedulingFailed( + router_id=router_id) + + l3_notifier = self.agent_notifiers.get(n_const.AGENT_TYPE_L3) + if l3_notifier: + old_hosts = [agent['host'] for agent in old_agents] + new_hosts = [agent['host'] for agent in new_agents] + for host in set(old_hosts) - set(new_hosts): + l3_notifier.router_removed_from_agent( + context, router_id, host) + for host in new_hosts: + l3_notifier.router_added_to_agent( + context, [router_id], host) + def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS): @@ -362,6 +430,48 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): return 
self._get_dvr_sync_data(context, host, agent, router_ids=router_ids, active=True) + def check_agent_router_scheduling_needed(self, context, agent, router): + if router.get('distributed'): + if router['external_gateway_info']: + return not self.get_snat_bindings(context, [router['id']]) + return False + return super(L3_DVRsch_db_mixin, + self).check_agent_router_scheduling_needed( + context, agent, router) + + def create_router_to_agent_binding(self, context, agent, router): + """Create router to agent binding.""" + router_id = router['id'] + agent_id = agent['id'] + if router['external_gateway_info'] and self.router_scheduler and ( + router.get('distributed')): + try: + self.bind_snat_router(context, router_id, agent) + self.bind_dvr_router_servicenode(context, + router_id, agent) + except db_exc.DBError: + raise l3agentscheduler.RouterSchedulingFailed( + router_id=router_id, + agent_id=agent_id) + else: + super(L3_DVRsch_db_mixin, self).create_router_to_agent_binding( + context, agent, router) + + def remove_router_from_l3_agent(self, context, agent_id, router_id): + router = self.get_router(context, router_id) + if router['external_gateway_info'] and router.get('distributed'): + binding = self.unbind_snat(context, router_id, agent_id=agent_id) + if binding: + notification_not_sent = self.unbind_router_servicenode(context, + router_id, binding) + if notification_not_sent: + self.l3_rpc_notifier.routers_updated( + context, [router_id], schedule_routers=False) + else: + super(L3_DVRsch_db_mixin, + self).remove_router_from_l3_agent( + context, agent_id, router_id) + def _notify_l3_agent_new_port(resource, event, trigger, **kwargs): LOG.debug('Received %(resource)s %(event)s', { diff --git a/neutron/db/l3_hamode_db.py b/neutron/db/l3_hamode_db.py index 8adc0307855..7b286869e1a 100644 --- a/neutron/db/l3_hamode_db.py +++ b/neutron/db/l3_hamode_db.py @@ -30,7 +30,10 @@ from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import 
l3_ext_ha_mode as l3_ha from neutron.extensions import portbindings +from neutron.extensions import providernet from neutron.i18n import _LI +from neutron.plugins.common import utils as p_utils + VR_ID_RANGE = set(range(1, 255)) MAX_ALLOCATION_TRIES = 10 @@ -53,6 +56,15 @@ L3_HA_OPTS = [ cfg.StrOpt('l3_ha_net_cidr', default='169.254.192.0/18', help=_('Subnet used for the l3 HA admin network.')), + cfg.StrOpt('l3_ha_network_type', default='', + help=_("The network type to use when creating the HA network " + "for an HA router. By default or if empty, the first " + "'tenant_network_types' is used. This is helpful when " + "the VRRP traffic should use a specific network which " + "is not the default one.")), + cfg.StrOpt('l3_ha_network_physical_name', default='', + help=_("The physical network name with which the HA network " + "can be created.")) ] cfg.CONF.register_opts(L3_HA_OPTS) @@ -209,18 +221,15 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin): context, ha_network.network_id, router.id) def _create_ha_subnet(self, context, network_id, tenant_id): - args = {'subnet': - {'network_id': network_id, - 'tenant_id': '', - 'name': constants.HA_SUBNET_NAME % tenant_id, - 'ip_version': 4, - 'cidr': cfg.CONF.l3_ha_net_cidr, - 'enable_dhcp': False, - 'host_routes': attributes.ATTR_NOT_SPECIFIED, - 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, - 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, - 'gateway_ip': None}} - return self._core_plugin.create_subnet(context, args) + args = {'network_id': network_id, + 'tenant_id': '', + 'name': constants.HA_SUBNET_NAME % tenant_id, + 'ip_version': 4, + 'cidr': cfg.CONF.l3_ha_net_cidr, + 'enable_dhcp': False, + 'gateway_ip': None} + return p_utils.create_subnet(self._core_plugin, context, + {'subnet': args}) def _create_ha_network_tenant_binding(self, context, tenant_id, network_id): @@ -230,6 +239,14 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin): context.session.add(ha_network) return ha_network 
+ def _add_ha_network_settings(self, network): + if cfg.CONF.l3_ha_network_type: + network[providernet.NETWORK_TYPE] = cfg.CONF.l3_ha_network_type + + if cfg.CONF.l3_ha_network_physical_name: + network[providernet.PHYSICAL_NETWORK] = ( + cfg.CONF.l3_ha_network_physical_name) + def _create_ha_network(self, context, tenant_id): admin_ctx = context.elevated() @@ -237,9 +254,10 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin): {'name': constants.HA_NETWORK_NAME % tenant_id, 'tenant_id': '', 'shared': False, - 'admin_state_up': True, - 'status': constants.NET_STATUS_ACTIVE}} - network = self._core_plugin.create_network(admin_ctx, args) + 'admin_state_up': True}} + self._add_ha_network_settings(args['network']) + network = p_utils.create_network(self._core_plugin, admin_ctx, args) + try: ha_network = self._create_ha_network_tenant_binding(admin_ctx, tenant_id, @@ -292,16 +310,14 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin): return portbinding def add_ha_port(self, context, router_id, network_id, tenant_id): - port = self._core_plugin.create_port(context, { - 'port': - {'tenant_id': '', - 'network_id': network_id, - 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'admin_state_up': True, - 'device_id': router_id, - 'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF, - 'name': constants.HA_PORT_NAME % tenant_id}}) + args = {'tenant_id': '', + 'network_id': network_id, + 'admin_state_up': True, + 'device_id': router_id, + 'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF, + 'name': constants.HA_PORT_NAME % tenant_id} + port = p_utils.create_port(self._core_plugin, context, + {'port': args}) try: return self._create_ha_port_binding(context, port['id'], router_id) diff --git a/neutron/db/migration/README b/neutron/db/migration/README index e6e51388739..18a126cb251 100644 --- a/neutron/db/migration/README +++ b/neutron/db/migration/README @@ -1,88 +1,4 @@ -# Copyright 2012 New Dream Network, 
LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +See doc/source/devref/alembic_migrations.rst -The migrations in the alembic/versions contain the changes needed to migrate -from older Neutron releases to newer versions. A migration occurs by executing -a script that details the changes needed to upgrade the database. The migration -scripts are ordered so that multiple scripts can run sequentially to update the -database. The scripts are executed by Neutron's migration wrapper which uses -the Alembic library to manage the migration. Neutron supports migration from -Havana or later. 
- - -If you are a deployer or developer and want to migrate from Folsom to Grizzly -or later you must first add version tracking to the database: - -$ neutron-db-manage --config-file /path/to/neutron.conf \ - --config-file /path/to/plugin/config.ini stamp folsom - -You can then upgrade to the latest database version via: -$ neutron-db-manage --config-file /path/to/neutron.conf \ - --config-file /path/to/plugin/config.ini upgrade head - -To check the current database version: -$ neutron-db-manage --config-file /path/to/neutron.conf \ - --config-file /path/to/plugin/config.ini current - -To create a script to run the migration offline: -$ neutron-db-manage --config-file /path/to/neutron.conf \ - --config-file /path/to/plugin/config.ini upgrade head --sql - -To run the offline migration between specific migration versions: -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini upgrade \ -: --sql - -Upgrade the database incrementally: -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini upgrade --delta <# of revs> - -NOTE: Database downgrade is not supported. - - -DEVELOPERS: - -A database migration script is required when you submit a change to Neutron -that alters the database model definition. The migration script is a special -python file that includes code to upgrade the database to match the changes in -the model definition. Alembic will execute these scripts in order to provide a -linear migration path between revision. The neutron-db-manage command can be -used to generate migration template for you to complete. The operations in the -template are those supported by the Alembic migration library. - -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini revision \ --m "description of revision" \ ---autogenerate - -This generates a prepopulated template with the changes needed to match the -database state with the models. 
You should inspect the autogenerated template -to ensure that the proper models have been altered. - -In rare circumstances, you may want to start with an empty migration template -and manually author the changes necessary for an upgrade. You can create a -blank file via: - -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini revision \ --m "description of revision" - -The migration timeline should remain linear so that there is a clear path when -upgrading. To verify that the timeline does branch, you can run this command: -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini check_migration - -If the migration path does branch, you can find the branch point via: -$ neutron-db-manage --config-file /path/to/neutron.conf \ ---config-file /path/to/plugin/config.ini history +Rendered at +http://docs.openstack.org/developer/neutron/devref/alembic_migrations.html diff --git a/neutron/db/migration/__init__.py b/neutron/db/migration/__init__.py index 86cce385af9..d92e57c49cb 100644 --- a/neutron/db/migration/__init__.py +++ b/neutron/db/migration/__init__.py @@ -129,7 +129,7 @@ def create_table_if_not_exist_psql(table_name, values): def remove_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.drop_constraint( - name=fk['name'], + constraint_name=fk['name'], table_name=table, type_='foreignkey' ) @@ -138,9 +138,9 @@ def remove_foreign_keys(table, foreign_keys): def create_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.create_foreign_key( - name=fk['name'], - source=table, - referent=fk['referred_table'], + constraint_name=fk['name'], + source_table=table, + referent_table=fk['referred_table'], local_cols=fk['constrained_columns'], remote_cols=fk['referred_columns'], ondelete='CASCADE' diff --git a/neutron/db/migration/alembic_migrations/external.py b/neutron/db/migration/alembic_migrations/external.py index 3f337c04b59..cd63c2f7226 100644 --- 
a/neutron/db/migration/alembic_migrations/external.py +++ b/neutron/db/migration/alembic_migrations/external.py @@ -24,12 +24,15 @@ LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors', FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies'] -DRIVER_TABLES = [ - # Arista ML2 driver Models moved to openstack/networking-arista +# Arista ML2 driver Models moved to openstack/networking-arista +REPO_ARISTA_TABLES = [ 'arista_provisioned_nets', 'arista_provisioned_vms', 'arista_provisioned_tenants', - # Models moved to openstack/networking-cisco +] + +# Models moved to openstack/networking-cisco +REPO_CISCO_TABLES = [ 'cisco_ml2_apic_contracts', 'cisco_ml2_apic_names', 'cisco_ml2_apic_host_links', @@ -40,7 +43,62 @@ DRIVER_TABLES = [ 'cisco_ml2_n1kv_vxlan_allocations', 'cisco_ml2_n1kv_vlan_allocations', 'cisco_ml2_n1kv_profile_bindings', - # Add your tables with moved models here^. Please end with a comma. + 'cisco_ml2_nexusport_bindings', + 'cisco_ml2_nexus_nve', + 'ml2_nexus_vxlan_allocations', + 'ml2_nexus_vxlan_mcast_groups', + 'ml2_ucsm_port_profiles', ] -TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + DRIVER_TABLES) +# VMware-NSX models moved to openstack/vmware-nsx +REPO_VMWARE_TABLES = [ + 'tz_network_bindings', + 'neutron_nsx_network_mappings', + 'neutron_nsx_security_group_mappings', + 'neutron_nsx_port_mappings', + 'neutron_nsx_router_mappings', + 'multi_provider_networks', + 'networkconnections', + 'networkgatewaydevicereferences', + 'networkgatewaydevices', + 'networkgateways', + 'maclearningstates', + 'qosqueues', + 'portqueuemappings', + 'networkqueuemappings', + 'lsn_port', + 'lsn', + 'nsxv_router_bindings', + 'nsxv_edge_vnic_bindings', + 'nsxv_edge_dhcp_static_bindings', + 'nsxv_internal_networks', + 'nsxv_internal_edges', + 'nsxv_security_group_section_mappings', + 'nsxv_rule_mappings', + 'nsxv_port_vnic_mappings', + 'nsxv_router_ext_attributes', + 'nsxv_tz_network_bindings', + 'nsxv_port_index_mappings', + 
'nsxv_firewall_rule_bindings', + 'nsxv_spoofguard_policy_network_mappings', + 'nsxv_vdr_dhcp_bindings', + 'vcns_router_bindings', +] + +# NEC models moved to stackforge/networking-nec +REPO_NEC_TABLES = [ + 'ofcnetworkmappings', + 'ofcportmappings', + 'ofcroutermappings', + 'ofcfiltermappings', + 'ofctenantmappings', + 'portinfos', + 'routerproviders', + 'packetfilters', +] + +TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + + REPO_ARISTA_TABLES + + REPO_CISCO_TABLES + + REPO_VMWARE_TABLES + + REPO_NEC_TABLES) diff --git a/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py b/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py index a60a9e17a47..00505e7ad05 100644 --- a/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py +++ b/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py @@ -38,7 +38,7 @@ TABLE_NAME = 'extradhcpopts' def upgrade(): with migration.remove_fks_from_table(TABLE_NAME): op.drop_constraint( - name=CONSTRAINT_NAME_OLD, + constraint_name=CONSTRAINT_NAME_OLD, table_name=TABLE_NAME, type_='unique' ) @@ -48,7 +48,7 @@ def upgrade(): op.execute("UPDATE extradhcpopts SET ip_version = 4") op.create_unique_constraint( - name=CONSTRAINT_NAME_NEW, - source='extradhcpopts', - local_cols=['port_id', 'opt_name', 'ip_version'] + constraint_name=CONSTRAINT_NAME_NEW, + table_name='extradhcpopts', + columns=['port_id', 'opt_name', 'ip_version'] ) diff --git a/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py b/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py index fec9a6390d5..44d3d103dad 100644 --- a/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py +++ 
b/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py @@ -33,7 +33,7 @@ CONSTRAINT_NAME = 'uniq_ports0network_id0mac_address' def upgrade(): op.create_unique_constraint( - name=CONSTRAINT_NAME, - source=TABLE_NAME, - local_cols=['network_id', 'mac_address'] + constraint_name=CONSTRAINT_NAME, + table_name=TABLE_NAME, + columns=['network_id', 'mac_address'] ) diff --git a/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py b/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py index 6df244cdc0d..4e3f8bdc6cf 100644 --- a/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py +++ b/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py @@ -36,8 +36,8 @@ from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.sql import expression as sa_expr -from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers.hyperv import constants FLAT_VLAN_ID = -1 LOCAL_VLAN_ID = -2 @@ -114,7 +114,7 @@ def _migrate_port_bindings(engine): sa_expr.select(['*'], from_obj=port_binding_ports)) ml2_bindings = [dict(x) for x in source_bindings] for binding in ml2_bindings: - binding['vif_type'] = portbindings.VIF_TYPE_HYPERV + binding['vif_type'] = constants.VIF_TYPE_HYPERV binding['driver'] = HYPERV segment = port_segment_map.get(binding['port_id']) if segment: diff --git a/neutron/db/migration/alembic_migrations/versions/38495dc99731_ml2_tunnel_endpoints_table.py b/neutron/db/migration/alembic_migrations/versions/38495dc99731_ml2_tunnel_endpoints_table.py index 65f6f302a08..3bf08bd388b 100644 --- a/neutron/db/migration/alembic_migrations/versions/38495dc99731_ml2_tunnel_endpoints_table.py +++ 
b/neutron/db/migration/alembic_migrations/versions/38495dc99731_ml2_tunnel_endpoints_table.py @@ -37,15 +37,15 @@ def upgrade(): op.add_column('ml2_gre_endpoints', sa.Column('host', sa.String(length=255), nullable=True)) op.create_unique_constraint( - name=CONSTRAINT_NAME_GRE, - source='ml2_gre_endpoints', - local_cols=['host'] + constraint_name=CONSTRAINT_NAME_GRE, + table_name='ml2_gre_endpoints', + columns=['host'] ) op.add_column('ml2_vxlan_endpoints', sa.Column('host', sa.String(length=255), nullable=True)) op.create_unique_constraint( - name=CONSTRAINT_NAME_VXLAN, - source='ml2_vxlan_endpoints', - local_cols=['host'] + constraint_name=CONSTRAINT_NAME_VXLAN, + table_name='ml2_vxlan_endpoints', + columns=['host'] ) diff --git a/neutron/db/migration/alembic_migrations/versions/41662e32bce2_l3_dvr_snat_mapping.py b/neutron/db/migration/alembic_migrations/versions/41662e32bce2_l3_dvr_snat_mapping.py index 75fe067bd89..489fd69968e 100644 --- a/neutron/db/migration/alembic_migrations/versions/41662e32bce2_l3_dvr_snat_mapping.py +++ b/neutron/db/migration/alembic_migrations/versions/41662e32bce2_l3_dvr_snat_mapping.py @@ -40,10 +40,10 @@ def upgrade(): prev_pk_name = prev_pk_const.get('name') with migration.remove_fks_from_table(TABLE_NAME): - op.drop_constraint(name=prev_pk_name, + op.drop_constraint(constraint_name=prev_pk_name, table_name=TABLE_NAME, type_='primary') - op.create_primary_key(name=None, + op.create_primary_key(constraint_name=None, table_name=TABLE_NAME, - cols=['router_id', 'l3_agent_id']) + columns=['router_id', 'l3_agent_id']) diff --git a/neutron/db/migration/alembic_migrations/versions/44621190bc02_add_uniqueconstraint_ipavailability_ranges.py b/neutron/db/migration/alembic_migrations/versions/44621190bc02_add_uniqueconstraint_ipavailability_ranges.py index 1a00de50031..314116a20c5 100644 --- a/neutron/db/migration/alembic_migrations/versions/44621190bc02_add_uniqueconstraint_ipavailability_ranges.py +++ 
b/neutron/db/migration/alembic_migrations/versions/44621190bc02_add_uniqueconstraint_ipavailability_ranges.py @@ -35,13 +35,13 @@ UC_2_NAME = 'uniq_ipavailabilityranges0last_ip0allocation_pool_id' def upgrade(): op.create_unique_constraint( - name=UC_1_NAME, - source=TABLE_NAME, - local_cols=['first_ip', 'allocation_pool_id'] + constraint_name=UC_1_NAME, + table_name=TABLE_NAME, + columns=['first_ip', 'allocation_pool_id'] ) op.create_unique_constraint( - name=UC_2_NAME, - source=TABLE_NAME, - local_cols=['last_ip', 'allocation_pool_id'] + constraint_name=UC_2_NAME, + table_name=TABLE_NAME, + columns=['last_ip', 'allocation_pool_id'] ) diff --git a/neutron/db/migration/alembic_migrations/versions/57dd745253a6_nuage_kilo_migrate.py b/neutron/db/migration/alembic_migrations/versions/57dd745253a6_nuage_kilo_migrate.py index e03769e2922..96b08be47fb 100644 --- a/neutron/db/migration/alembic_migrations/versions/57dd745253a6_nuage_kilo_migrate.py +++ b/neutron/db/migration/alembic_migrations/versions/57dd745253a6_nuage_kilo_migrate.py @@ -44,10 +44,10 @@ def upgrade(): op.add_column('nuage_subnet_l2dom_mapping', sa.Column('nuage_managed_subnet', sa.Boolean(), nullable=True)) op.create_unique_constraint( - name=CONSTRAINT_NAME_NR, - source='nuage_net_partition_router_mapping', - local_cols=['nuage_router_id']) + constraint_name=CONSTRAINT_NAME_NR, + table_name='nuage_net_partition_router_mapping', + columns=['nuage_router_id']) op.create_unique_constraint( - name=CONSTRAINT_NAME_NS, - source='nuage_subnet_l2dom_mapping', - local_cols=['nuage_subnet_id']) + constraint_name=CONSTRAINT_NAME_NS, + table_name='nuage_subnet_l2dom_mapping', + columns=['nuage_subnet_id']) diff --git a/neutron/db/migration/alembic_migrations/versions/HEADS b/neutron/db/migration/alembic_migrations/versions/HEADS index 1ea4069eb22..9928899efb2 100644 --- a/neutron/db/migration/alembic_migrations/versions/HEADS +++ b/neutron/db/migration/alembic_migrations/versions/HEADS @@ -1,3 +1,2 @@ 
-1b4c6e320f79 -2a16083502f3 -kilo +11926bcfe72d +34af2b5c5a59 diff --git a/neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py b/neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py index 51958697a57..298a1f5c05e 100644 --- a/neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py +++ b/neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py @@ -40,7 +40,7 @@ def _drop_constraint(): def upgrade(): _drop_constraint() op.create_foreign_key( - name=None, - source='floatingips', referent='ports', + constraint_name=None, + source_table='floatingips', referent_table='ports', local_cols=['floating_port_id'], remote_cols=['id'], ondelete='CASCADE' ) diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py new file mode 100644 index 00000000000..9ef55843da6 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py @@ -0,0 +1,49 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add geneve ml2 type driver + +Revision ID: 11926bcfe72d +Revises: 2e5352a0ad4d +Create Date: 2015-08-27 19:56:16.356522 + +""" + +# revision identifiers, used by Alembic. 
+revision = '11926bcfe72d' +down_revision = '2e5352a0ad4d' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table( + 'ml2_geneve_allocations', + sa.Column('geneve_vni', sa.Integer(), + autoincrement=False, nullable=False), + sa.Column('allocated', sa.Boolean(), + server_default=sa.sql.false(), nullable=False), + sa.PrimaryKeyConstraint('geneve_vni'), + ) + op.create_index(op.f('ix_ml2_geneve_allocations_allocated'), + 'ml2_geneve_allocations', ['allocated'], unique=False) + op.create_table( + 'ml2_geneve_endpoints', + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.Column('host', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('ip_address'), + sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'), + ) diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py new file mode 100644 index 00000000000..322f6b06594 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py @@ -0,0 +1,41 @@ +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add missing foreign keys + +Revision ID: 2e5352a0ad4d +Revises: 2a16083502f3 +Create Date: 2015-08-20 12:43:09.110427 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2e5352a0ad4d' +down_revision = '2a16083502f3' + +from alembic import op +from sqlalchemy.engine import reflection + +from neutron.db import migration + + +TABLE_NAME = 'flavorserviceprofilebindings' + + +def upgrade(): + inspector = reflection.Inspector.from_engine(op.get_bind()) + fk_constraints = inspector.get_foreign_keys(TABLE_NAME) + migration.remove_foreign_keys(TABLE_NAME, fk_constraints) + migration.create_foreign_keys(TABLE_NAME, fk_constraints) diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py b/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py index bd1ddccf930..0e6358ffb7e 100644 --- a/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py +++ b/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py @@ -19,11 +19,13 @@ Create Date: 2015-06-22 00:00:00.000000 """ +from neutron.db.migration import cli + + # revision identifiers, used by Alembic. revision = '30018084ec99' -down_revision = None -depends_on = ('kilo',) -branch_labels = ('liberty_contract',) +down_revision = 'kilo' +branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): diff --git a/neutron/plugins/ml2/drivers/cisco/ucsm/ucsm_model.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py similarity index 55% rename from neutron/plugins/ml2/drivers/cisco/ucsm/ucsm_model.py rename to neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py index 05b164805e4..ba523ae655b 100644 --- a/neutron/plugins/ml2/drivers/cisco/ucsm/ucsm_model.py +++ b/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py @@ -1,5 +1,4 @@ -# Copyright 2015 Cisco Systems, Inc. -# All rights reserved. 
+# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -12,18 +11,28 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +# +"""Add dns_name to Port + +Revision ID: 34af2b5c5a59 +Revises: 9859ac9c136 +Create Date: 2015-08-23 00:22:47.618593 + +""" + +# revision identifiers, used by Alembic. +revision = '34af2b5c5a59' +down_revision = '9859ac9c136' + +from alembic import op import sqlalchemy as sa -from neutron.db import model_base +from neutron.extensions import dns -class PortProfile(model_base.BASEV2): - - """Port profiles created on the UCS Manager.""" - - __tablename__ = 'ml2_ucsm_port_profiles' - - vlan_id = sa.Column(sa.Integer(), nullable=False, primary_key=True) - profile_id = sa.Column(sa.String(64), nullable=False) - created_on_ucs = sa.Column(sa.Boolean(), nullable=False) +def upgrade(): + op.add_column('ports', + sa.Column('dns_name', + sa.String(length=dns.FQDN_MAX_LEN), + nullable=True)) diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py index df82f17c936..e63b3f5d09b 100644 --- a/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py +++ b/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py @@ -21,15 +21,17 @@ Create Date: 2015-04-19 14:59:15.102609 """ -# revision identifiers, used by Alembic. -revision = '354db87e3225' -down_revision = None -branch_labels = ('liberty_expand',) -depends_on = ('kilo',) - from alembic import op import sqlalchemy as sa +from neutron.db.migration import cli + + +# revision identifiers, used by Alembic. 
+revision = '354db87e3225' +down_revision = 'kilo' +branch_labels = (cli.EXPAND_BRANCH,) + def upgrade(): op.create_table( diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py new file mode 100755 index 00000000000..a692b955338 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py @@ -0,0 +1,69 @@ +# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""qos db changes + +Revision ID: 48153cb5f051 +Revises: 1b4c6e320f79 +Create Date: 2015-06-24 17:03:34.965101 + +""" + +# revision identifiers, used by Alembic. 
+revision = '48153cb5f051' +down_revision = '1b4c6e320f79' + +from alembic import op +import sqlalchemy as sa + +from neutron.api.v2 import attributes as attrs + + +def upgrade(): + op.create_table( + 'qos_policies', + sa.Column('id', sa.String(length=36), primary_key=True), + sa.Column('name', sa.String(length=attrs.NAME_MAX_LEN)), + sa.Column('description', sa.String(length=attrs.DESCRIPTION_MAX_LEN)), + sa.Column('shared', sa.Boolean(), nullable=False), + sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN), + index=True)) + + op.create_table( + 'qos_network_policy_bindings', + sa.Column('policy_id', sa.String(length=36), + sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), + nullable=False), + sa.Column('network_id', sa.String(length=36), + sa.ForeignKey('networks.id', ondelete='CASCADE'), + nullable=False, unique=True)) + + op.create_table( + 'qos_port_policy_bindings', + sa.Column('policy_id', sa.String(length=36), + sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), + nullable=False), + sa.Column('port_id', sa.String(length=36), + sa.ForeignKey('ports.id', ondelete='CASCADE'), + nullable=False, unique=True)) + + op.create_table( + 'qos_bandwidth_limit_rules', + sa.Column('id', sa.String(length=36), primary_key=True), + sa.Column('qos_policy_id', sa.String(length=36), + sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), + nullable=False, unique=True), + sa.Column('max_kbps', sa.Integer()), + sa.Column('max_burst_kbps', sa.Integer())) diff --git a/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py b/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py new file mode 100644 index 00000000000..c8935a86f13 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py @@ -0,0 +1,47 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may 
+# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""quota_reservations + +Revision ID: 9859ac9c136 +Revises: 48153cb5f051 +Create Date: 2015-03-11 06:40:56.775075 + +""" + +# revision identifiers, used by Alembic. +revision = '9859ac9c136' +down_revision = '48153cb5f051' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table( + 'reservations', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('expiration', sa.DateTime(), nullable=True), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'resourcedeltas', + sa.Column('resource', sa.String(length=255), nullable=False), + sa.Column('reservation_id', sa.String(length=36), nullable=False), + sa.Column('amount', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('resource', 'reservation_id')) diff --git a/neutron/db/migration/cli.py b/neutron/db/migration/cli.py index 0881c72112b..d33baa84df7 100644 --- a/neutron/db/migration/cli.py +++ b/neutron/db/migration/cli.py @@ -22,8 +22,8 @@ from alembic import script as alembic_script from alembic import util as alembic_util from oslo_config import cfg from oslo_utils import importutils +import pkg_resources -from neutron.common import repos from neutron.common import utils @@ -31,24 +31,45 @@ from neutron.common import utils HEAD_FILENAME = 'HEAD' HEADS_FILENAME = 'HEADS' CURRENT_RELEASE = "liberty" -MIGRATION_BRANCHES = ('expand', 
'contract') +EXPAND_BRANCH = 'expand' +CONTRACT_BRANCH = 'contract' +MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH) -mods = repos.NeutronModules() -VALID_SERVICES = list(map(mods.alembic_name, mods.installed_list())) +MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations' +migration_entrypoints = { + entrypoint.name: entrypoint + for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS) +} +neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini') + +VALID_SERVICES = ['fwaas', 'lbaas', 'vpnaas'] +INSTALLED_SERVICES = [service_ for service_ in VALID_SERVICES + if 'neutron-%s' % service_ in migration_entrypoints] +INSTALLED_SERVICE_PROJECTS = ['neutron-%s' % service_ + for service_ in INSTALLED_SERVICES] +INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints + if project_ not in INSTALLED_SERVICE_PROJECTS] + +service_help = ( + _("Can be one of '%s'.") % "', '".join(INSTALLED_SERVICES) + if INSTALLED_SERVICES else + _("(No services are currently installed).") +) _core_opts = [ cfg.StrOpt('core_plugin', default='', help=_('Neutron plugin provider module')), - cfg.ListOpt('service_plugins', - default=[], - help=_("The service plugins Neutron will use")), cfg.StrOpt('service', - choices=VALID_SERVICES, - help=_("The advanced service to execute the command against. " - "Can be one of '%s'.") % "', '".join(VALID_SERVICES)), + choices=INSTALLED_SERVICES, + help=(_("The advanced service to execute the command against. ") + + service_help)), + cfg.StrOpt('subproject', + choices=INSTALLED_SUBPROJECTS, + help=(_("The subproject to execute the command against. 
" + "Can be one of %s.") % INSTALLED_SUBPROJECTS)), cfg.BoolOpt('split_branches', default=False, help=_("Enforce using split branches file structure.")) @@ -78,14 +99,25 @@ CONF.register_opts(_quota_opts, 'QUOTAS') def do_alembic_command(config, cmd, *args, **kwargs): + project = config.get_main_option('neutron_project') + alembic_util.msg(_('Running %(cmd)s for %(project)s ...') % + {'cmd': cmd, 'project': project}) try: getattr(alembic_command, cmd)(config, *args, **kwargs) except alembic_util.CommandError as e: alembic_util.err(six.text_type(e)) + alembic_util.msg(_('OK')) + + +def _get_alembic_entrypoint(project): + if project not in migration_entrypoints: + alembic_util.err(_('Sub-project %s not installed.') % project) + return migration_entrypoints[project] def do_check_migration(config, cmd): do_alembic_command(config, 'branches') + validate_labels(config) validate_heads_file(config) @@ -130,14 +162,9 @@ def do_stamp(config, cmd): sql=CONF.command.sql) -def _get_branch_label(branch): - '''Get the latest branch label corresponding to release cycle.''' - return '%s_%s' % (CURRENT_RELEASE, branch) - - def _get_branch_head(branch): '''Get the latest @head specification for a branch.''' - return '%s@head' % _get_branch_label(branch) + return '%s@head' % branch def do_revision(config, cmd): @@ -148,24 +175,17 @@ def do_revision(config, cmd): 'sql': CONF.command.sql, } - if _use_separate_migration_branches(CONF): + if _use_separate_migration_branches(config): for branch in MIGRATION_BRANCHES: - version_path = _get_version_branch_path(CONF, branch) + version_path = _get_version_branch_path(config, branch) addn_kwargs['version_path'] = version_path + addn_kwargs['head'] = _get_branch_head(branch) if not os.path.exists(version_path): # Bootstrap initial directory structure utils.ensure_dir(version_path) - # Each new release stream of migrations is detached from - # previous migration chains - addn_kwargs['head'] = 'base' # Mark the very first revision in the new 
branch with its label - addn_kwargs['branch_label'] = _get_branch_label(branch) - # TODO(ihrachyshka): ideally, we would also add depends_on here - # to refer to the head of the previous release stream. But - # alembic API does not support it yet. - else: - addn_kwargs['head'] = _get_branch_head(branch) + addn_kwargs['branch_label'] = branch do_alembic_command(config, cmd, **addn_kwargs) else: @@ -173,21 +193,81 @@ def do_revision(config, cmd): update_heads_file(config) +def _get_release_labels(labels): + result = set() + for label in labels: + result.add('%s_%s' % (CURRENT_RELEASE, label)) + return result + + +def _compare_labels(revision, expected_labels): + # validate that the script has expected labels only + bad_labels = revision.branch_labels - expected_labels + if bad_labels: + # NOTE(ihrachyshka): this hack is temporary to accomodate those + # projects that already initialized their branches with liberty_* + # labels. Let's notify them about the deprecation for now and drop it + # later. + bad_labels_with_release = (revision.branch_labels - + _get_release_labels(expected_labels)) + if not bad_labels_with_release: + alembic_util.warn( + _('Release aware branch labels (%s) are deprecated. 
' + 'Please switch to expand@ and contract@ ' + 'labels.') % bad_labels) + return + + script_name = os.path.basename(revision.path) + alembic_util.err( + _('Unexpected label for script %(script_name)s: %(labels)s') % + {'script_name': script_name, + 'labels': bad_labels} + ) + + +def _validate_single_revision_labels(script_dir, revision, label=None): + expected_labels = set() + if label is not None: + expected_labels.add(label) + + _compare_labels(revision, expected_labels) + + # if it's not the root element of the branch, expect the parent of the + # script to have the same label + if revision.down_revision is not None: + down_revision = script_dir.get_revision(revision.down_revision) + _compare_labels(down_revision, expected_labels) + + +def _validate_revision(script_dir, revision): + for branch in MIGRATION_BRANCHES: + if branch in revision.path: + _validate_single_revision_labels( + script_dir, revision, label=branch) + return + + # validate script from branchless part of migration rules + _validate_single_revision_labels(script_dir, revision) + + +def validate_labels(config): + script_dir = alembic_script.ScriptDirectory.from_config(config) + revisions = [v for v in script_dir.walk_revisions(base='base', + head='heads')] + for revision in revisions: + _validate_revision(script_dir, revision) + + def _get_sorted_heads(script): '''Get the list of heads for all branches, sorted.''' - heads = script.get_heads() - # +1 stands for the core 'kilo' branch, the one that didn't have branches - if len(heads) > len(MIGRATION_BRANCHES) + 1: - alembic_util.err(_('No new branches are allowed except: %s') % - ' '.join(MIGRATION_BRANCHES)) - return sorted(heads) + return sorted(script.get_heads()) def validate_heads_file(config): '''Check that HEADS file contains the latest heads for each branch.''' script = alembic_script.ScriptDirectory.from_config(config) expected_heads = _get_sorted_heads(script) - heads_path = _get_active_head_file_path(CONF) + heads_path = 
_get_active_head_file_path(config) try: with open(heads_path) as file_: observed_heads = file_.read().split() @@ -204,9 +284,13 @@ def update_heads_file(config): '''Update HEADS file with the latest branch heads.''' script = alembic_script.ScriptDirectory.from_config(config) heads = _get_sorted_heads(script) - heads_path = _get_active_head_file_path(CONF) + heads_path = _get_active_head_file_path(config) with open(heads_path, 'w+') as f: f.write('\n'.join(heads)) + if _use_separate_migration_branches(config): + old_head_file = _get_head_file_path(config) + if os.path.exists(old_head_file): + os.remove(old_head_file) def add_command_parsers(subparsers): @@ -253,88 +337,153 @@ command_opt = cfg.SubCommandOpt('command', CONF.register_cli_opt(command_opt) -def _get_neutron_service_base(neutron_config): - '''Return base python namespace name for a service.''' - if neutron_config.service: - validate_service_installed(neutron_config.service) - return "neutron_%s" % neutron_config.service - return "neutron" +def _get_project_base(config): + '''Return the base python namespace name for a project.''' + script_location = config.get_main_option('script_location') + return script_location.split(':')[0].split('.')[0] -def _get_root_versions_dir(neutron_config): +def _get_package_root_dir(config): + root_module = importutils.try_import(_get_project_base(config)) + if not root_module: + project = config.get_main_option('neutron_project') + alembic_util.err(_("Failed to locate source for %s.") % project) + # The root_module.__file__ property is a path like + # '/opt/stack/networking-foo/networking_foo/__init__.py' + # We return just + # '/opt/stack/networking-foo' + return os.path.dirname(os.path.dirname(root_module.__file__)) + + +def _get_root_versions_dir(config): '''Return root directory that contains all migration rules.''' - service_base = _get_neutron_service_base(neutron_config) - root_module = importutils.import_module(service_base) - return os.path.join( - 
os.path.dirname(root_module.__file__), - 'db/migration/alembic_migrations/versions') + root_dir = _get_package_root_dir(config) + script_location = config.get_main_option('script_location') + # Script location is something like: + # 'project_base.db.migration:alembic_migrations' + # Convert it to: + # 'project_base/db/migration/alembic_migrations/versions' + part1, part2 = script_location.split(':') + parts = part1.split('.') + part2.split('.') + ['versions'] + # Return the absolute path to the versions dir + return os.path.join(root_dir, *parts) -def _get_head_file_path(neutron_config): +def _get_head_file_path(config): '''Return the path of the file that contains single head.''' return os.path.join( - _get_root_versions_dir(neutron_config), + _get_root_versions_dir(config), HEAD_FILENAME) -def _get_heads_file_path(neutron_config): +def _get_heads_file_path(config): '''Return the path of the file that contains all latest heads, sorted.''' return os.path.join( - _get_root_versions_dir(neutron_config), + _get_root_versions_dir(config), HEADS_FILENAME) -def _get_active_head_file_path(neutron_config): +def _get_active_head_file_path(config): '''Return the path of the file that contains latest head(s), depending on whether multiple branches are used. 
''' - if _use_separate_migration_branches(neutron_config): - return _get_heads_file_path(neutron_config) - return _get_head_file_path(neutron_config) + if _use_separate_migration_branches(config): + return _get_heads_file_path(config) + return _get_head_file_path(config) -def _get_version_branch_path(neutron_config, branch=None): - version_path = _get_root_versions_dir(neutron_config) +def _get_version_branch_path(config, branch=None): + version_path = _get_root_versions_dir(config) if branch: return os.path.join(version_path, CURRENT_RELEASE, branch) return version_path -def _use_separate_migration_branches(neutron_config): +def _use_separate_migration_branches(config): '''Detect whether split migration branches should be used.''' - return (neutron_config.split_branches or + return (CONF.split_branches or # Use HEADS file to indicate the new, split migration world - os.path.exists(_get_heads_file_path(neutron_config))) + os.path.exists(_get_heads_file_path(config))) def _set_version_locations(config): '''Make alembic see all revisions in all migration branches.''' - version_paths = [] - - version_paths.append(_get_version_branch_path(CONF)) - if _use_separate_migration_branches(CONF): + version_paths = [_get_version_branch_path(config)] + if _use_separate_migration_branches(config): for branch in MIGRATION_BRANCHES: - version_paths.append(_get_version_branch_path(CONF, branch)) + version_paths.append(_get_version_branch_path(config, branch)) config.set_main_option('version_locations', ' '.join(version_paths)) -def validate_service_installed(service): - if not importutils.try_import('neutron_%s' % service): - alembic_util.err(_('Package neutron-%s not installed') % service) +def _get_installed_entrypoint(subproject): + '''Get the entrypoint for the subproject, which must be installed.''' + if subproject not in migration_entrypoints: + alembic_util.err(_('Package %s not installed') % subproject) + return migration_entrypoints[subproject] -def 
get_script_location(neutron_config): - location = '%s.db.migration:alembic_migrations' - return location % _get_neutron_service_base(neutron_config) +def _get_subproject_script_location(subproject): + '''Get the script location for the installed subproject.''' + entrypoint = _get_installed_entrypoint(subproject) + return ':'.join([entrypoint.module_name, entrypoint.attrs[0]]) -def get_alembic_config(): - config = alembic_config.Config(os.path.join(os.path.dirname(__file__), - 'alembic.ini')) - config.set_main_option('script_location', get_script_location(CONF)) - _set_version_locations(config) - return config +def _get_service_script_location(service): + '''Get the script location for the service, which must be installed.''' + return _get_subproject_script_location('neutron-%s' % service) + + +def _get_subproject_base(subproject): + '''Get the import base name for the installed subproject.''' + entrypoint = _get_installed_entrypoint(subproject) + return entrypoint.module_name.split('.')[0] + + +def get_alembic_configs(): + '''Return a list of alembic configs, one per project. + ''' + + # Get the script locations for the specified or installed projects. + # Which projects to get script locations for is determined by the CLI + # options as follows: + # --service X # only subproject neutron-X + # --subproject Y # only subproject Y + # (none specified) # neutron and all installed subprojects + script_locations = {} + if CONF.service: + script_location = _get_service_script_location(CONF.service) + script_locations['neutron-%s' % CONF.service] = script_location + elif CONF.subproject: + script_location = _get_subproject_script_location(CONF.subproject) + script_locations[CONF.subproject] = script_location + else: + for subproject, ep in migration_entrypoints.items(): + script_locations[subproject] = _get_subproject_script_location( + subproject) + + # Return a list of alembic configs from the projects in the + # script_locations dict. 
If neutron is in the list it is first. + configs = [] + project_seq = sorted(script_locations.keys()) + # Core neutron must be the first project if there is more than one + if len(project_seq) > 1 and 'neutron' in project_seq: + project_seq.insert(0, project_seq.pop(project_seq.index('neutron'))) + for project in project_seq: + config = alembic_config.Config(neutron_alembic_ini) + config.set_main_option('neutron_project', project) + script_location = script_locations[project] + config.set_main_option('script_location', script_location) + _set_version_locations(config) + config.neutron_config = CONF + configs.append(config) + + return configs + + +def get_neutron_config(): + # Neutron's alembic config is always the first one + return get_alembic_configs()[0] def run_sanity_checks(config, revision): @@ -357,10 +506,14 @@ def run_sanity_checks(config, revision): script_dir.run_env() +def validate_cli_options(): + if CONF.subproject and CONF.service: + alembic_util.err(_("Cannot specify both --service and --subproject.")) + + def main(): CONF(project='neutron') - config = get_alembic_config() - config.neutron_config = CONF - - #TODO(gongysh) enable logging - CONF.command.func(config, CONF.command.name) + validate_cli_options() + for config in get_alembic_configs(): + #TODO(gongysh) enable logging + CONF.command.func(config, CONF.command.name) diff --git a/neutron/db/migration/models/head.py b/neutron/db/migration/models/head.py index 8680b06a4f4..72e5e660e04 100644 --- a/neutron/db/migration/models/head.py +++ b/neutron/db/migration/models/head.py @@ -41,6 +41,7 @@ from neutron.db import model_base from neutron.db import models_v2 # noqa from neutron.db import portbindings_db # noqa from neutron.db import portsecurity_db # noqa +from neutron.db.qos import models as qos_models # noqa from neutron.db.quota import models # noqa from neutron.db import rbac_db_models # noqa from neutron.db import securitygroups_db # noqa @@ -54,19 +55,13 @@ from neutron.plugins.cisco.db 
import n1kv_models_v2 # noqa from neutron.plugins.cisco.db import network_models_v2 # noqa from neutron.plugins.ml2.drivers.brocade.db import ( # noqa models as ml2_brocade_models) -from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa - nexus_models_v2 as ml2_nexus_models_v2) -from neutron.plugins.ml2.drivers.cisco.ucsm import ucsm_model # noqa from neutron.plugins.ml2.drivers import type_flat # noqa +from neutron.plugins.ml2.drivers import type_geneve # noqa from neutron.plugins.ml2.drivers import type_gre # noqa from neutron.plugins.ml2.drivers import type_vlan # noqa from neutron.plugins.ml2.drivers import type_vxlan # noqa from neutron.plugins.ml2 import models # noqa -from neutron.plugins.nec.db import models as nec_models # noqa from neutron.plugins.nuage import nuage_models # noqa -from neutron.plugins.vmware.dbexts import nsx_models # noqa -from neutron.plugins.vmware.dbexts import nsxv_models # noqa -from neutron.plugins.vmware.dbexts import vcns_models # noqa def get_metadata(): diff --git a/neutron/db/model_base.py b/neutron/db/model_base.py index e1abbd5533a..7671e8b3296 100644 --- a/neutron/db/model_base.py +++ b/neutron/db/model_base.py @@ -14,9 +14,35 @@ # limitations under the License. 
from oslo_db.sqlalchemy import models +from oslo_utils import uuidutils +import sqlalchemy as sa from sqlalchemy.ext import declarative from sqlalchemy import orm +from neutron.api.v2 import attributes as attr + + +class HasTenant(object): + """Tenant mixin, add to subclasses that have a tenant.""" + + # NOTE(jkoelker) tenant_id is just a free form string ;( + tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True) + + +class HasId(object): + """id mixin, add to subclasses that have an id.""" + + id = sa.Column(sa.String(36), + primary_key=True, + default=uuidutils.generate_uuid) + + +class HasStatusDescription(object): + """Status with description mixin.""" + + status = sa.Column(sa.String(16), nullable=False) + status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) + class NeutronBase(models.ModelBase): """Base class for Neutron Models.""" diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py index 5a8b8311eba..51a483c0084 100644 --- a/neutron/db/models_v2.py +++ b/neutron/db/models_v2.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy import orm @@ -21,28 +20,14 @@ from sqlalchemy import orm from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.db import model_base +from neutron.db import rbac_db_models -class HasTenant(object): - """Tenant mixin, add to subclasses that have a tenant.""" - - # NOTE(jkoelker) tenant_id is just a free form string ;( - tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True) - - -class HasId(object): - """id mixin, add to subclasses that have an id.""" - - id = sa.Column(sa.String(36), - primary_key=True, - default=uuidutils.generate_uuid) - - -class HasStatusDescription(object): - """Status with description mixin.""" - - status = sa.Column(sa.String(16), nullable=False) - status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) +# NOTE(kevinbenton): these are here for external projects that expect them +# to be found in this module. 
+HasTenant = model_base.HasTenant +HasId = model_base.HasId +HasStatusDescription = model_base.HasStatusDescription class IPAvailabilityRange(model_base.BASEV2): @@ -141,6 +126,7 @@ class Port(model_base.BASEV2, HasId, HasTenant): device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False) device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN), nullable=False) + dns_name = sa.Column(sa.String(255), nullable=True) __table_args__ = ( sa.Index( 'ix_ports_network_id_mac_address', 'network_id', 'mac_address'), @@ -154,7 +140,8 @@ class Port(model_base.BASEV2, HasId, HasTenant): def __init__(self, id=None, tenant_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, - device_id=None, device_owner=None, fixed_ips=None): + device_id=None, device_owner=None, fixed_ips=None, + dns_name=None): self.id = id self.tenant_id = tenant_id self.name = name @@ -163,6 +150,7 @@ class Port(model_base.BASEV2, HasId, HasTenant): self.admin_state_up = admin_state_up self.device_owner = device_owner self.device_id = device_id + self.dns_name = dns_name # Since this is a relationship only set it if one is passed in. 
if fixed_ips: self.fixed_ips = fixed_ips @@ -228,7 +216,8 @@ class SubnetPoolPrefix(model_base.BASEV2): cidr = sa.Column(sa.String(64), nullable=False, primary_key=True) subnetpool_id = sa.Column(sa.String(36), - sa.ForeignKey('subnetpools.id'), + sa.ForeignKey('subnetpools.id', + ondelete='CASCADE'), nullable=False, primary_key=True) @@ -264,6 +253,6 @@ class Network(model_base.BASEV2, HasId, HasTenant): admin_state_up = sa.Column(sa.Boolean) mtu = sa.Column(sa.Integer, nullable=True) vlan_transparent = sa.Column(sa.Boolean, nullable=True) - rbac_entries = orm.relationship("NetworkRBAC", backref='network', - lazy='joined', + rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC, + backref='network', lazy='joined', cascade='all, delete, delete-orphan') diff --git a/neutron/plugins/ml2/drivers/mech_nuage/__init__.py b/neutron/db/qos/__init__.py similarity index 100% rename from neutron/plugins/ml2/drivers/mech_nuage/__init__.py rename to neutron/db/qos/__init__.py diff --git a/neutron/db/qos/api.py b/neutron/db/qos/api.py new file mode 100644 index 00000000000..cdc4bb44cdd --- /dev/null +++ b/neutron/db/qos/api.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_db import exception as oslo_db_exception +from sqlalchemy.orm import exc as orm_exc + +from neutron.common import exceptions as n_exc +from neutron.db import common_db_mixin as db +from neutron.db.qos import models + + +def create_policy_network_binding(context, policy_id, network_id): + try: + with context.session.begin(subtransactions=True): + db_obj = models.QosNetworkPolicyBinding(policy_id=policy_id, + network_id=network_id) + context.session.add(db_obj) + except oslo_db_exception.DBReferenceError: + raise n_exc.NetworkQosBindingNotFound(net_id=network_id, + policy_id=policy_id) + + +def delete_policy_network_binding(context, policy_id, network_id): + try: + with context.session.begin(subtransactions=True): + db_object = (db.model_query(context, + models.QosNetworkPolicyBinding) + .filter_by(policy_id=policy_id, + network_id=network_id).one()) + context.session.delete(db_object) + except orm_exc.NoResultFound: + raise n_exc.NetworkQosBindingNotFound(net_id=network_id, + policy_id=policy_id) + + +def create_policy_port_binding(context, policy_id, port_id): + try: + with context.session.begin(subtransactions=True): + db_obj = models.QosPortPolicyBinding(policy_id=policy_id, + port_id=port_id) + context.session.add(db_obj) + except oslo_db_exception.DBReferenceError: + raise n_exc.PortQosBindingNotFound(port_id=port_id, + policy_id=policy_id) + + +def delete_policy_port_binding(context, policy_id, port_id): + try: + with context.session.begin(subtransactions=True): + db_object = (db.model_query(context, models.QosPortPolicyBinding) + .filter_by(policy_id=policy_id, + port_id=port_id).one()) + context.session.delete(db_object) + except orm_exc.NoResultFound: + raise n_exc.PortQosBindingNotFound(port_id=port_id, + policy_id=policy_id) diff --git a/neutron/db/qos/models.py b/neutron/db/qos/models.py new file mode 100755 index 00000000000..3e1d027c68c --- /dev/null +++ b/neutron/db/qos/models.py @@ -0,0 +1,80 @@ +# Copyright 2015 Huawei Technologies 
India Pvt Ltd, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +import sqlalchemy as sa + +from neutron.api.v2 import attributes as attrs +from neutron.db import model_base +from neutron.db import models_v2 + + +LOG = logging.getLogger(__name__) + + +class QosPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + __tablename__ = 'qos_policies' + name = sa.Column(sa.String(attrs.NAME_MAX_LEN)) + description = sa.Column(sa.String(attrs.DESCRIPTION_MAX_LEN)) + shared = sa.Column(sa.Boolean, nullable=False) + + +class QosNetworkPolicyBinding(model_base.BASEV2): + __tablename__ = 'qos_network_policy_bindings' + policy_id = sa.Column(sa.String(36), + sa.ForeignKey('qos_policies.id', + ondelete='CASCADE'), + nullable=False, + primary_key=True) + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', + ondelete='CASCADE'), + nullable=False, + unique=True, + primary_key=True) + network = sa.orm.relationship( + models_v2.Network, + backref=sa.orm.backref("qos_policy_binding", uselist=False, + cascade='delete', lazy='joined')) + + +class QosPortPolicyBinding(model_base.BASEV2): + __tablename__ = 'qos_port_policy_bindings' + policy_id = sa.Column(sa.String(36), + sa.ForeignKey('qos_policies.id', + ondelete='CASCADE'), + nullable=False, + primary_key=True) + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', + ondelete='CASCADE'), + nullable=False, + unique=True, + 
primary_key=True) + port = sa.orm.relationship( + models_v2.Port, + backref=sa.orm.backref("qos_policy_binding", uselist=False, + cascade='delete', lazy='joined')) + + +class QosBandwidthLimitRule(models_v2.HasId, model_base.BASEV2): + __tablename__ = 'qos_bandwidth_limit_rules' + qos_policy_id = sa.Column(sa.String(36), + sa.ForeignKey('qos_policies.id', + ondelete='CASCADE'), + nullable=False, + unique=True) + max_kbps = sa.Column(sa.Integer) + max_burst_kbps = sa.Column(sa.Integer) diff --git a/neutron/db/quota/api.py b/neutron/db/quota/api.py index 40a0a597d38..8ff8cb971ed 100644 --- a/neutron/db/quota/api.py +++ b/neutron/db/quota/api.py @@ -13,11 +13,21 @@ # under the License. import collections +import datetime + +import sqlalchemy as sa +from sqlalchemy.orm import exc as orm_exc +from sqlalchemy import sql from neutron.db import common_db_mixin as common_db_api from neutron.db.quota import models as quota_models +# Wrapper for utcnow - needed for mocking it in unit tests +def utcnow(): + return datetime.datetime.utcnow() + + class QuotaUsageInfo(collections.namedtuple( 'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'reserved', 'dirty'])): @@ -27,6 +37,12 @@ class QuotaUsageInfo(collections.namedtuple( return self.reserved + self.used +class ReservationInfo(collections.namedtuple( + 'ReservationInfo', ['reservation_id', 'tenant_id', + 'expiration', 'deltas'])): + """Information about a resource reservation.""" + + def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id, lock_for_update=False): """Return usage info for a given resource and tenant. @@ -157,3 +173,106 @@ def set_all_quota_usage_dirty(context, resource, dirty=True): query = common_db_api.model_query(context, quota_models.QuotaUsage) query = query.filter_by(resource=resource) return query.update({'dirty': dirty}) + + +def create_reservation(context, tenant_id, deltas, expiration=None): + # This method is usually called from within another transaction. 
+ # Consider using begin_nested + with context.session.begin(subtransactions=True): + expiration = expiration or (utcnow() + datetime.timedelta(0, 120)) + resv = quota_models.Reservation(tenant_id=tenant_id, + expiration=expiration) + context.session.add(resv) + for (resource, delta) in deltas.items(): + context.session.add( + quota_models.ResourceDelta(resource=resource, + amount=delta, + reservation=resv)) + # quota_usage for all resources involved in this reservation must + # be marked as dirty + set_resources_quota_usage_dirty( + context, deltas.keys(), tenant_id) + return ReservationInfo(resv['id'], + resv['tenant_id'], + resv['expiration'], + dict((delta.resource, delta.amount) + for delta in resv.resource_deltas)) + + +def get_reservation(context, reservation_id): + query = context.session.query(quota_models.Reservation).filter_by( + id=reservation_id) + resv = query.first() + if not resv: + return + return ReservationInfo(resv['id'], + resv['tenant_id'], + resv['expiration'], + dict((delta.resource, delta.amount) + for delta in resv.resource_deltas)) + + +def remove_reservation(context, reservation_id, set_dirty=False): + delete_query = context.session.query(quota_models.Reservation).filter_by( + id=reservation_id) + # Not handling MultipleResultsFound as the query is filtering by primary + # key + try: + reservation = delete_query.one() + except orm_exc.NoResultFound: + # TODO(salv-orlando): Raise here and then handle the exception? + return + tenant_id = reservation.tenant_id + resources = [delta.resource for delta in reservation.resource_deltas] + num_deleted = delete_query.delete() + if set_dirty: + # quota_usage for all resource involved in this reservation must + # be marked as dirty + set_resources_quota_usage_dirty(context, resources, tenant_id) + return num_deleted + + +def get_reservations_for_resources(context, tenant_id, resources, + expired=False): + """Retrieve total amount of reservations for specified resources. 
+ + :param context: Neutron context with db session + :param tenant_id: Tenant identifier + :param resources: Resources for which reserved amounts should be fetched + :param expired: False to fetch active reservations, True to fetch expired + reservations (defaults to False) + :returns: a dictionary mapping resources with corresponding deltas + """ + if not resources: + # Do not waste time + return + now = utcnow() + resv_query = context.session.query( + quota_models.ResourceDelta.resource, + quota_models.Reservation.expiration, + sql.func.sum(quota_models.ResourceDelta.amount)).join( + quota_models.Reservation) + if expired: + exp_expr = (quota_models.Reservation.expiration < now) + else: + exp_expr = (quota_models.Reservation.expiration >= now) + resv_query = resv_query.filter(sa.and_( + quota_models.Reservation.tenant_id == tenant_id, + quota_models.ResourceDelta.resource.in_(resources), + exp_expr)).group_by( + quota_models.ResourceDelta.resource, + quota_models.Reservation.expiration) + return dict((resource, total_reserved) + for (resource, exp, total_reserved) in resv_query) + + +def remove_expired_reservations(context, tenant_id=None): + now = utcnow() + resv_query = context.session.query(quota_models.Reservation) + if tenant_id: + tenant_expr = (quota_models.Reservation.tenant_id == tenant_id) + else: + tenant_expr = sql.true() + resv_query = resv_query.filter(sa.and_( + tenant_expr, quota_models.Reservation.expiration < now)) + return resv_query.delete() diff --git a/neutron/db/quota/driver.py b/neutron/db/quota/driver.py index cf6031ae2d8..3b72ffdd4ed 100644 --- a/neutron/db/quota/driver.py +++ b/neutron/db/quota/driver.py @@ -13,9 +13,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+from oslo_db import api as oslo_db_api +from oslo_log import log + from neutron.common import exceptions +from neutron.db import api as db_api +from neutron.db.quota import api as quota_api from neutron.db.quota import models as quota_models +LOG = log.getLogger(__name__) + class DbQuotaDriver(object): """Driver to perform necessary checks to enforce quotas and obtain quota @@ -42,7 +49,8 @@ class DbQuotaDriver(object): # update with tenant specific limits q_qry = context.session.query(quota_models.Quota).filter_by( tenant_id=tenant_id) - tenant_quota.update((q['resource'], q['limit']) for q in q_qry) + for item in q_qry: + tenant_quota[item['resource']] = item['limit'] return tenant_quota @@ -83,6 +91,8 @@ class DbQuotaDriver(object): tenant_quota[quota['resource']] = quota['limit'] + # Convert values to a list to as caller expect an indexable iterable, + # where python3's dict_values does not support indexing return list(all_tenant_quotas.values()) @staticmethod @@ -116,6 +126,112 @@ class DbQuotaDriver(object): return dict((k, v) for k, v in quotas.items()) + def _handle_expired_reservations(self, context, tenant_id, + resource, expired_amount): + LOG.debug(("Adjusting usage for resource %(resource)s: " + "removing %(expired)d reserved items"), + {'resource': resource, + 'expired': expired_amount}) + # TODO(salv-orlando): It should be possible to do this + # operation for all resources with a single query. 
+ # Update reservation usage + quota_api.set_quota_usage( + context, + resource, + tenant_id, + reserved=-expired_amount, + delta=True) + # Delete expired reservations (we don't want them to accrue + # in the database) + quota_api.remove_expired_reservations( + context, tenant_id=tenant_id) + + @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, + retry_on_request=True, + retry_on_deadlock=True) + def make_reservation(self, context, tenant_id, resources, deltas, plugin): + # Lock current reservation table + # NOTE(salv-orlando): This routine uses DB write locks. + # These locks are acquired by the count() method invoked on resources. + # Please put your shotguns aside. + # A non locking algorithm for handling reservation is feasible, however + # it will require two database writes even in cases when there are not + # concurrent reservations. + # For this reason it might be advisable to handle contention using + # this kind of locks and paying the cost of a write set certification + # failure when a mysql galera cluster is employed. Also, this class of + # locks should be ok to use when support for sending "hotspot" writes + # to a single node will be avaialable. + requested_resources = deltas.keys() + with context.session.begin(): + # Gather current usage information + # TODO(salv-orlando): calling count() for every resource triggers + # multiple queries on quota usage. 
This should be improved, however + # this is not an urgent matter as the REST API currently only + # allows allocation of a resource at a time + # NOTE: pass plugin too for compatibility with CountableResource + # instances + current_usages = dict( + (resource, resources[resource].count( + context, plugin, tenant_id)) for + resource in requested_resources) + # get_tenant_quotes needs in inout a dictionary mapping resource + # name to BaseResosurce instances so that the default quota can be + # retrieved + current_limits = self.get_tenant_quotas( + context, resources, tenant_id) + # Adjust for expired reservations. Apparently it is cheaper than + # querying everytime for active reservations and counting overall + # quantity of resources reserved + expired_deltas = quota_api.get_reservations_for_resources( + context, tenant_id, requested_resources, expired=True) + # Verify that the request can be accepted with current limits + resources_over_limit = [] + for resource in requested_resources: + expired_reservations = expired_deltas.get(resource, 0) + total_usage = current_usages[resource] - expired_reservations + # A negative quota limit means infinite + if current_limits[resource] < 0: + LOG.debug(("Resource %(resource)s has unlimited quota " + "limit. It is possible to allocate %(delta)s " + "items."), {'resource': resource, + 'delta': deltas[resource]}) + continue + res_headroom = current_limits[resource] - total_usage + LOG.debug(("Attempting to reserve %(delta)d items for " + "resource %(resource)s. 
Total usage: %(total)d; " + "quota limit: %(limit)d; headroom:%(headroom)d"), + {'resource': resource, + 'delta': deltas[resource], + 'total': total_usage, + 'limit': current_limits[resource], + 'headroom': res_headroom}) + if res_headroom < deltas[resource]: + resources_over_limit.append(resource) + if expired_reservations: + self._handle_expired_reservations( + context, tenant_id, resource, expired_reservations) + + if resources_over_limit: + raise exceptions.OverQuota(overs=sorted(resources_over_limit)) + # Success, store the reservation + # TODO(salv-orlando): Make expiration time configurable + return quota_api.create_reservation( + context, tenant_id, deltas) + + def commit_reservation(self, context, reservation_id): + # Do not mark resource usage as dirty. If a reservation is committed, + # then the releveant resources have been created. Usage data for these + # resources has therefore already been marked dirty. + quota_api.remove_reservation(context, reservation_id, + set_dirty=False) + + def cancel_reservation(self, context, reservation_id): + # Mark resource usage as dirty so the next time both actual resources + # used and reserved will be recalculated + quota_api.remove_reservation(context, reservation_id, + set_dirty=True) + def limit_check(self, context, tenant_id, resources, values): """Check simple quota limits. diff --git a/neutron/db/quota/models.py b/neutron/db/quota/models.py index b0abd0d9f54..a4dbd7117e4 100644 --- a/neutron/db/quota/models.py +++ b/neutron/db/quota/models.py @@ -13,12 +13,33 @@ # under the License. 
import sqlalchemy as sa +from sqlalchemy import orm from sqlalchemy import sql from neutron.db import model_base from neutron.db import models_v2 +class ResourceDelta(model_base.BASEV2): + resource = sa.Column(sa.String(255), primary_key=True) + reservation_id = sa.Column(sa.String(36), + sa.ForeignKey('reservations.id', + ondelete='CASCADE'), + primary_key=True, + nullable=False) + # Requested amount of resource + amount = sa.Column(sa.Integer) + + +class Reservation(model_base.BASEV2, models_v2.HasId): + tenant_id = sa.Column(sa.String(255)) + expiration = sa.Column(sa.DateTime()) + resource_deltas = orm.relationship(ResourceDelta, + backref='reservation', + lazy="joined", + cascade='all, delete-orphan') + + class Quota(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represent a single quota override for a tenant. diff --git a/neutron/db/rbac_db_mixin.py b/neutron/db/rbac_db_mixin.py new file mode 100644 index 00000000000..182a9563995 --- /dev/null +++ b/neutron/db/rbac_db_mixin.py @@ -0,0 +1,123 @@ +# Copyright (c) 2015 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy.orm import exc + +from neutron.callbacks import events +from neutron.callbacks import exceptions as c_exc +from neutron.callbacks import registry +from neutron.common import exceptions as n_exc +from neutron.db import common_db_mixin +from neutron.db import rbac_db_models as models +from neutron.extensions import rbac as ext_rbac + +# resource name using in callbacks +RBAC_POLICY = 'rbac-policy' + + +class RbacPluginMixin(common_db_mixin.CommonDbMixin): + """Plugin mixin that implements the RBAC DB operations.""" + + object_type_cache = {} + supported_extension_aliases = ['rbac-policies'] + + def create_rbac_policy(self, context, rbac_policy): + e = rbac_policy['rbac_policy'] + try: + registry.notify(RBAC_POLICY, events.BEFORE_CREATE, self, + context=context, object_type=e['object_type'], + policy=e) + except c_exc.CallbackFailure as e: + raise n_exc.InvalidInput(error_message=e) + dbmodel = models.get_type_model_map()[e['object_type']] + tenant_id = self._get_tenant_id_for_create(context, e) + with context.session.begin(subtransactions=True): + db_entry = dbmodel(object_id=e['object_id'], + target_tenant=e['target_tenant'], + action=e['action'], + tenant_id=tenant_id) + context.session.add(db_entry) + return self._make_rbac_policy_dict(db_entry) + + def _make_rbac_policy_dict(self, db_entry, fields=None): + res = {f: db_entry[f] for f in ('id', 'tenant_id', 'target_tenant', + 'action', 'object_id')} + res['object_type'] = db_entry.object_type + return self._fields(res, fields) + + def update_rbac_policy(self, context, id, rbac_policy): + pol = rbac_policy['rbac_policy'] + entry = self._get_rbac_policy(context, id) + object_type = entry['object_type'] + try: + registry.notify(RBAC_POLICY, events.BEFORE_UPDATE, self, + context=context, policy=entry, + object_type=object_type, policy_update=pol) + except c_exc.CallbackFailure as ex: + raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'], + details=ex) + with 
context.session.begin(subtransactions=True): + entry.update(pol) + return self._make_rbac_policy_dict(entry) + + def delete_rbac_policy(self, context, id): + entry = self._get_rbac_policy(context, id) + object_type = entry['object_type'] + try: + registry.notify(RBAC_POLICY, events.BEFORE_DELETE, self, + context=context, object_type=object_type, + policy=entry) + except c_exc.CallbackFailure as ex: + raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'], + details=ex) + with context.session.begin(subtransactions=True): + context.session.delete(entry) + self.object_type_cache.pop(id, None) + + def _get_rbac_policy(self, context, id): + object_type = self._get_object_type(context, id) + dbmodel = models.get_type_model_map()[object_type] + try: + return self._model_query(context, + dbmodel).filter(dbmodel.id == id).one() + except exc.NoResultFound: + raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type) + + def get_rbac_policy(self, context, id, fields=None): + return self._make_rbac_policy_dict( + self._get_rbac_policy(context, id), fields=fields) + + def get_rbac_policies(self, context, filters=None, fields=None, + sorts=None, limit=None, page_reverse=False): + model = common_db_mixin.UnionModel( + models.get_type_model_map(), 'object_type') + return self._get_collection( + context, model, self._make_rbac_policy_dict, filters=filters, + sorts=sorts, limit=limit, page_reverse=page_reverse) + + def _get_object_type(self, context, entry_id): + """Scans all RBAC tables for an ID to figure out the type. + + This will be an expensive operation as the number of RBAC tables grows. + The result is cached since object types cannot be updated for a policy. + """ + if entry_id in self.object_type_cache: + return self.object_type_cache[entry_id] + for otype, model in models.get_type_model_map().items(): + if (context.session.query(model). 
+ filter(model.id == entry_id).first()): + self.object_type_cache[entry_id] = otype + return otype + raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown') diff --git a/neutron/db/rbac_db_models.py b/neutron/db/rbac_db_models.py index 9e0aa44866e..37314664337 100644 --- a/neutron/db/rbac_db_models.py +++ b/neutron/db/rbac_db_models.py @@ -20,7 +20,6 @@ from sqlalchemy.orm import validates from neutron.common import exceptions as n_exc from neutron.db import model_base -from neutron.db import models_v2 class InvalidActionForType(n_exc.InvalidInput): @@ -28,7 +27,7 @@ class InvalidActionForType(n_exc.InvalidInput): "'%(object_type)s'. Valid actions: %(valid_actions)s") -class RBACColumns(models_v2.HasId, models_v2.HasTenant): +class RBACColumns(model_base.HasId, model_base.HasTenant): """Mixin that object-specific RBAC tables should inherit. All RBAC tables should inherit directly from this one because diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py index c9497504d0e..e04634e94e5 100644 --- a/neutron/db/securitygroups_db.py +++ b/neutron/db/securitygroups_db.py @@ -13,7 +13,7 @@ # under the License. 
import netaddr -from oslo_db import exception +from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa @@ -430,6 +430,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): ip_proto = self._get_ip_proto_number(rule['protocol']) if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: if (rule['port_range_min'] is not None and + rule['port_range_max'] is not None and rule['port_range_min'] <= rule['port_range_max']): pass else: @@ -437,7 +438,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): elif ip_proto == constants.PROTO_NUM_ICMP: for attr, field in [('port_range_min', 'type'), ('port_range_max', 'code')]: - if rule[attr] > 255: + if rule[attr] is not None and rule[attr] > 255: raise ext_sg.SecurityGroupInvalidIcmpValue( field=field, attr=attr, value=rule[attr]) if (rule['port_range_min'] is None and @@ -648,14 +649,23 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): def _ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. - :returns: the default security group id. + :returns: the default security group id for given tenant. """ - query = self._model_query(context, DefaultSecurityGroup) - # the next loop should do 2 iterations at max - while True: + # Make no more than two attempts + for attempts in (1, 2): try: + query = self._model_query(context, DefaultSecurityGroup) default_group = query.filter_by(tenant_id=tenant_id).one() - except exc.NoResultFound: + return default_group['security_group_id'] + except exc.NoResultFound as ex: + if attempts > 1: + # the second iteration means that attempt to add default + # group failed with duplicate error. 
Since we're still + # not seeing this group we're most probably inside a + # transaction with REPEATABLE READ isolation level -> + # need to restart the whole transaction + raise db_exc.RetryRequest(ex) + security_group = { 'security_group': {'name': 'default', @@ -663,16 +673,13 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): 'description': _('Default security group')} } try: - ret = self.create_security_group( + security_group = self.create_security_group( context, security_group, default_sg=True) - except exception.DBDuplicateEntry as ex: + return security_group['id'] + except db_exc.DBDuplicateEntry as ex: + # default security group was created concurrently LOG.debug("Duplicate default security group %s was " "not created", ex.value) - continue - else: - return ret['id'] - else: - return default_group['security_group_id'] def _get_security_groups_on_port(self, context, port): """Check that all security groups on port belong to tenant. @@ -709,11 +716,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): tenant_id = self._get_tenant_id_for_create(context, port['port']) default_sg = self._ensure_default_security_group(context, tenant_id) - if attributes.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): - sgids = port['port'].get(ext_sg.SECURITYGROUPS) - else: - sgids = [default_sg] - port['port'][ext_sg.SECURITYGROUPS] = sgids + if not attributes.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): + port['port'][ext_sg.SECURITYGROUPS] = [default_sg] def _check_update_deletes_security_groups(self, port): """Return True if port has as a security group and it's value diff --git a/neutron/extensions/dns.py b/neutron/extensions/dns.py new file mode 100644 index 00000000000..495e826521a --- /dev/null +++ b/neutron/extensions/dns.py @@ -0,0 +1,177 @@ +# Copyright (c) 2015 Rackspace +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re +import six + +from oslo_config import cfg + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as n_exc + +DNS_LABEL_MAX_LEN = 63 +DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN +FQDN_MAX_LEN = 255 +DNS_DOMAIN_DEFAULT = 'openstacklocal.' + + +def _validate_dns_name(data, max_len=FQDN_MAX_LEN): + msg = _validate_dns_format(data, max_len) + if msg: + return msg + request_dns_name = _get_request_dns_name(data) + if request_dns_name: + msg = _validate_dns_name_with_dns_domain(request_dns_name) + if msg: + return msg + + +def _validate_dns_format(data, max_len=FQDN_MAX_LEN): + # NOTE: An individual name regex instead of an entire FQDN was used + # because its easier to make correct. The logic should validate that the + # dns_name matches RFC 1123 (section 2.1) and RFC 952. + if not data: + return + try: + # Trailing periods are allowed to indicate that a name is fully + # qualified per RFC 1034 (page 7). 
+ trimmed = data if not data.endswith('.') else data[:-1] + if len(trimmed) > 255: + raise TypeError( + _("'%s' exceeds the 255 character FQDN limit") % trimmed) + names = trimmed.split('.') + for name in names: + if not name: + raise TypeError(_("Encountered an empty component.")) + if name.endswith('-') or name[0] == '-': + raise TypeError( + _("Name '%s' must not start or end with a hyphen.") % name) + if not re.match(DNS_LABEL_REGEX, name): + raise TypeError( + _("Name '%s' must be 1-63 characters long, each of " + "which can only be alphanumeric or a hyphen.") % name) + # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if + # it's an FQDN. + if len(names) > 1 and re.match("^[0-9]+$", names[-1]): + raise TypeError(_("TLD '%s' must not be all numeric") % names[-1]) + except TypeError as e: + msg = _("'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s") % { + 'data': data, 'reason': e.message} + return msg + + +def _validate_dns_name_with_dns_domain(request_dns_name): + # If a PQDN was passed, make sure the FQDN that will be generated is of + # legal size + dns_domain = _get_dns_domain() + higher_labels = dns_domain + if dns_domain: + higher_labels = '.%s' % dns_domain + higher_labels_len = len(higher_labels) + dns_name_len = len(request_dns_name) + if not request_dns_name.endswith('.'): + if dns_name_len + higher_labels_len > FQDN_MAX_LEN: + msg = _("The dns_name passed is a PQDN and its size is " + "'%(dns_name_len)s'. The dns_domain option in " + "neutron.conf is set to %(dns_domain)s, with a " + "length of '%(higher_labels_len)s'. When the two are " + "concatenated to form a FQDN (with a '.' 
at the end), " + "the resulting length exceeds the maximum size " + "of '%(fqdn_max_len)s'" + ) % {'dns_name_len': dns_name_len, + 'dns_domain': cfg.CONF.dns_domain, + 'higher_labels_len': higher_labels_len, + 'fqdn_max_len': FQDN_MAX_LEN} + return msg + return + + # A FQDN was passed + if (dns_name_len <= higher_labels_len or not + request_dns_name.endswith(higher_labels)): + msg = _("The dns_name passed is a FQDN. Its higher level labels " + "must be equal to the dns_domain option in neutron.conf, " + "that has been set to '%(dns_domain)s'. It must also " + "include one or more valid DNS labels to the left " + "of '%(dns_domain)s'") % {'dns_domain': + cfg.CONF.dns_domain} + return msg + + +def _get_dns_domain(): + if not cfg.CONF.dns_domain: + return '' + if cfg.CONF.dns_domain.endswith('.'): + return cfg.CONF.dns_domain + return '%s.' % cfg.CONF.dns_domain + + +def _get_request_dns_name(data): + dns_domain = _get_dns_domain() + if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): + return data + return '' + + +def convert_to_lowercase(data): + if isinstance(data, six.string_types): + return data.lower() + msg = _("'%s' cannot be converted to lowercase string") % data + raise n_exc.InvalidInput(error_message=msg) + + +attr.validators['type:dns_name'] = ( + _validate_dns_name) + + +DNSNAME = 'dns_name' +DNSASSIGNMENT = 'dns_assignment' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + DNSNAME: {'allow_post': True, 'allow_put': True, + 'default': '', + 'convert_to': convert_to_lowercase, + 'validate': {'type:dns_name': FQDN_MAX_LEN}, + 'is_visible': True}, + DNSASSIGNMENT: {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + } +} + + +class Dns(extensions.ExtensionDescriptor): + """Extension class supporting DNS Integration.""" + + @classmethod + def get_name(cls): + return "DNS Integration" + + @classmethod + def get_alias(cls): + return "dns-integration" + + @classmethod + def get_description(cls): + return "Provides integration with internal DNS." 
+ + @classmethod + def get_updated(cls): + return "2015-08-15T18:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/extensions/metering.py b/neutron/extensions/metering.py index 82a24ae7b88..22d67b5e098 100644 --- a/neutron/extensions/metering.py +++ b/neutron/extensions/metering.py @@ -55,6 +55,7 @@ RESOURCE_ATTRIBUTE_MAP = { 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'shared': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': False, @@ -78,6 +79,7 @@ RESOURCE_ATTRIBUTE_MAP = { 'validate': {'type:subnet': None}}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True} } } diff --git a/neutron/extensions/portbindings.py b/neutron/extensions/portbindings.py index a72033405d3..1079a4e0043 100644 --- a/neutron/extensions/portbindings.py +++ b/neutron/extensions/portbindings.py @@ -75,18 +75,8 @@ VIF_TYPE_DVS = 'dvs' VIF_TYPE_BRIDGE = 'bridge' VIF_TYPE_802_QBG = '802.1qbg' VIF_TYPE_802_QBH = '802.1qbh' -VIF_TYPE_HYPERV = 'hyperv' VIF_TYPE_MIDONET = 'midonet' -VIF_TYPE_IB_HOSTDEV = 'ib_hostdev' -VIF_TYPE_HW_VEB = 'hw_veb' -VIF_TYPE_VROUTER = 'vrouter' VIF_TYPE_OTHER = 'other' -VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS, - VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG, - VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET, - VIF_TYPE_IB_HOSTDEV, VIF_TYPE_HW_VEB, - VIF_TYPE_DVS, VIF_TYPE_OTHER, VIF_TYPE_DISTRIBUTED, - VIF_TYPE_VROUTER] VNIC_NORMAL = 'normal' VNIC_DIRECT = 'direct' diff --git a/neutron/extensions/qos.py b/neutron/extensions/qos.py new file mode 100644 index 00000000000..6653416b78b --- /dev/null +++ b/neutron/extensions/qos.py @@ -0,0 +1,236 @@ +# 
Copyright (c) 2015 Red Hat Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import itertools + +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.api.v2 import resource_helper +from neutron import manager +from neutron.plugins.common import constants +from neutron.services.qos import qos_consts +from neutron.services import service_base + +QOS_PREFIX = "/qos" + +# Attribute Map +QOS_RULE_COMMON_FIELDS = { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, +} + +RESOURCE_ATTRIBUTE_MAP = { + 'policies': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'primary_key': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': '', + 'validate': {'type:string': None}}, + 'description': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': '', + 'validate': {'type:string': None}}, + 'shared': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': False, + 'convert_to': attr.convert_to_boolean}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'rules': {'allow_post': 
False, 'allow_put': False, 'is_visible': True}, + }, + 'rule_types': { + 'type': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + } +} + +SUB_RESOURCE_ATTRIBUTE_MAP = { + 'bandwidth_limit_rules': { + 'parent': {'collection_name': 'policies', + 'member_name': 'policy'}, + 'parameters': dict(QOS_RULE_COMMON_FIELDS, + **{'max_kbps': { + 'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None, + 'validate': {'type:non_negative': None}}, + 'max_burst_kbps': { + 'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': 0, + 'validate': {'type:non_negative': None}}}) + } +} + +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': {qos_consts.QOS_POLICY_ID: { + 'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': None, + 'validate': {'type:uuid_or_none': None}}}, + 'networks': {qos_consts.QOS_POLICY_ID: { + 'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': None, + 'validate': {'type:uuid_or_none': None}}}} + + +class Qos(extensions.ExtensionDescriptor): + """Quality of service API extension.""" + + @classmethod + def get_name(cls): + return "qos" + + @classmethod + def get_alias(cls): + return "qos" + + @classmethod + def get_description(cls): + return "The Quality of Service extension." 
+ + @classmethod + def get_updated(cls): + return "2015-06-08T10:00:00-00:00" + + @classmethod + def get_plugin_interface(cls): + return QoSPluginBase + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + special_mappings = {'policies': 'policy'} + plural_mappings = resource_helper.build_plural_mappings( + special_mappings, itertools.chain(RESOURCE_ATTRIBUTE_MAP, + SUB_RESOURCE_ATTRIBUTE_MAP)) + attr.PLURALS.update(plural_mappings) + + resources = resource_helper.build_resource_info( + plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.QOS, + translate_name=True, + allow_bulk=True) + + plugin = manager.NeutronManager.get_service_plugins()[constants.QOS] + for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: + resource_name = collection_name[:-1] + parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') + params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( + 'parameters') + + controller = base.create_resource(collection_name, resource_name, + plugin, params, + allow_bulk=True, + parent=parent, + allow_pagination=True, + allow_sorting=True) + + resource = extensions.ResourceExtension( + collection_name, + controller, parent, + path_prefix=QOS_PREFIX, + attr_map=params) + resources.append(resource) + + return resources + + def update_attributes_map(self, attributes, extension_attrs_map=None): + super(Qos, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return dict(EXTENDED_ATTRIBUTES_2_0.items() + + RESOURCE_ATTRIBUTE_MAP.items()) + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class QoSPluginBase(service_base.ServicePluginBase): + + path_prefix = QOS_PREFIX + + def get_plugin_description(self): + return "QoS Service Plugin for ports and networks" + + def get_plugin_type(self): + return constants.QOS + + @abc.abstractmethod + def get_policy(self, context, policy_id, fields=None): + pass + + 
@abc.abstractmethod + def get_policies(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def create_policy(self, context, policy): + pass + + @abc.abstractmethod + def update_policy(self, context, policy_id, policy): + pass + + @abc.abstractmethod + def delete_policy(self, context, policy_id): + pass + + @abc.abstractmethod + def get_policy_bandwidth_limit_rule(self, context, rule_id, + policy_id, fields=None): + pass + + @abc.abstractmethod + def get_policy_bandwidth_limit_rules(self, context, policy_id, + filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False): + pass + + @abc.abstractmethod + def create_policy_bandwidth_limit_rule(self, context, policy_id, + bandwidth_limit_rule): + pass + + @abc.abstractmethod + def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, + bandwidth_limit_rule): + pass + + @abc.abstractmethod + def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id): + pass + + @abc.abstractmethod + def get_rule_types(self, context, filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False): + pass diff --git a/neutron/extensions/rbac.py b/neutron/extensions/rbac.py new file mode 100644 index 00000000000..23c9e775231 --- /dev/null +++ b/neutron/extensions/rbac.py @@ -0,0 +1,120 @@ +# Copyright (c) 2015 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo_config import cfg + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions as n_exc +from neutron.db import rbac_db_models +from neutron import manager +from neutron.quota import resource_registry + + +class RbacPolicyNotFound(n_exc.NotFound): + message = _("RBAC policy of type %(object_type)s with ID %(id)s not found") + + +class RbacPolicyInUse(n_exc.Conflict): + message = _("RBAC policy on object %(object_id)s cannot be removed " + "because other objects depend on it.\nDetails: %(details)s") + + +def convert_valid_object_type(otype): + normalized = otype.strip().lower() + if normalized in rbac_db_models.get_type_model_map(): + return normalized + msg = _("'%s' is not a valid RBAC object type") % otype + raise n_exc.InvalidInput(error_message=msg) + + +RESOURCE_NAME = 'rbac_policy' +RESOURCE_COLLECTION = 'rbac_policies' + +RESOURCE_ATTRIBUTE_MAP = { + RESOURCE_COLLECTION: { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'primary_key': True}, + 'object_type': {'allow_post': True, 'allow_put': False, + 'convert_to': convert_valid_object_type, + 'is_visible': True, 'default': None, + 'enforce_policy': True}, + 'object_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'default': None, + 'enforce_policy': True}, + 'target_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'enforce_policy': True, + 'default': None}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, 'is_visible': True}, + 'action': {'allow_post': True, 'allow_put': False, + # action depends on type so validation has to occur in + # the extension + 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, + 'is_visible': True}, + 
} +} + +rbac_quota_opts = [ + cfg.IntOpt('quota_rbac_entry', default=10, + help=_('Default number of RBAC entries allowed per tenant. ' + 'A negative value means unlimited.')) +] +cfg.CONF.register_opts(rbac_quota_opts, 'QUOTAS') + + +class Rbac(extensions.ExtensionDescriptor): + """RBAC policy support.""" + + @classmethod + def get_name(cls): + return "RBAC Policies" + + @classmethod + def get_alias(cls): + return 'rbac-policies' + + @classmethod + def get_description(cls): + return ("Allows creation and modification of policies that control " + "tenant access to resources.") + + @classmethod + def get_updated(cls): + return "2015-06-17T12:15:12-30:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + plural_mappings = {'rbac_policies': 'rbac_policy'} + attr.PLURALS.update(plural_mappings) + plugin = manager.NeutronManager.get_plugin() + params = RESOURCE_ATTRIBUTE_MAP['rbac_policies'] + collection_name = 'rbac-policies' + resource_name = 'rbac_policy' + resource_registry.register_resource_by_name(resource_name) + controller = base.create_resource(collection_name, resource_name, + plugin, params, allow_bulk=True, + allow_pagination=False, + allow_sorting=True) + return [extensions.ResourceExtension(collection_name, controller, + attr_map=params)] + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + return {} diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py index f199f12025a..5e32036edb8 100644 --- a/neutron/extensions/securitygroup.py +++ b/neutron/extensions/securitygroup.py @@ -217,6 +217,7 @@ RESOURCE_ATTRIBUTE_MAP = { 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'security_group_rules': {'allow_post': False, 'allow_put': False, 'is_visible': True}, @@ -251,6 +252,7 @@ RESOURCE_ATTRIBUTE_MAP = { 
'convert_to': convert_ip_prefix_to_cidr}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, } } diff --git a/neutron/ipam/drivers/neutrondb_ipam/driver.py b/neutron/ipam/drivers/neutrondb_ipam/driver.py index 1ddab84340f..da2da230fd8 100644 --- a/neutron/ipam/drivers/neutrondb_ipam/driver.py +++ b/neutron/ipam/drivers/neutrondb_ipam/driver.py @@ -44,12 +44,16 @@ class NeutronDbSubnet(ipam_base.Subnet): """ @classmethod - def create_allocation_pools(cls, subnet_manager, session, pools): + def create_allocation_pools(cls, subnet_manager, session, pools, cidr): for pool in pools: + # IPv6 addresses that start '::1', '::2', etc cause IP version + # ambiguity when converted to integers by pool.first and pool.last. + # Infer the IP version from the subnet cidr. + ip_version = cidr.version subnet_manager.create_pool( session, - netaddr.IPAddress(pool.first).format(), - netaddr.IPAddress(pool.last).format()) + netaddr.IPAddress(pool.first, ip_version).format(), + netaddr.IPAddress(pool.last, ip_version).format()) @classmethod def create_from_subnet_request(cls, subnet_request, ctx): @@ -68,7 +72,8 @@ class NeutronDbSubnet(ipam_base.Subnet): else: pools = subnet_request.allocation_pools # Create IPAM allocation pools and availability ranges - cls.create_allocation_pools(subnet_manager, session, pools) + cls.create_allocation_pools(subnet_manager, session, pools, + subnet_request.subnet_cidr) return cls(ipam_subnet_id, ctx, @@ -347,13 +352,13 @@ class NeutronDbSubnet(ipam_base.Subnet): subnet_id=self.subnet_manager.neutron_id, ip_address=address) - def update_allocation_pools(self, pools): + def update_allocation_pools(self, pools, cidr): # Pools have already been validated in the subnet request object which # was sent to the subnet pool driver. Further validation should not be # required. 
session = db_api.get_session() self.subnet_manager.delete_allocation_pools(session) - self.create_allocation_pools(self.subnet_manager, session, pools) + self.create_allocation_pools(self.subnet_manager, session, pools, cidr) self._pools = pools def get_details(self): @@ -414,7 +419,8 @@ class NeutronDbPool(subnet_alloc.SubnetAllocator): subnet_request.subnet_id) return subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context) - subnet.update_allocation_pools(subnet_request.allocation_pools) + cidr = netaddr.IPNetwork(subnet._cidr) + subnet.update_allocation_pools(subnet_request.allocation_pools, cidr) return subnet def remove_subnet(self, subnet_id): diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index 18b32303194..cd36472cd2f 100644 --- a/neutron/locale/neutron-log-error.pot +++ b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 7.0.0.0b3.dev96\n" +"Project-Id-Version: neutron 7.0.0.0b3.dev400\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-08-10 06:10+0000\n" +"POT-Creation-Date: 2015-08-21 06:08+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -31,62 +31,66 @@ msgstr "" msgid "Error loading plugin by class, %s" msgstr "" -#: neutron/policy.py:267 +#: neutron/policy.py:266 #, python-format msgid "Policy check error while calling %s!" msgstr "" -#: neutron/service.py:105 neutron/service.py:167 +#: neutron/service.py:105 neutron/service.py:185 msgid "Unrecoverable error: please check log for details." msgstr "" -#: neutron/service.py:145 +#: neutron/service.py:124 +msgid "done with wait" +msgstr "" + +#: neutron/service.py:159 #, python-format msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." msgstr "" -#: neutron/service.py:181 +#: neutron/service.py:199 msgid "No known API applications configured." 
msgstr "" -#: neutron/service.py:286 +#: neutron/service.py:304 msgid "Exception occurs when timer stops" msgstr "" -#: neutron/service.py:295 +#: neutron/service.py:313 msgid "Exception occurs when waiting for timer" msgstr "" -#: neutron/wsgi.py:160 +#: neutron/wsgi.py:169 #, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "" -#: neutron/wsgi.py:803 +#: neutron/wsgi.py:812 #, python-format msgid "InvalidContentType: %s" msgstr "" -#: neutron/wsgi.py:807 +#: neutron/wsgi.py:816 #, python-format msgid "MalformedRequestBody: %s" msgstr "" -#: neutron/wsgi.py:816 +#: neutron/wsgi.py:825 msgid "Internal error" msgstr "" -#: neutron/agent/common/ovs_lib.py:219 neutron/agent/common/ovs_lib.py:319 +#: neutron/agent/common/ovs_lib.py:223 neutron/agent/common/ovs_lib.py:327 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:240 +#: neutron/agent/common/ovs_lib.py:244 #, python-format msgid "Timed out retrieving ofport on port %(pname)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:567 +#: neutron/agent/common/ovs_lib.py:605 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" @@ -120,16 +124,26 @@ msgid "Network %s info call failed." 
msgstr "" #: neutron/agent/dhcp/agent.py:576 neutron/agent/l3/agent.py:638 -#: neutron/agent/metadata/agent.py:319 +#: neutron/agent/metadata/agent.py:322 #: neutron/plugins/hyperv/agent/l2_agent.py:94 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:109 #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:847 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:130 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:313 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:137 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:318 #: neutron/services/metering/agents/metering_agent.py:283 msgid "Failed reporting state!" msgstr "" +#: neutron/agent/l2/extensions/manager.py:68 +#, python-format +msgid "Agent Extension '%(name)s' failed while handling port update" +msgstr "" + +#: neutron/agent/l2/extensions/manager.py:82 +#, python-format +msgid "Agent Extension '%(name)s' failed while handling port deletion" +msgstr "" + #: neutron/agent/l3/agent.py:233 msgid "Router id is required if not using namespaces." 
msgstr "" @@ -280,7 +294,7 @@ msgstr "" msgid "Failed unplugging interface '%s'" msgstr "" -#: neutron/agent/linux/ip_conntrack.py:76 +#: neutron/agent/linux/ip_conntrack.py:75 #, python-format msgid "Failed execute conntrack command %s" msgstr "" @@ -317,6 +331,7 @@ msgstr "" #: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:58 #: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:79 #: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:105 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:136 msgid "Failed executing ip command" msgstr "" @@ -329,7 +344,7 @@ msgstr "" msgid "Failure applying iptables rules" msgstr "" -#: neutron/agent/linux/iptables_manager.py:478 +#: neutron/agent/linux/iptables_manager.py:485 #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables " @@ -346,7 +361,7 @@ msgstr "" msgid "Interface monitor is not active" msgstr "" -#: neutron/agent/linux/utils.py:220 +#: neutron/agent/linux/utils.py:239 #, python-format msgid "Unable to convert value in %s" msgstr "" @@ -380,17 +395,17 @@ msgstr "" msgid "Bridge %s does not exist" msgstr "" -#: neutron/agent/ovsdb/native/commands.py:296 +#: neutron/agent/ovsdb/native/commands.py:320 #, python-format msgid "Port %s does not exist" msgstr "" -#: neutron/agent/ovsdb/native/commands.py:307 +#: neutron/agent/ovsdb/native/commands.py:331 #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "" -#: neutron/agent/ovsdb/native/commands.py:401 +#: neutron/agent/ovsdb/native/commands.py:425 #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. " @@ -433,13 +448,13 @@ msgid "" "message %s" msgstr "" -#: neutron/api/rpc/handlers/l3_rpc.py:75 +#: neutron/api/rpc/handlers/l3_rpc.py:77 msgid "" "No plugin for L3 routing registered! Will reply to l3 agent with empty " "router dictionary." 
msgstr "" -#: neutron/api/v2/base.py:389 +#: neutron/api/v2/base.py:394 #, python-format msgid "Unable to undo add for %(resource)s %(id)s" msgstr "" @@ -450,7 +465,7 @@ msgstr "" msgid "%s failed" msgstr "" -#: neutron/callbacks/manager.py:144 +#: neutron/callbacks/manager.py:143 #, python-format msgid "Error during notification for %(callback)s %(resource)s, %(event)s" msgstr "" @@ -545,11 +560,11 @@ msgstr "" msgid "Unexpected exception while checking supported feature via command: %s" msgstr "" -#: neutron/cmd/sanity/checks.py:142 +#: neutron/cmd/sanity/checks.py:144 msgid "Unexpected exception while checking supported ip link command" msgstr "" -#: neutron/cmd/sanity/checks.py:306 +#: neutron/cmd/sanity/checks.py:308 #, python-format msgid "" "Failed to import required modules. Ensure that the python-openvswitch " @@ -570,23 +585,23 @@ msgstr "" msgid "Failed to schedule network %s" msgstr "" -#: neutron/db/agentschedulers_db.py:310 +#: neutron/db/agentschedulers_db.py:311 #, python-format msgid "" "Unexpected exception occurred while removing network %(net)s from agent " "%(agent)s" msgstr "" -#: neutron/db/agentschedulers_db.py:321 +#: neutron/db/agentschedulers_db.py:322 msgid "Exception encountered during network rescheduling" msgstr "" -#: neutron/db/db_base_plugin_v2.py:226 neutron/plugins/ml2/plugin.py:571 +#: neutron/db/db_base_plugin_v2.py:225 neutron/plugins/ml2/plugin.py:584 #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:982 +#: neutron/db/db_base_plugin_v2.py:985 #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "" @@ -596,7 +611,7 @@ msgstr "" msgid "MAC generation error after %s attempts" msgstr "" -#: neutron/db/dvr_mac_db.py:177 +#: neutron/db/dvr_mac_db.py:187 #, python-format msgid "Could not retrieve gateway port for subnet %s" msgstr "" @@ -617,20 +632,20 @@ msgid "" "changes" msgstr "" -#: 
neutron/db/l3_agentschedulers_db.py:119 +#: neutron/db/l3_agentschedulers_db.py:128 #, python-format msgid "Failed to reschedule router %s" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:124 +#: neutron/db/l3_agentschedulers_db.py:133 msgid "Exception encountered during router rescheduling." msgstr "" -#: neutron/db/l3_db.py:521 +#: neutron/db/l3_db.py:522 msgid "Router port must have at least one fixed IP" msgstr "" -#: neutron/db/l3_db.py:550 +#: neutron/db/l3_db.py:551 msgid "Cannot have multiple IPv4 subnets on router port" msgstr "" @@ -650,12 +665,12 @@ msgstr "" msgid "IPAM subnet referenced to Neutron subnet %s does not exist" msgstr "" -#: neutron/notifiers/nova.py:248 +#: neutron/notifiers/nova.py:257 #, python-format msgid "Failed to notify nova on events: %s" msgstr "" -#: neutron/notifiers/nova.py:252 neutron/notifiers/nova.py:268 +#: neutron/notifiers/nova.py:261 neutron/notifiers/nova.py:277 #, python-format msgid "Error response returned from nova: %s" msgstr "" @@ -751,182 +766,182 @@ msgid "" msgstr "" #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:256 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1739 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1806 #, python-format msgid "%s Agent terminated!" msgstr "" #: neutron/plugins/ml2/db.py:242 neutron/plugins/ml2/db.py:326 -#: neutron/plugins/ml2/plugin.py:1370 +#: neutron/plugins/ml2/plugin.py:1389 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" -#: neutron/plugins/ml2/managers.py:60 +#: neutron/plugins/ml2/managers.py:62 #, python-format msgid "" "Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" " is already registered for type '%(type)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:76 +#: neutron/plugins/ml2/managers.py:78 #, python-format msgid "No type driver for tenant network_type: %s. Service terminated!" 
msgstr "" -#: neutron/plugins/ml2/managers.py:83 +#: neutron/plugins/ml2/managers.py:85 #, python-format msgid "No type driver for external network_type: %s. Service terminated!" msgstr "" -#: neutron/plugins/ml2/managers.py:152 +#: neutron/plugins/ml2/managers.py:154 #, python-format msgid "Network %s has no segments" msgstr "" -#: neutron/plugins/ml2/managers.py:251 neutron/plugins/ml2/managers.py:278 +#: neutron/plugins/ml2/managers.py:253 neutron/plugins/ml2/managers.py:280 #, python-format msgid "Failed to release segment '%s' because network type is not supported." msgstr "" -#: neutron/plugins/ml2/managers.py:353 +#: neutron/plugins/ml2/managers.py:389 #, python-format msgid "Mechanism driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/managers.py:639 neutron/plugins/ml2/managers.py:701 +#: neutron/plugins/ml2/managers.py:675 neutron/plugins/ml2/managers.py:737 #, python-format msgid "Failed to bind port %(port)s on host %(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:654 +#: neutron/plugins/ml2/managers.py:690 #, python-format msgid "" "Exceeded maximum binding levels attempting to bind port %(port)s on host " "%(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:697 +#: neutron/plugins/ml2/managers.py:733 #, python-format msgid "Mechanism driver %s failed in bind_port" msgstr "" -#: neutron/plugins/ml2/managers.py:768 +#: neutron/plugins/ml2/managers.py:844 #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:295 +#: neutron/plugins/ml2/plugin.py:286 #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" -#: neutron/plugins/ml2/plugin.py:451 +#: neutron/plugins/ml2/plugin.py:462 #, python-format msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:462 +#: neutron/plugins/ml2/plugin.py:473 #, python-format msgid "Serialized profile DB value 
'%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:548 +#: neutron/plugins/ml2/plugin.py:559 #, python-format msgid "Could not find %s to delete." msgstr "" -#: neutron/plugins/ml2/plugin.py:551 +#: neutron/plugins/ml2/plugin.py:562 #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "" -#: neutron/plugins/ml2/plugin.py:584 +#: neutron/plugins/ml2/plugin.py:597 #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:630 +#: neutron/plugins/ml2/plugin.py:643 #, python-format msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:700 +#: neutron/plugins/ml2/plugin.py:713 #, python-format msgid "Exception auto-deleting port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:713 +#: neutron/plugins/ml2/plugin.py:726 #, python-format msgid "Exception auto-deleting subnet %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:794 +#: neutron/plugins/ml2/plugin.py:807 msgid "mechanism_manager.delete_network_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:815 +#: neutron/plugins/ml2/plugin.py:828 #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:934 +#: neutron/plugins/ml2/plugin.py:947 #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:943 +#: neutron/plugins/ml2/plugin.py:956 msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:1008 +#: neutron/plugins/ml2/plugin.py:1023 #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1020 +#: neutron/plugins/ml2/plugin.py:1035 #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" 
msgstr "" -#: neutron/plugins/ml2/plugin.py:1051 +#: neutron/plugins/ml2/plugin.py:1066 #, python-format msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1185 +#: neutron/plugins/ml2/plugin.py:1206 #, python-format msgid "mechanism_manager.update_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1232 +#: neutron/plugins/ml2/plugin.py:1253 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1351 +#: neutron/plugins/ml2/plugin.py:1370 #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1383 +#: neutron/plugins/ml2/plugin.py:1402 #, python-format msgid "Binding info for DVR port %s not found" msgstr "" -#: neutron/plugins/ml2/rpc.py:154 +#: neutron/plugins/ml2/rpc.py:161 #, python-format msgid "Failed to get details for device %s" msgstr "" -#: neutron/plugins/ml2/rpc.py:242 +#: neutron/plugins/ml2/rpc.py:249 #, python-format msgid "Failed to update device %s up" msgstr "" -#: neutron/plugins/ml2/rpc.py:256 +#: neutron/plugins/ml2/rpc.py:263 #, python-format msgid "Failed to update device %s down" msgstr "" @@ -943,13 +958,6 @@ msgstr "" msgid "Failed to parse vni_ranges. Service terminated!" msgstr "" -#: neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py:206 -#, python-format -msgid "" -"UCS Mech Driver: Failed binding port ID %(id)s on any segment of network " -"%(network)s" -msgstr "" - #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:102 #, python-format msgid "" @@ -1003,7 +1011,7 @@ msgid "Unable to obtain MAC address for unique ID. Agent terminated!" 
msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1062 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:282 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:307 #, python-format msgid "Error in agent loop. Devices info: %s" msgstr "" @@ -1021,25 +1029,30 @@ msgid "" "%(physnet)s, and network type %(nettype)s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:50 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:51 #, python-format msgid "Failed to get devices for %s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:187 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:201 #, python-format msgid "Failed to set device %s state" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:342 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:367 msgid "Failed on Agent configuration parse. Agent terminated!" 
msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:354 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:379 msgid "Agent Initialization Failed" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:91 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py:82 +#, python-format +msgid "Failed to set device %s max rate" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:94 msgid "Failed to parse supported PCI vendor devices" msgstr "" @@ -1071,111 +1084,118 @@ msgid "" "a different subnet %(orig_subnet)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:414 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:429 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:417 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:440 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:432 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:455 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:433 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:448 msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:437 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:452 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:583 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:598 #, python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:614 +#: 
neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:629 #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:622 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:637 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:632 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:647 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:641 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:656 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:701 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:716 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:788 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:803 #, python-format msgid "Configuration for devices %s failed!" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:925 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:947 msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" 
msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:984 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1007 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1171 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1205 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1369 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1413 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1405 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1452 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1557 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1613 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1627 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1684 msgid "Error while processing VIF ports" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1733 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1780 +#, python-format +msgid "" +"Tunneling can't be enabled with invalid local_ip '%s'. IP couldn't be " +"found on this host's interfaces." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1799 msgid "Agent failed to create agent config map" msgstr "" diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot index 06cb12665ae..5c3e7a73fac 100644 --- a/neutron/locale/neutron-log-info.pot +++ b/neutron/locale/neutron-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 7.0.0.0b3.dev96\n" +"Project-Id-Version: neutron 7.0.0.0b3.dev400\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-08-10 06:10+0000\n" +"POT-Creation-Date: 2015-08-21 06:07+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -32,27 +32,27 @@ msgstr "" msgid "Loading Plugin: %s" msgstr "" -#: neutron/service.py:186 +#: neutron/service.py:204 #, python-format msgid "Neutron service started, listening on %(host)s:%(port)s" msgstr "" -#: neutron/wsgi.py:796 +#: neutron/wsgi.py:805 #, python-format msgid "%(method)s %(url)s" msgstr "" -#: neutron/wsgi.py:813 +#: neutron/wsgi.py:822 #, python-format msgid "HTTP exception thrown: %s" msgstr "" -#: neutron/wsgi.py:829 +#: neutron/wsgi.py:838 #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: neutron/wsgi.py:832 +#: neutron/wsgi.py:841 #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "" @@ -65,46 +65,46 @@ msgstr "" msgid "Disabled allowed-address-pairs extension." msgstr "" -#: neutron/agent/securitygroups_rpc.py:154 +#: neutron/agent/securitygroups_rpc.py:137 #, python-format msgid "" "Skipping method %s as firewall is disabled or configured as " "NoopFirewallDriver." 
msgstr "" -#: neutron/agent/securitygroups_rpc.py:166 +#: neutron/agent/securitygroups_rpc.py:149 #, python-format msgid "Preparing filters for devices %s" msgstr "" -#: neutron/agent/securitygroups_rpc.py:197 +#: neutron/agent/securitygroups_rpc.py:179 #, python-format msgid "Security group rule updated %r" msgstr "" -#: neutron/agent/securitygroups_rpc.py:205 +#: neutron/agent/securitygroups_rpc.py:187 #, python-format msgid "Security group member updated %r" msgstr "" -#: neutron/agent/securitygroups_rpc.py:229 +#: neutron/agent/securitygroups_rpc.py:211 msgid "Provider rule updated" msgstr "" -#: neutron/agent/securitygroups_rpc.py:241 +#: neutron/agent/securitygroups_rpc.py:223 #, python-format msgid "Remove device filter for %r" msgstr "" -#: neutron/agent/securitygroups_rpc.py:251 +#: neutron/agent/securitygroups_rpc.py:233 msgid "Refresh firewall rules" msgstr "" -#: neutron/agent/securitygroups_rpc.py:255 +#: neutron/agent/securitygroups_rpc.py:237 msgid "No ports here to refresh firewall" msgstr "" -#: neutron/agent/common/ovs_lib.py:424 neutron/agent/common/ovs_lib.py:457 +#: neutron/agent/common/ovs_lib.py:432 neutron/agent/common/ovs_lib.py:465 #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "" @@ -127,6 +127,16 @@ msgstr "" msgid "agent_updated by server side %s!" 
msgstr "" +#: neutron/agent/l2/extensions/manager.py:44 +#, python-format +msgid "Loaded agent extensions: %s" +msgstr "" + +#: neutron/agent/l2/extensions/manager.py:57 +#, python-format +msgid "Initializing agent extension '%s'" +msgstr "" + #: neutron/agent/l3/agent.py:573 neutron/agent/l3/agent.py:642 msgid "L3 agent started" msgstr "" @@ -148,7 +158,7 @@ msgstr "" msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "" -#: neutron/agent/linux/dhcp.py:816 +#: neutron/agent/linux/dhcp.py:821 #, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is " @@ -160,12 +170,12 @@ msgstr "" msgid "Device %s already exists" msgstr "" -#: neutron/agent/linux/iptables_firewall.py:161 +#: neutron/agent/linux/iptables_firewall.py:168 #, python-format msgid "Attempted to update port filter which is not filtered %s" msgstr "" -#: neutron/agent/linux/iptables_firewall.py:172 +#: neutron/agent/linux/iptables_firewall.py:179 #, python-format msgid "Attempted to remove port filter which is not filtered %r" msgstr "" @@ -224,8 +234,8 @@ msgstr "" #: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:262 #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1100 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:357 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1636 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:382 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1693 msgid "Agent initialized successfully, now running... 
" msgstr "" @@ -277,7 +287,7 @@ msgstr "" msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:744 neutron/plugins/ml2/plugin.py:891 +#: neutron/db/db_base_plugin_v2.py:743 neutron/plugins/ml2/plugin.py:904 #, python-format msgid "" "Found port (%(port_id)s, %(ip)s) having IP allocation on subnet " @@ -289,23 +299,23 @@ msgstr "" msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "" -#: neutron/db/ipam_backend_mixin.py:230 +#: neutron/db/ipam_backend_mixin.py:228 #, python-format msgid "" "Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " "%(subnet_id)s (CIDR: %(cidr)s)" msgstr "" -#: neutron/db/ipam_backend_mixin.py:268 +#: neutron/db/ipam_backend_mixin.py:266 msgid "Specified IP addresses do not match the subnet IP version" msgstr "" -#: neutron/db/ipam_backend_mixin.py:272 +#: neutron/db/ipam_backend_mixin.py:270 #, python-format msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" msgstr "" -#: neutron/db/ipam_backend_mixin.py:293 +#: neutron/db/ipam_backend_mixin.py:291 #, python-format msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" msgstr "" @@ -321,33 +331,33 @@ msgstr "" msgid "Skipping port %s as no IP is configure on it" msgstr "" -#: neutron/db/l3_dvr_db.py:88 +#: neutron/db/l3_dvr_db.py:89 #, python-format msgid "Centralizing distributed router %s is not supported" msgstr "" -#: neutron/db/l3_dvr_db.py:558 +#: neutron/db/l3_dvr_db.py:565 #, python-format msgid "Agent Gateway port does not exist, so create one: %s" msgstr "" -#: neutron/db/l3_dvr_db.py:641 +#: neutron/db/l3_dvr_db.py:645 #, python-format msgid "SNAT interface port list does not exist, so create one: %s" msgstr "" -#: neutron/db/l3_dvrscheduler_db.py:350 +#: neutron/db/l3_dvrscheduler_db.py:354 msgid "SNAT already bound to a service node." 
msgstr "" -#: neutron/db/l3_hamode_db.py:191 +#: neutron/db/l3_hamode_db.py:203 #, python-format msgid "" "Attempt %(count)s to allocate a VRID in the network %(network)s for the " "router %(router)s" msgstr "" -#: neutron/db/l3_hamode_db.py:274 +#: neutron/db/l3_hamode_db.py:292 #, python-format msgid "" "Number of active agents lower than max_l3_agents_per_router. L3 agents " @@ -363,7 +373,7 @@ msgstr "" msgid "Disabled vlantransparent extension." msgstr "" -#: neutron/notifiers/nova.py:266 +#: neutron/notifiers/nova.py:275 #, python-format msgid "Nova event response: %s" msgstr "" @@ -487,96 +497,108 @@ msgstr "" msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/managers.py:46 +#: neutron/plugins/ml2/managers.py:48 #, python-format msgid "Configured type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:51 +#: neutron/plugins/ml2/managers.py:53 #, python-format msgid "Loaded type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:68 +#: neutron/plugins/ml2/managers.py:70 #, python-format msgid "Registered types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:79 +#: neutron/plugins/ml2/managers.py:81 #, python-format msgid "Tenant network_types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:169 +#: neutron/plugins/ml2/managers.py:171 #, python-format msgid "Initializing driver for type '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:294 +#: neutron/plugins/ml2/managers.py:296 #, python-format msgid "Configured mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:300 +#: neutron/plugins/ml2/managers.py:302 #, python-format msgid "Loaded mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:312 +#: neutron/plugins/ml2/managers.py:314 #, python-format msgid "Registered mechanism drivers: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:317 +#: neutron/plugins/ml2/managers.py:333 +#, python-format +msgid "" +"%(rule_types)s 
rule types disabled for ml2 because %(driver)s does not " +"support them" +msgstr "" + +#: neutron/plugins/ml2/managers.py:353 #, python-format msgid "Initializing mechanism driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:726 +#: neutron/plugins/ml2/managers.py:762 #, python-format msgid "Configured extension driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:732 +#: neutron/plugins/ml2/managers.py:768 #, python-format msgid "Loaded extension driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:743 +#: neutron/plugins/ml2/managers.py:779 #, python-format msgid "Registered extension drivers: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:749 +#: neutron/plugins/ml2/managers.py:785 #, python-format msgid "Initializing extension driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:757 +#: neutron/plugins/ml2/managers.py:794 #, python-format msgid "Got %(alias)s extension from driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:150 +#: neutron/plugins/ml2/managers.py:805 +#, python-format +msgid "Extension driver '%(name)s' failed in %(method)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:152 msgid "Modular L2 Plugin initialization complete" msgstr "" -#: neutron/plugins/ml2/plugin.py:301 +#: neutron/plugins/ml2/plugin.py:277 #, python-format msgid "Attempt %(count)s to bind port %(port)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:697 +#: neutron/plugins/ml2/plugin.py:710 #, python-format msgid "Port %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:709 +#: neutron/plugins/ml2/plugin.py:722 #, python-format msgid "Subnet %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:1396 +#: neutron/plugins/ml2/plugin.py:1415 #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted " @@ -619,7 +641,7 @@ msgid "Initializing CRD client... 
" msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py:32 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:802 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:817 #, python-format msgid "" "Skipping ARP spoofing rules for port '%s' because it has port security " @@ -636,15 +658,15 @@ msgid "Stopping linuxbridge agent." msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:861 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:100 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:106 #: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 #, python-format msgid "RPC agent_id: %s" msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:928 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:219 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1246 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:233 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1281 #, python-format msgid "Port %(device)s updated. Details: %(details)s" msgstr "" @@ -660,8 +682,8 @@ msgid "Attachment %s removed" msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:985 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:247 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1324 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:272 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1366 #, python-format msgid "Port %s updated." msgstr "" @@ -671,8 +693,8 @@ msgid "LinuxBridge Agent RPC Daemon Started!" 
msgstr "" #: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1053 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:263 -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1524 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:288 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1580 msgid "Agent out of sync with plugin!" msgstr "" @@ -682,36 +704,37 @@ msgstr "" msgid "Interface mappings: %s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:180 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:194 #, python-format msgid "Device %(device)s spoofcheck %(spoofcheck)s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:201 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:215 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py:84 #, python-format msgid "No device with MAC %s defined on agent." msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:228 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:243 #, python-format msgid "Device with MAC %s not defined on plugin" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:235 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:250 #, python-format msgid "Removing device with mac_address %s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:256 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:281 msgid "SRIOV NIC Agent RPC Daemon Started!" 
msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:345 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:370 #, python-format msgid "Physical Devices mappings: %s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:346 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:371 #, python-format msgid "Exclude Devices: %s" msgstr "" @@ -725,72 +748,77 @@ msgstr "" msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:592 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:607 #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:656 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:671 #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:793 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:808 #, python-format msgid "Configuration for devices up %(up)s and devices down %(down)s completed." msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:834 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:849 #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:900 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:916 #, python-format msgid "Adding %s to list of bridges." 
msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:978 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1001 #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1132 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1166 #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1240 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1275 #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be" " processed" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1279 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1321 #, python-format msgid "Ancillary Ports %s added" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1296 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1338 #, python-format msgid "Ports %s removed" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1312 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1354 #, python-format msgid "Ancillary ports %s removed" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1553 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1559 +#, python-format +msgid "Cleaning stale %s flows" +msgstr "" + +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1609 msgid "Agent tunnel out of sync with plugin!" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1655 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1712 msgid "Agent caught SIGTERM, quitting daemon loop." 
msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1659 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1716 msgid "Agent caught SIGHUP, resetting." msgstr "" @@ -802,13 +830,13 @@ msgstr "" msgid "NVSD Agent initialized successfully, now running... " msgstr "" -#: neutron/quota/__init__.py:180 +#: neutron/quota/__init__.py:208 msgid "" "ConfDriver is used as quota_driver because the loaded plugin does not " "support 'quotas' table." msgstr "" -#: neutron/quota/__init__.py:191 +#: neutron/quota/__init__.py:219 #, python-format msgid "Loaded quota_driver: %s." msgstr "" @@ -828,7 +856,7 @@ msgstr "" msgid "Agent %s already present" msgstr "" -#: neutron/server/__init__.py:50 +#: neutron/server/__init__.py:48 msgid "RPC was already started in parent process by plugin." msgstr "" @@ -847,3 +875,8 @@ msgstr "" msgid "Loading interface driver %s" msgstr "" +#: neutron/services/qos/notification_drivers/manager.py:70 +#, python-format +msgid "Loading %(name)s (%(description)s) notification driver for QoS plugin" +msgstr "" + diff --git a/neutron/locale/neutron-log-warning.pot b/neutron/locale/neutron-log-warning.pot index f4229761142..bd77febff35 100644 --- a/neutron/locale/neutron-log-warning.pot +++ b/neutron/locale/neutron-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 7.0.0.0b3.dev96\n" +"Project-Id-Version: neutron 7.0.0.0b3.dev400\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-08-10 06:10+0000\n" +"POT-Creation-Date: 2015-08-21 06:08+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,7 +17,7 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.0\n" -#: neutron/policy.py:116 +#: neutron/policy.py:115 #, python-format msgid "Unable to find data type descriptor for attribute %s" msgstr "" @@ -35,23 +35,23 @@ msgstr "" msgid "Driver configuration doesn't match with 
enable_security_group" msgstr "" -#: neutron/agent/securitygroups_rpc.py:142 +#: neutron/agent/securitygroups_rpc.py:125 msgid "" "security_group_info_for_devices rpc call not supported by the server, " "falling back to old security_group_rules_for_devices which scales worse." msgstr "" -#: neutron/agent/common/ovs_lib.py:378 +#: neutron/agent/common/ovs_lib.py:386 #, python-format msgid "Found not yet ready openvswitch port: %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:381 +#: neutron/agent/common/ovs_lib.py:389 #, python-format msgid "Found failed openvswitch port: %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:439 +#: neutron/agent/common/ovs_lib.py:447 #, python-format msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" msgstr "" @@ -84,7 +84,7 @@ msgid "" msgstr "" #: neutron/agent/dhcp/agent.py:570 neutron/agent/l3/agent.py:633 -#: neutron/agent/metadata/agent.py:314 +#: neutron/agent/metadata/agent.py:317 #: neutron/services/metering/agents/metering_agent.py:278 msgid "" "Neutron server does not support state report. State report for this agent" @@ -112,7 +112,7 @@ msgstr "" msgid "Info for router %s was not found. Performing router cleanup" msgstr "" -#: neutron/agent/l3/router_info.py:191 +#: neutron/agent/l3/router_info.py:190 #, python-format msgid "Unable to configure IP address for floating IP: %s" msgstr "" @@ -140,7 +140,7 @@ msgid "" "%(top)r" msgstr "" -#: neutron/agent/linux/iptables_manager.py:698 +#: neutron/agent/linux/iptables_manager.py:705 #, python-format msgid "Attempted to get traffic counters of chain %s which does not exist" msgstr "" @@ -197,19 +197,19 @@ msgid "" "inactive agents." 
msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:100 +#: neutron/api/rpc/handlers/dhcp_rpc.py:103 #, python-format msgid "" "Action %(action)s for network %(net_id)s could not complete successfully:" " %(reason)s" msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:152 +#: neutron/api/rpc/handlers/dhcp_rpc.py:155 #, python-format msgid "Network %s could not be found, it might have been deleted concurrently." msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:203 +#: neutron/api/rpc/handlers/dhcp_rpc.py:208 #, python-format msgid "Updating lease expiration is now deprecated. Issued from host %s." msgstr "" @@ -245,28 +245,39 @@ msgid "" "in case there was a clock adjustment." msgstr "" -#: neutron/db/agentschedulers_db.py:280 +#: neutron/db/agentschedulers_db.py:281 msgid "No DHCP agents available, skipping rescheduling" msgstr "" -#: neutron/db/agentschedulers_db.py:284 +#: neutron/db/agentschedulers_db.py:285 #, python-format msgid "" "Removing network %(network)s from agent %(agent)s because the agent did " "not report to the server in the last %(dead_time)s seconds." msgstr "" -#: neutron/db/l3_agentschedulers_db.py:106 +#: neutron/db/l3_agentschedulers_db.py:111 +#, python-format +msgid "" +"L3 DVR agent on node %(host)s is down. Not rescheduling from agent in " +"'dvr' mode." +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:115 #, python-format msgid "" "Rescheduling router %(router)s from agent %(agent)s because the agent did" " not report to the server in the last %(dead_time)s seconds." 
msgstr "" -#: neutron/db/l3_dvrscheduler_db.py:341 +#: neutron/db/l3_dvrscheduler_db.py:342 msgid "No active L3 agents found for SNAT" msgstr "" +#: neutron/db/l3_dvrscheduler_db.py:347 +msgid "No candidates found for SNAT" +msgstr "" + #: neutron/db/securitygroups_rpc_base.py:375 #, python-format msgid "No valid gateway port on subnet %s is found for IPv6 RA" @@ -277,22 +288,22 @@ msgstr "" msgid "Failed to delete namespace %s" msgstr "" -#: neutron/notifiers/nova.py:76 +#: neutron/notifiers/nova.py:77 msgid "" "Authenticating to nova using nova_admin_* options is deprecated. This " "should be done using an auth plugin, like password" msgstr "" -#: neutron/notifiers/nova.py:195 +#: neutron/notifiers/nova.py:204 msgid "Port ID not set! Nova will not be notified of port status change." msgstr "" -#: neutron/notifiers/nova.py:245 +#: neutron/notifiers/nova.py:254 #, python-format msgid "Nova returned NotFound for event: %s" msgstr "" -#: neutron/notifiers/nova.py:263 +#: neutron/notifiers/nova.py:272 #, python-format msgid "Nova event: %s returned with failed status" msgstr "" @@ -331,28 +342,33 @@ msgstr "" msgid "Could not expand segment %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:532 +#: neutron/plugins/ml2/managers.py:342 +#, python-format +msgid "%s does not support QoS; no rule types available" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:543 #, python-format msgid "" "In _notify_port_updated(), no bound segment for port %(port_id)s on " "network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:782 +#: neutron/plugins/ml2/plugin.py:795 msgid "A concurrent port creation has occurred" msgstr "" -#: neutron/plugins/ml2/plugin.py:1455 +#: neutron/plugins/ml2/plugin.py:1475 #, python-format msgid "Port %s not found during update" msgstr "" -#: neutron/plugins/ml2/rpc.py:78 +#: neutron/plugins/ml2/rpc.py:79 #, python-format msgid "Device %(device)s requested by agent %(agent_id)s not found in database" msgstr "" -#: neutron/plugins/ml2/rpc.py:92 +#: 
neutron/plugins/ml2/rpc.py:93 #, python-format msgid "" "Device %(device)s requested by agent %(agent_id)s on network " @@ -360,7 +376,7 @@ msgid "" msgstr "" #: neutron/plugins/ml2/drivers/mech_agent.py:76 -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:117 +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:120 #, python-format msgid "Attempting to bind with dead agent: %s" msgstr "" @@ -385,18 +401,6 @@ msgstr "" msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" msgstr "" -#: neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py:78 -msgid "update_port_precommit: vlan_id is None." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py:98 -msgid "update_port_postcommit: vlan_id is None." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py:186 -msgid "Bind port: vlan_id is None." -msgstr "" - #: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:108 #, python-format msgid "unable to modify mac_address of ACTIVE port %s" @@ -453,32 +457,44 @@ msgid "" "VXLAN MCAST mode" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:149 -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:162 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:195 #, python-format msgid "Cannot find vf index for pci slot %s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:309 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:352 #, python-format msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:142 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:157 #, python-format msgid "Cannot find vfs %(vfs)s in device %(dev_name)s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:158 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:173 #, python-format msgid "failed to 
parse vf link show line %(line)s: for %(device)s" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:178 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:192 #, python-format msgid "Failed to set spoofcheck for device %s" msgstr "" +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:258 +#, python-format +msgid "" +"Failed to find pci slot for device %(device)s; skipping extension port " +"cleanup" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py:58 +#: neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py:58 +#, python-format +msgid "Unsupported QoS rule type for %(rule_id)s: %(rule_type)s; skipping" +msgstr "" + #: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:163 #, python-format msgid "" @@ -494,54 +510,59 @@ msgid "" "message: %s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:535 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:550 #, python-format msgid "Action %s not supported" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:956 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:979 #, python-format msgid "" "Creating an interface named %(name)s exceeds the %(limit)d character " "limitation. It was shortened to %(new_name)s to fit." 
msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1149 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1183 #, python-format msgid "VIF port: %s has no ofport configured, and might not be able to transmit" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1261 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1303 #, python-format msgid "Device %s not defined on plugin" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1426 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1473 #, python-format msgid "Invalid remote IP: %s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1469 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1516 msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports." msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1472 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1519 msgid "" "OVS is dead. OVSNeutronAgent will keep running and checking OVS status " "periodically." msgstr "" +#: neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py:121 +#, python-format +msgid "Deleting flow %s" +msgstr "" + #: neutron/plugins/oneconvergence/lib/plugin_helper.py:110 msgid "No Token, Re-login" msgstr "" -#: neutron/quota/__init__.py:186 +#: neutron/quota/__init__.py:214 msgid "" "The quota driver neutron.quota.ConfDriver is deprecated as of Liberty. 
" "neutron.db.quota.driver.DbQuotaDriver should be used in its place" msgstr "" -#: neutron/quota/__init__.py:259 +#: neutron/quota/__init__.py:321 msgid "" "Registering resources to apply quota limits to using the quota_items " "option is deprecated as of Liberty.Resource REST controllers should take " @@ -576,7 +597,7 @@ msgstr "" msgid "No L3 agents can host the router %s" msgstr "" -#: neutron/services/provider_configuration.py:58 +#: neutron/services/provider_configuration.py:60 #, python-format msgid "" "The configured driver %(driver)s has been moved, automatically using " @@ -584,10 +605,15 @@ msgid "" "automatic fixup will be removed in a future release." msgstr "" -#: neutron/services/provider_configuration.py:84 +#: neutron/services/provider_configuration.py:86 msgid "" "Reading service_providers from legacy location in neutron.conf, and " "ignoring values in neutron_*aas.conf files; this override will be going " "away soon." msgstr "" +#: neutron/services/qos/notification_drivers/message_queue.py:30 +#, python-format +msgid "Received %(resource)s %(policy_id)s without context" +msgstr "" + diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index 6c1eb2d63c1..d1e61d2dbe1 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 7.0.0.0b3.dev96\n" +"Project-Id-Version: neutron 7.0.0.0b3.dev400\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-08-10 06:10+0000\n" +"POT-Creation-Date: 2015-08-21 06:07+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -35,19 +35,19 @@ msgstr "" msgid "Multiple plugins for service %s were configured" msgstr "" -#: neutron/policy.py:202 +#: neutron/policy.py:201 #, python-format msgid "" "Unable to identify a target field from:%s. 
Match should be in the form " "%%()s" msgstr "" -#: neutron/policy.py:232 +#: neutron/policy.py:231 #, python-format msgid "Unable to find resource name in %s" msgstr "" -#: neutron/policy.py:241 +#: neutron/policy.py:240 #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " @@ -75,41 +75,41 @@ msgid "" "scheduler to reduce stampeding. (Disable by setting to 0)" msgstr "" -#: neutron/wsgi.py:52 +#: neutron/wsgi.py:51 msgid "Number of backlog requests to configure the socket with" msgstr "" -#: neutron/wsgi.py:56 +#: neutron/wsgi.py:55 msgid "" "Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " "supported on OS X." msgstr "" -#: neutron/wsgi.py:60 +#: neutron/wsgi.py:59 msgid "Number of seconds to keep retrying to listen" msgstr "" -#: neutron/wsgi.py:63 +#: neutron/wsgi.py:62 msgid "Max header line to accommodate large tokens" msgstr "" -#: neutron/wsgi.py:66 +#: neutron/wsgi.py:65 msgid "Enable SSL on the API server" msgstr "" -#: neutron/wsgi.py:68 +#: neutron/wsgi.py:67 msgid "CA certificate file to use to verify connecting clients" msgstr "" -#: neutron/wsgi.py:71 +#: neutron/wsgi.py:70 msgid "Certificate file to use when starting the server securely" msgstr "" -#: neutron/wsgi.py:74 +#: neutron/wsgi.py:73 msgid "Private key file to use when starting the server securely" msgstr "" -#: neutron/wsgi.py:78 +#: neutron/wsgi.py:77 msgid "" "Determines if connections are allowed to be held open by clients after a " "request is fulfilled. A value of False will ensure that the socket " @@ -117,62 +117,62 @@ msgid "" " client." msgstr "" -#: neutron/wsgi.py:84 +#: neutron/wsgi.py:83 msgid "" "Timeout for client connections socket operations. If an incoming " "connection is idle for this number of seconds it will be closed. A value " "of '0' means wait forever." 
msgstr "" -#: neutron/wsgi.py:177 +#: neutron/wsgi.py:186 #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" -#: neutron/wsgi.py:197 +#: neutron/wsgi.py:206 #, python-format msgid "Unable to find ssl_cert_file : %s" msgstr "" -#: neutron/wsgi.py:203 +#: neutron/wsgi.py:212 #, python-format msgid "Unable to find ssl_key_file : %s" msgstr "" -#: neutron/wsgi.py:208 +#: neutron/wsgi.py:217 #, python-format msgid "Unable to find ssl_ca_file : %s" msgstr "" -#: neutron/wsgi.py:499 +#: neutron/wsgi.py:508 msgid "Cannot understand JSON" msgstr "" -#: neutron/wsgi.py:665 +#: neutron/wsgi.py:674 msgid "You must implement __call__" msgstr "" -#: neutron/wsgi.py:753 neutron/api/v2/base.py:199 neutron/api/v2/base.py:358 -#: neutron/api/v2/base.py:512 neutron/api/v2/base.py:576 +#: neutron/wsgi.py:762 neutron/api/v2/base.py:204 neutron/api/v2/base.py:363 +#: neutron/api/v2/base.py:517 neutron/api/v2/base.py:581 #: neutron/extensions/l3agentscheduler.py:51 #: neutron/extensions/l3agentscheduler.py:94 msgid "The resource could not be found." msgstr "" -#: neutron/wsgi.py:802 +#: neutron/wsgi.py:811 msgid "Unsupported Content-Type" msgstr "" -#: neutron/wsgi.py:806 +#: neutron/wsgi.py:815 msgid "Malformed request body" msgstr "" -#: neutron/wsgi.py:943 +#: neutron/wsgi.py:952 #, python-format msgid "The requested content type %s is invalid." 
msgstr "" -#: neutron/wsgi.py:996 +#: neutron/wsgi.py:1005 msgid "Could not deserialize data" msgstr "" @@ -245,16 +245,16 @@ msgstr "" msgid "Timeout in seconds for ovs-vsctl commands" msgstr "" -#: neutron/agent/common/ovs_lib.py:475 +#: neutron/agent/common/ovs_lib.py:483 #, python-format msgid "Unable to determine mac address for %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:583 +#: neutron/agent/common/ovs_lib.py:621 msgid "Cannot match priority on flow deletion or modification" msgstr "" -#: neutron/agent/common/ovs_lib.py:588 +#: neutron/agent/common/ovs_lib.py:626 msgid "Must specify one or more actions on flow addition or modification" msgstr "" @@ -322,6 +322,10 @@ msgstr "" msgid "Use broadcast in DHCP replies" msgstr "" +#: neutron/agent/l2/extensions/manager.py:29 +msgid "Extensions list to use" +msgstr "" + #: neutron/agent/l3/agent.py:272 msgid "" "The 'gateway_external_network_id' option must be configured for this " @@ -465,10 +469,6 @@ msgstr "" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "" -#: neutron/agent/l3/link_local_allocator.py:85 -msgid "Cannot allocate link local address" -msgstr "" - #: neutron/agent/linux/async_process.py:72 msgid "respawn_interval must be >= 0 if provided." msgstr "" @@ -582,7 +582,7 @@ msgstr "" msgid "Location to store IPv6 RA config files" msgstr "" -#: neutron/agent/linux/utils.py:120 +#: neutron/agent/linux/utils.py:137 msgid "" "\n" "Command: {cmd}\n" @@ -792,7 +792,7 @@ msgstr "" msgid "record" msgstr "" -#: neutron/agent/windows/utils.py:54 +#: neutron/agent/windows/utils.py:72 #, python-format msgid "" "\n" @@ -824,12 +824,12 @@ msgid "" " and '%(desc)s'" msgstr "" -#: neutron/api/api_common.py:318 neutron/api/v2/base.py:652 +#: neutron/api/api_common.py:328 neutron/api/v2/base.py:640 #, python-format msgid "Unable to find '%s' in request body" msgstr "" -#: neutron/api/api_common.py:325 +#: neutron/api/api_common.py:335 #, python-format msgid "Failed to parse request. 
Parameter '%s' not specified" msgstr "" @@ -847,251 +847,277 @@ msgstr "" msgid "Unknown API version specified" msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:83 +#: neutron/api/rpc/callbacks/exceptions.py:17 +#, python-format +msgid "Callback for %(resource_type)s returned wrong resource type" +msgstr "" + +#: neutron/api/rpc/callbacks/exceptions.py:21 +#, python-format +msgid "Callback for %(resource_type)s not found" +msgstr "" + +#: neutron/api/rpc/callbacks/exceptions.py:25 +#, python-format +msgid "Cannot add multiple callbacks for %(resource_type)s" +msgstr "" + +#: neutron/api/rpc/handlers/dhcp_rpc.py:86 msgid "Unrecognized action" msgstr "" -#: neutron/api/v2/attributes.py:55 +#: neutron/api/rpc/handlers/resources_rpc.py:38 +#, python-format +msgid "Invalid resource type %(resource_type)s" +msgstr "" + +#: neutron/api/rpc/handlers/resources_rpc.py:42 +#, python-format +msgid "Resource %(resource_id)s of type %(resource_type)s not found" +msgstr "" + +#: neutron/api/v2/attributes.py:56 #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" -#: neutron/api/v2/attributes.py:67 +#: neutron/api/v2/attributes.py:68 #, python-format msgid "" "Validation of dictionary's keys failed. 
Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" -#: neutron/api/v2/attributes.py:82 +#: neutron/api/v2/attributes.py:83 #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "" -#: neutron/api/v2/attributes.py:98 +#: neutron/api/v2/attributes.py:99 #, python-format msgid "'%s' Blank strings are not permitted" msgstr "" -#: neutron/api/v2/attributes.py:110 +#: neutron/api/v2/attributes.py:111 #, python-format msgid "'%s' is not a valid string" msgstr "" -#: neutron/api/v2/attributes.py:115 +#: neutron/api/v2/attributes.py:116 #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "" -#: neutron/api/v2/attributes.py:125 +#: neutron/api/v2/attributes.py:126 #, python-format msgid "'%s' is not a valid boolean value" msgstr "" -#: neutron/api/v2/attributes.py:144 neutron/api/v2/attributes.py:480 +#: neutron/api/v2/attributes.py:145 neutron/api/v2/attributes.py:485 #, python-format msgid "'%s' is not an integer" msgstr "" -#: neutron/api/v2/attributes.py:148 +#: neutron/api/v2/attributes.py:149 #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "" -#: neutron/api/v2/attributes.py:153 +#: neutron/api/v2/attributes.py:154 #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "" -#: neutron/api/v2/attributes.py:162 +#: neutron/api/v2/attributes.py:163 #, python-format msgid "'%s' contains whitespace" msgstr "" -#: neutron/api/v2/attributes.py:177 +#: neutron/api/v2/attributes.py:182 #, python-format msgid "'%s' is not a valid MAC address" msgstr "" -#: neutron/api/v2/attributes.py:206 +#: neutron/api/v2/attributes.py:211 #, python-format msgid "'%s' is not a valid IP address" msgstr "" -#: neutron/api/v2/attributes.py:217 +#: neutron/api/v2/attributes.py:222 #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:234 neutron/api/v2/attributes.py:241 +#: 
neutron/api/v2/attributes.py:239 neutron/api/v2/attributes.py:246 #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:249 +#: neutron/api/v2/attributes.py:254 #, python-format msgid "Duplicate IP address '%s'" msgstr "" -#: neutron/api/v2/attributes.py:264 +#: neutron/api/v2/attributes.py:269 #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:273 +#: neutron/api/v2/attributes.py:278 #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "" -#: neutron/api/v2/attributes.py:278 +#: neutron/api/v2/attributes.py:283 #, python-format msgid "Duplicate nameserver '%s'" msgstr "" -#: neutron/api/v2/attributes.py:286 +#: neutron/api/v2/attributes.py:291 #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:303 +#: neutron/api/v2/attributes.py:308 #, python-format msgid "Duplicate hostroute '%s'" msgstr "" -#: neutron/api/v2/attributes.py:319 -#: neutron/tests/unit/api/v2/test_attributes.py:502 -#: neutron/tests/unit/api/v2/test_attributes.py:516 -#: neutron/tests/unit/api/v2/test_attributes.py:524 +#: neutron/api/v2/attributes.py:324 +#: neutron/tests/unit/api/v2/test_attributes.py:507 +#: neutron/tests/unit/api/v2/test_attributes.py:521 +#: neutron/tests/unit/api/v2/test_attributes.py:529 #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" -#: neutron/api/v2/attributes.py:325 +#: neutron/api/v2/attributes.py:330 #, python-format msgid "'%s' is not a valid IP subnet" msgstr "" -#: neutron/api/v2/attributes.py:333 neutron/api/v2/attributes.py:394 +#: neutron/api/v2/attributes.py:338 neutron/api/v2/attributes.py:399 #, python-format msgid "'%s' is not a list" msgstr "" -#: neutron/api/v2/attributes.py:338 neutron/api/v2/attributes.py:404 +#: neutron/api/v2/attributes.py:343 neutron/api/v2/attributes.py:409 #, python-format msgid 
"Duplicate items in the list: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:360 +#: neutron/api/v2/attributes.py:365 #, python-format msgid "'%s' is not a valid input" msgstr "" -#: neutron/api/v2/attributes.py:382 +#: neutron/api/v2/attributes.py:387 #: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:532 #, python-format msgid "'%s' is not a valid UUID" msgstr "" -#: neutron/api/v2/attributes.py:424 +#: neutron/api/v2/attributes.py:429 #, python-format msgid "Validator '%s' does not exist." msgstr "" -#: neutron/api/v2/attributes.py:436 +#: neutron/api/v2/attributes.py:441 #, python-format msgid "'%s' is not a dictionary" msgstr "" -#: neutron/api/v2/attributes.py:485 +#: neutron/api/v2/attributes.py:490 #, python-format msgid "'%s' should be non-negative" msgstr "" -#: neutron/api/v2/attributes.py:504 +#: neutron/api/v2/attributes.py:509 #, python-format msgid "'%s' cannot be converted to boolean" msgstr "" -#: neutron/api/v2/attributes.py:517 +#: neutron/api/v2/attributes.py:522 #: neutron/plugins/nec/extensions/packetfilter.py:73 #, python-format msgid "'%s' is not a integer" msgstr "" -#: neutron/api/v2/attributes.py:540 +#: neutron/api/v2/attributes.py:545 #, python-format msgid "'%s' must be a non negative decimal." msgstr "" -#: neutron/api/v2/attributes.py:554 +#: neutron/api/v2/attributes.py:559 #, python-format msgid "'%s' is not of the form =[value]" msgstr "" +#: neutron/api/v2/attributes.py:901 +#, python-format +msgid "Failed to parse request. Required attribute '%s' not specified" +msgstr "" + +#: neutron/api/v2/attributes.py:908 +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "" + +#: neutron/api/v2/attributes.py:927 +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." 
+msgstr "" + +#: neutron/api/v2/attributes.py:936 +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request " +"requires admin privileges" +msgstr "" + +#: neutron/api/v2/attributes.py:944 +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "" + +#: neutron/api/v2/attributes.py:952 +#: neutron/extensions/allowedaddresspairs.py:76 +#: neutron/extensions/multiprovidernet.py:45 +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "" + #: neutron/api/v2/base.py:93 msgid "Native pagination depend on native sorting" msgstr "" -#: neutron/api/v2/base.py:537 +#: neutron/api/v2/base.py:542 #, python-format msgid "Invalid format: %s" msgstr "" -#: neutron/api/v2/base.py:604 -msgid "" -"Specifying 'tenant_id' other than authenticated tenant in request " -"requires admin privileges" -msgstr "" - -#: neutron/api/v2/base.py:612 -msgid "Running without keystone AuthN requires that tenant_id is specified" -msgstr "" - -#: neutron/api/v2/base.py:630 +#: neutron/api/v2/base.py:618 msgid "Resource body required" msgstr "" -#: neutron/api/v2/base.py:636 +#: neutron/api/v2/base.py:624 msgid "Bulk operation not supported" msgstr "" -#: neutron/api/v2/base.py:639 +#: neutron/api/v2/base.py:627 msgid "Resources required" msgstr "" -#: neutron/api/v2/base.py:649 +#: neutron/api/v2/base.py:637 msgid "Body contains invalid data" msgstr "" -#: neutron/api/v2/base.py:663 -#, python-format -msgid "Failed to parse request. Required attribute '%s' not specified" -msgstr "" - -#: neutron/api/v2/base.py:670 -#, python-format -msgid "Attribute '%s' not allowed in POST" -msgstr "" - -#: neutron/api/v2/base.py:675 +#: neutron/api/v2/base.py:652 #, python-format msgid "Cannot update read-only attribute %s" msgstr "" -#: neutron/api/v2/base.py:693 -#, python-format -msgid "Invalid input for %(attr)s. Reason: %(reason)s." 
-msgstr "" - -#: neutron/api/v2/base.py:702 neutron/extensions/allowedaddresspairs.py:76 -#: neutron/extensions/multiprovidernet.py:45 -#, python-format -msgid "Unrecognized attribute(s) '%s'" -msgstr "" - -#: neutron/api/v2/base.py:721 +#: neutron/api/v2/base.py:674 #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" @@ -1196,7 +1222,7 @@ msgstr "" msgid "The core plugin Neutron will use" msgstr "" -#: neutron/common/config.py:54 neutron/db/migration/cli.py:47 +#: neutron/common/config.py:54 msgid "The service plugins Neutron will use" msgstr "" @@ -1394,441 +1420,480 @@ msgstr "" #: neutron/common/exceptions.py:81 #, python-format -msgid "Network %(net_id)s could not be found" +msgid "Object %(id)s not found." msgstr "" #: neutron/common/exceptions.py:85 #, python-format -msgid "Subnet %(subnet_id)s could not be found" +msgid "Network %(net_id)s could not be found" msgstr "" #: neutron/common/exceptions.py:89 #, python-format -msgid "Subnet pool %(subnetpool_id)s could not be found" +msgid "Subnet %(subnet_id)s could not be found" msgstr "" #: neutron/common/exceptions.py:93 #, python-format -msgid "Port %(port_id)s could not be found" +msgid "Subnet pool %(subnetpool_id)s could not be found" msgstr "" #: neutron/common/exceptions.py:97 #, python-format -msgid "Port %(port_id)s could not be found on network %(net_id)s" +msgid "Port %(port_id)s could not be found" msgstr "" -#: neutron/common/exceptions.py:102 -msgid "Policy configuration policy.json could not be found" -msgstr "" - -#: neutron/common/exceptions.py:106 +#: neutron/common/exceptions.py:101 #, python-format -msgid "Failed to init policy %(policy)s because %(reason)s" +msgid "QoS policy %(policy_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:105 +#, python-format +msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found" msgstr "" #: neutron/common/exceptions.py:110 #, python-format +msgid "Port %(port_id)s 
could not be found on network %(net_id)s" +msgstr "" + +#: neutron/common/exceptions.py:115 +#, python-format +msgid "" +"QoS binding for port %(port_id)s and policy %(policy_id)s could not be " +"found" +msgstr "" + +#: neutron/common/exceptions.py:120 +#, python-format +msgid "" +"QoS binding for network %(net_id)s and policy %(policy_id)s could not be " +"found" +msgstr "" + +#: neutron/common/exceptions.py:125 +msgid "Policy configuration policy.json could not be found" +msgstr "" + +#: neutron/common/exceptions.py:129 +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:133 +#, python-format msgid "Failed to check policy %(policy)s because %(reason)s" msgstr "" -#: neutron/common/exceptions.py:114 +#: neutron/common/exceptions.py:137 #, python-format msgid "Unsupported port state: %(port_state)s" msgstr "" -#: neutron/common/exceptions.py:118 +#: neutron/common/exceptions.py:141 msgid "The resource is inuse" msgstr "" -#: neutron/common/exceptions.py:122 +#: neutron/common/exceptions.py:145 +#, python-format +msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." +msgstr "" + +#: neutron/common/exceptions.py:150 #, python-format msgid "" "Unable to complete operation on network %(net_id)s. There are one or more" " ports still in use on the network." msgstr "" -#: neutron/common/exceptions.py:127 +#: neutron/common/exceptions.py:155 #, python-format msgid "Unable to complete operation on subnet %(subnet_id)s. %(reason)s" msgstr "" -#: neutron/common/exceptions.py:132 +#: neutron/common/exceptions.py:160 msgid "One or more ports have an IP allocation from this subnet." msgstr "" -#: neutron/common/exceptions.py:138 +#: neutron/common/exceptions.py:166 #, python-format msgid "" "Unable to complete operation on port %(port_id)s for network %(net_id)s. " "Port already has an attached device %(device_id)s." 
msgstr "" -#: neutron/common/exceptions.py:144 +#: neutron/common/exceptions.py:172 #, python-format msgid "Port %(port_id)s cannot be deleted directly via the port API: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:149 +#: neutron/common/exceptions.py:177 #, python-format msgid "" "Unable to complete operation on port %(port_id)s, port is already bound, " "port type: %(vif_type)s, old_mac %(old_mac)s, new_mac %(new_mac)s" msgstr "" -#: neutron/common/exceptions.py:155 +#: neutron/common/exceptions.py:183 #, python-format msgid "" "Unable to complete operation for network %(net_id)s. The mac address " "%(mac)s is in use." msgstr "" -#: neutron/common/exceptions.py:161 +#: neutron/common/exceptions.py:189 #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes" " exceeds the limit %(quota)s." msgstr "" -#: neutron/common/exceptions.py:167 +#: neutron/common/exceptions.py:195 #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" -#: neutron/common/exceptions.py:172 +#: neutron/common/exceptions.py:200 #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the" " specified network." msgstr "" -#: neutron/common/exceptions.py:177 +#: neutron/common/exceptions.py:205 #, python-format msgid "IP address %(ip_address)s is not a valid IP for the specified subnet." msgstr "" -#: neutron/common/exceptions.py:182 +#: neutron/common/exceptions.py:210 #, python-format msgid "" "Unable to complete operation for network %(net_id)s. The IP address " "%(ip_address)s is in use." msgstr "" -#: neutron/common/exceptions.py:187 +#: neutron/common/exceptions.py:215 #, python-format msgid "" "Unable to create the network. The VLAN %(vlan_id)s on physical network " "%(physical_network)s is in use." 
msgstr "" -#: neutron/common/exceptions.py:193 +#: neutron/common/exceptions.py:221 #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s " "is in use." msgstr "" -#: neutron/common/exceptions.py:198 +#: neutron/common/exceptions.py:226 #, python-format msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." msgstr "" -#: neutron/common/exceptions.py:203 +#: neutron/common/exceptions.py:231 msgid "Tenant network creation is not enabled." msgstr "" -#: neutron/common/exceptions.py:211 +#: neutron/common/exceptions.py:239 msgid "" "Unable to create the network. No tenant network is available for " "allocation." msgstr "" -#: neutron/common/exceptions.py:216 +#: neutron/common/exceptions.py:244 msgid "" "Unable to create the network. No available network found in maximum " "allowed attempts." msgstr "" -#: neutron/common/exceptions.py:221 +#: neutron/common/exceptions.py:249 #, python-format msgid "" "Subnet on port %(port_id)s does not match the requested subnet " "%(subnet_id)s" msgstr "" -#: neutron/common/exceptions.py:226 +#: neutron/common/exceptions.py:254 #, python-format msgid "Malformed request body: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:236 +#: neutron/common/exceptions.py:264 #, python-format msgid "Invalid input for operation: %(error_message)s." msgstr "" -#: neutron/common/exceptions.py:240 +#: neutron/common/exceptions.py:268 #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "" -#: neutron/common/exceptions.py:244 +#: neutron/common/exceptions.py:272 #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on " "port %(port_id)s." msgstr "" -#: neutron/common/exceptions.py:249 +#: neutron/common/exceptions.py:277 #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." 
msgstr "" -#: neutron/common/exceptions.py:254 +#: neutron/common/exceptions.py:282 #, python-format msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" -#: neutron/common/exceptions.py:259 +#: neutron/common/exceptions.py:287 #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "" -#: neutron/common/exceptions.py:263 +#: neutron/common/exceptions.py:291 #, python-format msgid "No more IP addresses available on network %(net_id)s." msgstr "" -#: neutron/common/exceptions.py:267 +#: neutron/common/exceptions.py:295 #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "" -#: neutron/common/exceptions.py:271 +#: neutron/common/exceptions.py:299 #, python-format msgid "Creation failed. %(dev_name)s already exists." msgstr "" -#: neutron/common/exceptions.py:275 +#: neutron/common/exceptions.py:303 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "" -#: neutron/common/exceptions.py:279 +#: neutron/common/exceptions.py:307 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: neutron/common/exceptions.py:283 +#: neutron/common/exceptions.py:311 msgid "Tenant-id was missing from Quota request" msgstr "" -#: neutron/common/exceptions.py:287 +#: neutron/common/exceptions.py:315 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " "%(unders)s" msgstr "" -#: neutron/common/exceptions.py:292 +#: neutron/common/exceptions.py:320 #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. 
Multiple " "tenants are using it" msgstr "" -#: neutron/common/exceptions.py:297 +#: neutron/common/exceptions.py:325 #, python-format msgid "Invalid extension environment: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:301 +#: neutron/common/exceptions.py:329 #, python-format msgid "Extensions not found: %(extensions)s" msgstr "" -#: neutron/common/exceptions.py:305 +#: neutron/common/exceptions.py:333 #, python-format msgid "Invalid content type %(content_type)s" msgstr "" -#: neutron/common/exceptions.py:309 +#: neutron/common/exceptions.py:337 #, python-format msgid "Unable to find any IP address on external network %(net_id)s." msgstr "" -#: neutron/common/exceptions.py:314 +#: neutron/common/exceptions.py:342 msgid "More than one external network exists" msgstr "" -#: neutron/common/exceptions.py:318 +#: neutron/common/exceptions.py:346 #, python-format msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" msgstr "" -#: neutron/common/exceptions.py:323 +#: neutron/common/exceptions.py:351 #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" msgstr "" -#: neutron/common/exceptions.py:328 +#: neutron/common/exceptions.py:356 #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. " "Unable to update." msgstr "" -#: neutron/common/exceptions.py:333 +#: neutron/common/exceptions.py:361 #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" msgstr "" -#: neutron/common/exceptions.py:343 +#: neutron/common/exceptions.py:371 msgid "Empty physical network name." 
msgstr "" -#: neutron/common/exceptions.py:347 +#: neutron/common/exceptions.py:375 #, python-format msgid "Invalid network Tunnel range: '%(tunnel_range)s' - %(error)s" msgstr "" -#: neutron/common/exceptions.py:358 +#: neutron/common/exceptions.py:386 #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" msgstr "" -#: neutron/common/exceptions.py:362 +#: neutron/common/exceptions.py:390 msgid "VXLAN Network unsupported." msgstr "" -#: neutron/common/exceptions.py:366 +#: neutron/common/exceptions.py:394 #, python-format msgid "Found duplicate extension: %(alias)s" msgstr "" -#: neutron/common/exceptions.py:370 +#: neutron/common/exceptions.py:398 #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or " "matches another tenants router." msgstr "" -#: neutron/common/exceptions.py:375 +#: neutron/common/exceptions.py:403 #, python-format msgid "Invalid CIDR %(input)s given as IP prefix" msgstr "" -#: neutron/common/exceptions.py:379 +#: neutron/common/exceptions.py:407 #, python-format msgid "Router '%(router_id)s' is not compatible with this agent" msgstr "" -#: neutron/common/exceptions.py:383 +#: neutron/common/exceptions.py:411 #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA" msgstr "" -#: neutron/common/exceptions.py:404 +#: neutron/common/exceptions.py:432 msgid "network_id and router_id are None. One must be provided." msgstr "" -#: neutron/common/exceptions.py:408 +#: neutron/common/exceptions.py:436 msgid "Aborting periodic_sync_routers_task due to an error" msgstr "" -#: neutron/common/exceptions.py:420 +#: neutron/common/exceptions.py:448 #, python-format msgid "%(driver)s: Internal driver error." 
msgstr "" -#: neutron/common/exceptions.py:424 +#: neutron/common/exceptions.py:452 msgid "Unspecified minimum subnet pool prefix" msgstr "" -#: neutron/common/exceptions.py:428 +#: neutron/common/exceptions.py:456 msgid "Empty subnet pool prefix list" msgstr "" -#: neutron/common/exceptions.py:432 +#: neutron/common/exceptions.py:460 msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool" msgstr "" -#: neutron/common/exceptions.py:436 +#: neutron/common/exceptions.py:464 #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool" msgstr "" -#: neutron/common/exceptions.py:440 +#: neutron/common/exceptions.py:468 #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:445 +#: neutron/common/exceptions.py:473 #, python-format msgid "Illegal update to prefixes: %(msg)s" msgstr "" -#: neutron/common/exceptions.py:449 +#: neutron/common/exceptions.py:477 #, python-format msgid "Failed to allocate subnet: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:453 +#: neutron/common/exceptions.py:481 msgid "" "Failed to associate address scope: subnetpools within an address scope " "must have unique prefixes" msgstr "" -#: neutron/common/exceptions.py:458 +#: neutron/common/exceptions.py:486 #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s" msgstr "" -#: neutron/common/exceptions.py:464 +#: neutron/common/exceptions.py:492 #, python-format msgid "Illegal subnetpool update : %(reason)s" msgstr "" -#: neutron/common/exceptions.py:468 +#: neutron/common/exceptions.py:496 #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum " "allowed prefix is %(min_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:473 +#: neutron/common/exceptions.py:501 #, python-format msgid "" "Unable to allocate subnet 
with prefix length %(prefixlen)s, maximum " "allowed prefix is %(max_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:478 +#: neutron/common/exceptions.py:506 #, python-format msgid "Unable to delete subnet pool: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:482 +#: neutron/common/exceptions.py:510 msgid "Per-tenant subnet pool prefix quota exceeded" msgstr "" -#: neutron/common/exceptions.py:486 +#: neutron/common/exceptions.py:514 #, python-format msgid "Device '%(device_name)s' does not exist" msgstr "" -#: neutron/common/exceptions.py:490 +#: neutron/common/exceptions.py:518 msgid "" "Subnets hosted on the same network must be allocated from the same subnet" " pool" msgstr "" +#: neutron/common/exceptions.py:523 +#, python-format +msgid "Object action %(action)s failed because: %(reason)s" +msgstr "" + #: neutron/common/ipv6_utils.py:36 msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "" @@ -1934,19 +1999,19 @@ msgstr "" msgid "Cannot create resource for another tenant" msgstr "" -#: neutron/db/db_base_plugin_v2.py:117 neutron/db/db_base_plugin_v2.py:121 +#: neutron/db/db_base_plugin_v2.py:116 neutron/db/db_base_plugin_v2.py:120 #, python-format msgid "Invalid route: %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:173 +#: neutron/db/db_base_plugin_v2.py:172 #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" -#: neutron/db/db_base_plugin_v2.py:181 +#: neutron/db/db_base_plugin_v2.py:180 #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " @@ -1954,87 +2019,87 @@ msgid "" "the same value" msgstr "" -#: neutron/db/db_base_plugin_v2.py:189 +#: neutron/db/db_base_plugin_v2.py:188 msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " "to False." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:195 +#: neutron/db/db_base_plugin_v2.py:194 msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" -#: neutron/db/db_base_plugin_v2.py:344 +#: neutron/db/db_base_plugin_v2.py:343 #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" -#: neutron/db/db_base_plugin_v2.py:371 +#: neutron/db/db_base_plugin_v2.py:370 msgid "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" -#: neutron/db/db_base_plugin_v2.py:392 +#: neutron/db/db_base_plugin_v2.py:391 msgid "Gateway is not valid on subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:412 neutron/db/db_base_plugin_v2.py:426 +#: neutron/db/db_base_plugin_v2.py:411 neutron/db/db_base_plugin_v2.py:425 #: neutron/plugins/opencontrail/contrail_plugin.py:313 msgid "new subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:419 +#: neutron/db/db_base_plugin_v2.py:418 #, python-format msgid "Error parsing dns address %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:435 +#: neutron/db/db_base_plugin_v2.py:434 msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:439 +#: neutron/db/db_base_plugin_v2.py:438 msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:447 +#: neutron/db/db_base_plugin_v2.py:446 msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "" -#: neutron/db/db_base_plugin_v2.py:457 +#: neutron/db/db_base_plugin_v2.py:456 msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" -#: neutron/db/db_base_plugin_v2.py:463 +#: neutron/db/db_base_plugin_v2.py:462 msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:542 +#: neutron/db/db_base_plugin_v2.py:541 msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" -#: neutron/db/db_base_plugin_v2.py:559 +#: neutron/db/db_base_plugin_v2.py:558 msgid "cidr and prefixlen must not be supplied together" msgstr "" -#: neutron/db/db_base_plugin_v2.py:584 +#: neutron/db/db_base_plugin_v2.py:583 msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:817 +#: neutron/db/db_base_plugin_v2.py:820 #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with " "shared address scope %(address_scope_id)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:872 +#: neutron/db/db_base_plugin_v2.py:875 msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" -#: neutron/db/db_base_plugin_v2.py:945 +#: neutron/db/db_base_plugin_v2.py:948 msgid "Subnet pool has existing allocations" msgstr "" -#: neutron/db/db_base_plugin_v2.py:952 +#: neutron/db/db_base_plugin_v2.py:955 msgid "mac address update" msgstr "" @@ -2099,34 +2164,34 @@ msgstr "" msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" -#: neutron/db/ipam_backend_mixin.py:214 +#: neutron/db/ipam_backend_mixin.py:212 msgid "0 is not allowed as CIDR prefix length" msgstr "" -#: neutron/db/ipam_backend_mixin.py:225 +#: neutron/db/ipam_backend_mixin.py:223 #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" " with another subnet" msgstr "" -#: neutron/db/ipam_backend_mixin.py:303 +#: neutron/db/ipam_backend_mixin.py:301 #: neutron/plugins/opencontrail/contrail_plugin.py:390 msgid "Exceeded maximim amount of fixed ips per port" msgstr "" -#: neutron/db/ipam_backend_mixin.py:310 +#: neutron/db/ipam_backend_mixin.py:308 #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips " "included invalid subnet 
%(subnet_id)s" msgstr "" -#: neutron/db/ipam_backend_mixin.py:324 +#: neutron/db/ipam_backend_mixin.py:322 msgid "IP allocation requires subnet_id or ip_address" msgstr "" -#: neutron/db/ipam_backend_mixin.py:372 +#: neutron/db/ipam_backend_mixin.py:370 msgid "Exceeded maximum amount of fixed ips per port" msgstr "" @@ -2152,42 +2217,42 @@ msgid "" "agents." msgstr "" -#: neutron/db/l3_db.py:273 +#: neutron/db/l3_db.py:274 #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "" -#: neutron/db/l3_db.py:311 +#: neutron/db/l3_db.py:312 #, python-format msgid "Network %s is not an external network" msgstr "" -#: neutron/db/l3_db.py:321 +#: neutron/db/l3_db.py:322 #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "" -#: neutron/db/l3_db.py:471 +#: neutron/db/l3_db.py:472 #, python-format msgid "Router already has a port on subnet %s" msgstr "" -#: neutron/db/l3_db.py:488 +#: neutron/db/l3_db.py:489 #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " "of subnet %(sub_id)s" msgstr "" -#: neutron/db/l3_db.py:504 neutron/plugins/opencontrail/contrail_plugin.py:501 +#: neutron/db/l3_db.py:505 neutron/plugins/opencontrail/contrail_plugin.py:501 msgid "Either subnet_id or port_id must be specified" msgstr "" -#: neutron/db/l3_db.py:508 neutron/plugins/opencontrail/contrail_plugin.py:511 +#: neutron/db/l3_db.py:509 neutron/plugins/opencontrail/contrail_plugin.py:511 msgid "Cannot specify both subnet-id and port-id" msgstr "" -#: neutron/db/l3_db.py:529 +#: neutron/db/l3_db.py:530 #, python-format msgid "" "Cannot have multiple router ports with the same network id if both " @@ -2195,77 +2260,77 @@ msgid "" "id %(nid)s" msgstr "" -#: neutron/db/l3_db.py:571 +#: neutron/db/l3_db.py:572 msgid "Subnet for router interface must have a gateway IP" msgstr "" -#: neutron/db/l3_db.py:575 +#: neutron/db/l3_db.py:576 #, python-format msgid "" "IPv6 subnet %s configured to 
receive RAs from an external router cannot " "be added to Neutron Router." msgstr "" -#: neutron/db/l3_db.py:788 +#: neutron/db/l3_db.py:787 #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" -#: neutron/db/l3_db.py:833 +#: neutron/db/l3_db.py:832 #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" -#: neutron/db/l3_db.py:837 +#: neutron/db/l3_db.py:836 #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is " "owned by a different tenant." msgstr "" -#: neutron/db/l3_db.py:849 +#: neutron/db/l3_db.py:848 #, python-format msgid "" "Floating IP %(floatingip_id) is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" -#: neutron/db/l3_db.py:853 +#: neutron/db/l3_db.py:852 #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" -#: neutron/db/l3_db.py:861 +#: neutron/db/l3_db.py:860 #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "" -#: neutron/db/l3_db.py:868 +#: neutron/db/l3_db.py:867 #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" -#: neutron/db/l3_db.py:872 +#: neutron/db/l3_db.py:871 #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. 
Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" -#: neutron/db/l3_db.py:901 +#: neutron/db/l3_db.py:900 msgid "fixed_ip_address cannot be specified without a port_id" msgstr "" -#: neutron/db/l3_db.py:945 +#: neutron/db/l3_db.py:944 #, python-format msgid "Network %s is not a valid external network" msgstr "" -#: neutron/db/l3_db.py:949 +#: neutron/db/l3_db.py:948 #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "" @@ -2275,21 +2340,21 @@ msgstr "" msgid "has device owner %s" msgstr "" -#: neutron/db/l3_dvr_db.py:52 +#: neutron/db/l3_dvr_db.py:53 msgid "" "System-wide flag to determine the type of router that tenants can create." " Only admin can override." msgstr "" -#: neutron/db/l3_dvr_db.py:90 +#: neutron/db/l3_dvr_db.py:91 msgid "Migration from distributed router to centralized" msgstr "" -#: neutron/db/l3_dvr_db.py:574 +#: neutron/db/l3_dvr_db.py:579 msgid "Unable to create the Agent Gateway Port" msgstr "" -#: neutron/db/l3_dvr_db.py:606 +#: neutron/db/l3_dvr_db.py:610 msgid "Unable to create the SNAT Interface Port" msgstr "" @@ -2299,22 +2364,34 @@ msgid "" "external_gateway_info." msgstr "" -#: neutron/db/l3_hamode_db.py:44 +#: neutron/db/l3_hamode_db.py:47 msgid "Enable HA mode for virtual routers." msgstr "" -#: neutron/db/l3_hamode_db.py:47 +#: neutron/db/l3_hamode_db.py:50 msgid "Maximum number of agents on which a router will be scheduled." msgstr "" -#: neutron/db/l3_hamode_db.py:51 +#: neutron/db/l3_hamode_db.py:54 msgid "Minimum number of agents on which a router will be scheduled." msgstr "" -#: neutron/db/l3_hamode_db.py:55 +#: neutron/db/l3_hamode_db.py:58 msgid "Subnet used for the l3 HA admin network." msgstr "" +#: neutron/db/l3_hamode_db.py:60 +msgid "" +"The network type to use when creating the HA network for an HA router. By" +" default or if empty, the first 'tenant_network_types' is used. 
This is " +"helpful when the VRRP traffic should use a specific network which is not " +"the default one." +msgstr "" + +#: neutron/db/l3_hamode_db.py:66 +msgid "The physical network name with which the HA network can be created." +msgstr "" + #: neutron/db/rbac_db_models.py:27 #, python-format msgid "" @@ -2322,12 +2399,12 @@ msgid "" "actions: %(valid_actions)s" msgstr "" -#: neutron/db/securitygroups_db.py:271 neutron/db/securitygroups_db.py:612 +#: neutron/db/securitygroups_db.py:271 neutron/db/securitygroups_db.py:613 #, python-format msgid "cannot be deleted due to %s" msgstr "" -#: neutron/db/securitygroups_db.py:663 +#: neutron/db/securitygroups_db.py:673 msgid "Default security group" msgstr "" @@ -2353,68 +2430,109 @@ msgstr "" msgid "%s cannot be called while in offline mode" msgstr "" -#: neutron/db/migration/cli.py:44 +#: neutron/db/migration/cli.py:54 +#, python-format +msgid "Can be one of '%s'." +msgstr "" + +#: neutron/db/migration/cli.py:56 +msgid "(No services are currently installed)." +msgstr "" + +#: neutron/db/migration/cli.py:62 msgid "Neutron plugin provider module" msgstr "" -#: neutron/db/migration/cli.py:50 -#, python-format -msgid "The advanced service to execute the command against. Can be one of '%s'." +#: neutron/db/migration/cli.py:65 +msgid "The advanced service to execute the command against. " msgstr "" -#: neutron/db/migration/cli.py:54 +#: neutron/db/migration/cli.py:69 +#, python-format +msgid "The subproject to execute the command against. Can be one of %s." +msgstr "" + +#: neutron/db/migration/cli.py:73 msgid "Enforce using split branches file structure." 
msgstr "" -#: neutron/db/migration/cli.py:60 +#: neutron/db/migration/cli.py:79 msgid "Neutron quota driver class" msgstr "" -#: neutron/db/migration/cli.py:68 +#: neutron/db/migration/cli.py:87 msgid "URL to database" msgstr "" -#: neutron/db/migration/cli.py:71 +#: neutron/db/migration/cli.py:90 msgid "Database engine" msgstr "" -#: neutron/db/migration/cli.py:98 +#: neutron/db/migration/cli.py:101 +#, python-format +msgid "Running %(cmd)s for %(project)s ..." +msgstr "" + +#: neutron/db/migration/cli.py:107 +msgid "OK" +msgstr "" + +#: neutron/db/migration/cli.py:112 +#, python-format +msgid "Sub-project %s not installed." +msgstr "" + +#: neutron/db/migration/cli.py:128 msgid "You must provide a revision or relative delta" msgstr "" -#: neutron/db/migration/cli.py:102 +#: neutron/db/migration/cli.py:132 msgid "Negative relative revision (downgrade) not supported" msgstr "" -#: neutron/db/migration/cli.py:108 +#: neutron/db/migration/cli.py:138 msgid "Use either --delta or relative revision, not both" msgstr "" -#: neutron/db/migration/cli.py:111 +#: neutron/db/migration/cli.py:141 msgid "Negative delta (downgrade) not supported" msgstr "" -#: neutron/db/migration/cli.py:124 +#: neutron/db/migration/cli.py:154 msgid "Downgrade no longer supported" msgstr "" -#: neutron/db/migration/cli.py:181 +#: neutron/db/migration/cli.py:212 +#, python-format +msgid "Unexpected label for script %(script_name)s: %(labels)s" +msgstr "" + +#: neutron/db/migration/cli.py:261 #, python-format msgid "No new branches are allowed except: %s" msgstr "" -#: neutron/db/migration/cli.py:199 +#: neutron/db/migration/cli.py:279 #, python-format msgid "HEADS file does not match migration timeline heads, expected: %s" msgstr "" -#: neutron/db/migration/cli.py:250 +#: neutron/db/migration/cli.py:334 msgid "Available commands" msgstr "" -#: neutron/db/migration/cli.py:324 +#: neutron/db/migration/cli.py:350 #, python-format -msgid "Package neutron-%s not installed" +msgid "Failed to locate 
source for %s." +msgstr "" + +#: neutron/db/migration/cli.py:422 +#, python-format +msgid "Package %s not installed" +msgstr "" + +#: neutron/db/migration/cli.py:511 +msgid "Cannot specify both --service and --subproject." msgstr "" #: neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py:45 @@ -2919,13 +3037,13 @@ msgstr "" msgid "'%s' is not an integer or uuid" msgstr "" -#: neutron/extensions/securitygroup.py:269 +#: neutron/extensions/securitygroup.py:271 msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" -#: neutron/extensions/securitygroup.py:273 +#: neutron/extensions/securitygroup.py:275 msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." @@ -3008,6 +3126,20 @@ msgstr "" msgid "Unsupported request type" msgstr "" +#: neutron/objects/base.py:24 +#, python-format +msgid "Unable to update the following object fields: %(fields)s" +msgstr "" + +#: neutron/objects/base.py:28 +msgid "Failed to create a duplicate object" +msgstr "" + +#: neutron/objects/base.py:69 +#, python-format +msgid "'%s' is not supported for filtering" +msgstr "" + #: neutron/plugins/brocade/NeutronPlugin.py:61 #: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:22 #: neutron/services/l3_router/brocade/l3_router_plugin.py:23 @@ -3584,21 +3716,21 @@ msgstr "" msgid "Cannot delete network '%s' that is a member of a multi-segment network" msgstr "" -#: neutron/plugins/common/utils.py:44 +#: neutron/plugins/common/utils.py:47 #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "" -#: neutron/plugins/common/utils.py:49 +#: neutron/plugins/common/utils.py:52 msgid "End of tunnel range is less than start of tunnel range" msgstr "" -#: neutron/plugins/common/utils.py:59 +#: neutron/plugins/common/utils.py:62 #, python-format msgid "%s is not a valid VLAN tag" msgstr "" -#: neutron/plugins/common/utils.py:63 +#: 
neutron/plugins/common/utils.py:66 msgid "End of VLAN range is less than start of VLAN range" msgstr "" @@ -3996,16 +4128,16 @@ msgid "" "configured in type_drivers config option." msgstr "" -#: neutron/plugins/ml2/managers.py:99 +#: neutron/plugins/ml2/managers.py:101 msgid "network_type required" msgstr "" -#: neutron/plugins/ml2/managers.py:206 neutron/plugins/ml2/managers.py:215 +#: neutron/plugins/ml2/managers.py:208 neutron/plugins/ml2/managers.py:217 #, python-format msgid "network_type value '%s' not supported" msgstr "" -#: neutron/plugins/ml2/plugin.py:239 +#: neutron/plugins/ml2/plugin.py:246 msgid "binding:profile value too large" msgstr "" @@ -4014,6 +4146,11 @@ msgstr "" msgid "%(method)s failed." msgstr "" +#: neutron/plugins/ml2/common/exceptions.py:28 +#, python-format +msgid "Extension %(driver)s failed." +msgstr "" + #: neutron/plugins/ml2/drivers/type_flat.py:34 msgid "" "List of physical_network names with which flat networks can be created. " @@ -4229,15 +4366,15 @@ msgid "" "supports matching ARP headers." msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:52 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:53 msgid "Device not found" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:66 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:67 msgid "Device has no virtual functions" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:326 +#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:351 #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "" @@ -4286,22 +4423,22 @@ msgstr "" msgid "Unsupported network type %(net_type)s." 
msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:35 +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:36 msgid "" "Supported PCI vendor devices, defined by vendor_id:product_id according " "to the PCI ID Repository. Default enables support for Intel and Mellanox " "SR-IOV capable NICs" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:41 +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:42 msgid "SRIOV neutron agent is required for port binding" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:92 +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:95 msgid "Parsing supported pci_vendor_devs failed" msgstr "" -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:189 +#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:192 #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" @@ -4328,48 +4465,48 @@ msgid "" " daemon, i.e. value of 2 will double the request timeout each retry" msgstr "" -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:30 +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:27 msgid "HTTP URL of OpenDaylight REST interface." msgstr "" -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:32 +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:29 msgid "HTTP username for authentication" msgstr "" -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:34 +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:31 msgid "HTTP password for authentication" msgstr "" -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:36 +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:33 msgid "HTTP timeout in seconds." msgstr "" -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:38 +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:35 msgid "Tomcat session timeout in minutes." 
msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:67 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:69 #, python-format msgid "Unable to retrieve port details for devices: %(devices)s " msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1675 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1732 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1689 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1746 #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1711 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1768 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1714 +#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1771 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -4449,6 +4586,12 @@ msgstr "" msgid "Make the l2 agent run in DVR mode." msgstr "" +#: neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py:102 +msgid "" +"Reset flow table on start. Setting this to True will cause brief traffic " +"interruption." +msgstr "" + #: neutron/plugins/nec/config.py:33 msgid "Host to connect to." msgstr "" @@ -4634,122 +4777,51 @@ msgstr "" msgid "Unable to connect to NVSD controller. 
Exiting after %(retries)s attempts" msgstr "" -#: neutron/plugins/vmware/extensions/networkgw.py:100 -msgid "Cannot create a gateway with an empty device list" -msgstr "" - -#: neutron/plugins/vmware/extensions/networkgw.py:116 -#, python-format -msgid "Unexpected keys found in device description:%s" -msgstr "" - -#: neutron/plugins/vmware/extensions/networkgw.py:120 -#, python-format -msgid "%s: provided data are not iterable" -msgstr "" - -#: neutron/plugins/vmware/extensions/networkgw.py:127 -msgid "A connector type is required to create a gateway device" -msgstr "" - -#: neutron/plugins/vmware/extensions/networkgw.py:136 -#, python-format -msgid "Unknown connector type: %s" -msgstr "" - -#: neutron/plugins/vmware/extensions/networkgw.py:143 -msgid "Number of network gateways allowed per tenant, -1 for unlimited" -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:34 -msgid "Need to be admin in order to create queue called default" -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:38 -msgid "Default queue already exists." -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:42 -#, python-format -msgid "Invalid value for dscp %(data)s must be integer value between 0 and 63." -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:47 -msgid "The qos marking cannot be set to 'trusted' when the DSCP field is set" -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:52 -msgid "Invalid bandwidth rate, min greater than max." -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:56 -#, python-format -msgid "Invalid bandwidth rate, %(data)s must be a non negative integer." -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:61 -#, python-format -msgid "Queue %(id)s does not exist" -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:65 -msgid "Unable to delete queue attached to port." 
-msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:69 -msgid "Port is not associated with lqueue" -msgstr "" - -#: neutron/plugins/vmware/extensions/qos.py:80 -#, python-format -msgid "'%s' must be a non negative integer." -msgstr "" - -#: neutron/quota/__init__.py:42 +#: neutron/quota/__init__.py:43 msgid "" "Resource name(s) that are supported in quota features. This option is now" " deprecated for removal." msgstr "" -#: neutron/quota/__init__.py:47 +#: neutron/quota/__init__.py:48 msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" -#: neutron/quota/__init__.py:51 +#: neutron/quota/__init__.py:52 msgid "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" -#: neutron/quota/__init__.py:55 +#: neutron/quota/__init__.py:56 msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" -#: neutron/quota/__init__.py:59 +#: neutron/quota/__init__.py:60 msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" -#: neutron/quota/__init__.py:63 +#: neutron/quota/__init__.py:64 msgid "Default driver to use for quota checks" msgstr "" -#: neutron/quota/__init__.py:66 +#: neutron/quota/__init__.py:67 msgid "" "Keep in track in the database of current resourcequota usage. Plugins " "which do not leverage the neutron database should set this flag to False" msgstr "" -#: neutron/quota/__init__.py:147 neutron/quota/__init__.py:152 +#: neutron/quota/__init__.py:148 neutron/quota/__init__.py:153 msgid "Access to this resource was denied." msgstr "" -#: neutron/server/__init__.py:38 +#: neutron/server/__init__.py:36 msgid "" "ERROR: Unable to find configuration file via the default search paths " "(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" 
msgstr "" -#: neutron/server/__init__.py:63 +#: neutron/server/__init__.py:61 #, python-format msgid "ERROR: %s" msgstr "" @@ -4760,55 +4832,55 @@ msgid "" "::[:default]" msgstr "" -#: neutron/services/provider_configuration.py:71 +#: neutron/services/provider_configuration.py:73 #, python-format msgid "Provider name is limited by 255 characters: %s" msgstr "" -#: neutron/services/provider_configuration.py:101 +#: neutron/services/provider_configuration.py:103 msgid "Invalid service provider format" msgstr "" -#: neutron/services/provider_configuration.py:109 +#: neutron/services/provider_configuration.py:111 #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" -#: neutron/services/provider_configuration.py:116 +#: neutron/services/provider_configuration.py:118 #, python-format msgid "Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s" msgstr "" -#: neutron/services/provider_configuration.py:131 +#: neutron/services/provider_configuration.py:133 #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" -#: neutron/services/provider_configuration.py:136 +#: neutron/services/provider_configuration.py:138 #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" -#: neutron/services/provider_configuration.py:141 +#: neutron/services/provider_configuration.py:143 #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" -#: neutron/services/provider_configuration.py:154 +#: neutron/services/provider_configuration.py:156 #, python-format msgid "Driver %s is not unique across providers" msgstr "" -#: neutron/services/provider_configuration.py:164 +#: neutron/services/provider_configuration.py:166 #, python-format msgid "Multiple default providers for service %s" msgstr "" -#: 
neutron/services/provider_configuration.py:175 +#: neutron/services/provider_configuration.py:177 #, python-format msgid "Multiple providers specified for service %s" msgstr "" @@ -4917,7 +4989,15 @@ msgstr "" msgid "An interface driver must be specified" msgstr "" -#: neutron/tests/base.py:115 +#: neutron/services/qos/notification_drivers/manager.py:22 +msgid "Drivers list to use to send the update notification" +msgstr "" + +#: neutron/services/qos/notification_drivers/manager.py:54 +msgid "A QoS driver must be specified" +msgstr "" + +#: neutron/tests/base.py:116 #, python-format msgid "Unknown attribute '%s'." msgstr "" @@ -4956,12 +5036,12 @@ msgid "" "operation." msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:444 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:476 #, python-format msgid "Deleting port %s" msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:445 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:477 #, python-format msgid "The port '%s' was deleted" msgstr "" @@ -4991,8 +5071,8 @@ msgid "" "network %(network)s with segments to bind %(segments_to_bind)s" msgstr "" -#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:950 -#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:967 +#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:1078 +#: neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py:1095 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000000..f2ac880939f --- /dev/null +++ b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,1108 @@ +# Translations template for neutron. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +# ADİL REŞİT DURSUN , 2015 +# Alper Çiftçi , 2015 +# Zana iLHAN , 2015 +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-08-21 06:08+0000\n" +"PO-Revision-Date: 2015-08-20 15:49+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/neutron/" +"language/tr_TR/)\n" +"Language: tr_TR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.0\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#, python-format +msgid "%(method)s to %(url)s, unexpected response code: %(status)d" +msgstr "%(url)s ye %(method)s, beklenmedik yanıt kodu: %(status)d" + +#, python-format +msgid "" +"%(service)s for %(resource_type)s with uuid %(uuid)s not found. The process " +"should not have died" +msgstr "" +"uuid %(uuid)s ile %(resource_type)s için %(service)s bulunamadı!, İşlem " +"sonlanmamış olmalı." + +#, python-format +msgid "%s Agent terminated!" +msgstr "%s Ajanı sonlandırıldı!" + +#, python-format +msgid "%s failed" +msgstr "%s başarısız" + +#, python-format +msgid "" +"%s used in config as ipv6_gateway is not a valid IPv6 link-local address." +msgstr "" +"ipv6_gateway geçerli bir IPv6 link-local adresi olmadığından yapılandırmada " +"%s kullanıldı." + +#, python-format +msgid "" +"'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." +msgstr "" +"henüz start_rpc_listeners implemente edilmediği için 'rpc_workers = %d' göz " +"ardı edildi." 
+ +#, python-format +msgid "" +"Add interface in the rollback of a remove_router_interface operation failed " +"%s" +msgstr "" +"Bir remove_router_interface işleminin geri dönüşünde arayüz ekleme başarısız " +"%s" + +msgid "Address not present on interface" +msgstr "Adres arayüzde mevcut değil" + +msgid "Agent Initialization Failed" +msgstr "Ajan İlklendirme Başarısız" + +msgid "Agent failed to create agent config map" +msgstr "Ajan ajan yapılandırma haritasını oluşturmada başarısız" + +#, python-format +msgid "An error occurred while communicating with async process [%s]." +msgstr "[%s] asenkron işlem ile haberleşirken bir hata oluştu." + +#, python-format +msgid "An error occurred while killing [%s]." +msgstr "[%s] sonlandırılırken bir hata oluştu." + +#, python-format +msgid "An exception occurred while creating the %(resource)s:%(item)s" +msgstr "%(resource)s:%(item)s oluşturulurken bir istisna oluştu" + +msgid "An interface driver must be specified" +msgstr "Bir arayüz sürücüsü belirtmeniz gerekmektedir" + +#, python-format +msgid "Binding info for DVR port %s not found" +msgstr "DVR bağlantı noktası %s için bağlama bilgisi bulunamadı" + +#, python-format +msgid "" +"Bridge %(bridge)s for physical network %(physical_network)s does not exist. " +"Agent terminated!" +msgstr "" +"%(physical_network)s fiziksel ağı için %(bridge)s köprüsü mevcut değil. Ajan " +"sonlandırıldı!" 
+ +#, python-format +msgid "Bridge %s does not exist" +msgstr "Köprü %s mevcut değil" + +msgid "Brocade NOS driver error" +msgstr "Brocade NOS sürücüsü hatası" + +#, python-format +msgid "Cannot delete bridge %s, does not exist" +msgstr "%s köprüsü silinemiyor, mevcut değil" + +msgid "Cannot have multiple IPv4 subnets on router port" +msgstr "Yönlendirici bağlantı noktasında birden fazla IPv4 alt ağı olamaz" + +#, python-format +msgid "" +"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " +"tunneling disabled" +msgstr "" +"net-id=%(net_uuid)s için %(network_type)s ağı hazırlanamıyor - tünelleme " +"kapalı" + +#, python-format +msgid "" +"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" +"net-id=%(net_uuid)s için VLAN ağı hazırlanamıyor - physical_network " +"%(physical_network)s için köprü yok" + +#, python-format +msgid "" +"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" +"net-id=%(net_uuid)s için düz ağ hazırlanamıyor - physical_network " +"%(physical_network)s için köprü yok" + +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for net-id=" +"%(net_uuid)s" +msgstr "" +"net-id=%(net_uuid)s için %(network_type)s bilinmeyen ağ türü hazırlanamıyor" + +#, python-format +msgid "" +"Cannot reclaim unknown network type %(network_type)s for net-id=%(net_uuid)s" +msgstr "" +"net-id=%(net_uuid)s için bilinmeyen ağ türü %(network_type)s iadesi " +"istenemiyor" + +msgid "Cannot run ebtables. Please ensure that it is installed." +msgstr "ebtables çalıştırılamadı. Lütfen kurulu olduğundan emin olun." 
+ +#, python-format +msgid "" +"Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on a " +"different subnet %(orig_subnet)s" +msgstr "" +"%(port_subnet)s alt ağındaki merkezi-SNAT %(port)s bağlantı noktası başka " +"bir alt ağda %(orig_subnet)s görüldü" + +msgid "" +"Check for Open vSwitch ARP responder support failed. Please ensure that the " +"version of openvswitch being used has ARP flows support." +msgstr "" +"Open vSwitch ARP yanıtlayıcısı desteği kontrolü başarısız. Lütfen kullanılan " +"openvswitch sürümünün ARP akışı desteği olduğundan emin olun." + +msgid "" +"Check for Open vSwitch VXLAN support failed. Please ensure that the version " +"of openvswitch being used has VXLAN support." +msgstr "" +"Open vSwitch VXLAN desteği kontrolü başarısız. Lütfen kullanılan openvswitch " +"sürümünün VXLAN desteği olduğundan emin olun." + +msgid "" +"Check for Open vSwitch patch port support failed. Please ensure that the " +"version of openvswitch being used has patch port support or disable features " +"requiring patch ports (gre/vxlan, etc.)." +msgstr "" +"Open vSwitch yama bağlantı noktası desteği kontrolü başarısız. Lütfen " +"kullanılan openvswitch sürümünün yama bağlantı noktası desteği olduğundan " +"emin olun ya da yama bağlantı noktalarına ihtiyaç duyan özellikleri kapatın " +"(gre/vxlan, vs.)." + +msgid "" +"Check for Open vSwitch support of ARP header matching failed. ARP spoofing " +"suppression will not work. A newer version of OVS is required." +msgstr "" +"Open vSwitch ARP başlığı eşleşme desteği kontrolü başarısız. ARP yanıltma " +"önleme çalışmayacak. Daha yeni sürüm OVS gerekiyor." + +msgid "" +"Check for VF management support failed. Please ensure that the version of ip " +"link being used has VF support." +msgstr "" +"VF yönetim desteği kontrolü başarısız. Lütfen kullanılan ip bağlantısı " +"sürümünün VF desteği olduğundan emin olun." + +msgid "" +"Check for iproute2 VXLAN support failed. 
Please ensure that the iproute2 has " +"VXLAN support." +msgstr "" +"Iproute2 VXLAN desteği kontrolü başarısız. iproute2'nin VXLAN desteği " +"olduğundan emin olun." + +msgid "Check for native OVSDB support failed." +msgstr "Doğal OVSDB desteği kontrolü başarısız." + +#, python-format +msgid "Connect failed to switch: %s" +msgstr "Anahtara bağlantı başarısız: %s" + +#, python-format +msgid "Could not delete %(res)s %(id)s." +msgstr "%(res)s %(id)s silinemedi." + +#, python-format +msgid "Could not find %s to delete." +msgstr "%s silmek için bulunamadı." + +#, python-format +msgid "Could not parse: %s" +msgstr "%s çözümlenemiyor." + +#, python-format +msgid "Could not retrieve gateway port for subnet %s" +msgstr "Alt ağ %s için geçit bağlantı noktası alınamadı" + +#, python-format +msgid "Create floating ip failed with error %s" +msgstr "Değişken ip oluşturma %s hatasıyla başarısız" + +#, python-format +msgid "Create router failed in SDN-VE with error %s" +msgstr "SDN-VE'de yönlendirici oluşturma başarısız hata %s" + +#, python-format +msgid "DVR: Duplicate DVR router interface detected for subnet %s" +msgstr "DVR: %s alt ağı için çift DVR yönlendirici arayüzü algılandı" + +msgid "" +"DVR: Failed to obtain a valid local DVR MAC address - L2 Agent operating in " +"Non-DVR Mode" +msgstr "" +"DVR: Geçerli yerel DVR MAC adresi elde etme başarısız - L2 Ajan Non-DVR " +"kipinde işletiliyor" + +msgid "DVR: Failed updating arp entry" +msgstr "DVR: arp kayıt güncelleme hatası" + +#, python-format +msgid "DVR: Unable to retrieve subnet information for subnet_id %s" +msgstr "DVR: %s subnet_id için alt ağ bilgisi getirilemedi" + +msgid "DVR: error adding redirection logic" +msgstr "DVR: yönlendirme mantığı ekleme hatası" + +msgid "DVR: no map match_port found!" +msgstr "DVR: map match_port bulunamadı!" 
+ +msgid "DVR: removed snat failed" +msgstr "DVR: kaldırılan snat hatası" + +#, python-format +msgid "Delete floatingip failed in SDN-VE: %s" +msgstr "Değişken ip silme SDN-VE'de başarısız oldu: %s" + +#, python-format +msgid "Delete net failed after deleting the network in DB: %s" +msgstr "Ağ DB'den silindikten sonra ağ silme başarısız oldu: %s" + +#, python-format +msgid "" +"Delete port operation failed in SDN-VE after deleting the port from DB: %s" +msgstr "" +"Bağlantı noktası DB'den silindikten sonra SDN-VE içinde bağlantı noktası " +"silme başarısız oldu: %s" + +#, python-format +msgid "" +"Delete router operation failed in SDN-VE after deleting the router in DB: %s" +msgstr "" +"Yönlendirici silme işlemi SDN-VE'de yönlendirici DB'den silindikten sonra " +"başarısız oldu: %s" + +#, python-format +msgid "" +"Delete subnet operation failed in SDN-VE after deleting the subnet from DB: " +"%s" +msgstr "" +"Alt ağ silme işlemi SDN-VE'de alt ağ DB'den silindikten sonra başarısız " +"oldu: %s" + +#, python-format +msgid "Deleting newly created neutron port %s" +msgstr "Yeni oluşturulmuş neutron bağlantısı %s siliniyor" + +#, python-format +msgid "Did not find tenant: %r" +msgstr "Kiracı bulunamadı: %r" + +#, python-format +msgid "Driver %(driver)s does not implement %(func)s" +msgstr "Sürücü %(driver)s %(func)s yi uygulamıyor" + +#, python-format +msgid "Driver %(driver)s:%(func)s runtime error" +msgstr "Sürücü %(driver)s:%(func)s çalışma zamanı hatası" + +msgid "Duplicate adddress detected" +msgstr "Çift adres algılandı" + +#, python-format +msgid "Error during notification for %(callback)s %(resource)s, %(event)s" +msgstr "%(callback)s %(resource)s için bilgilendirme sırasında hata, %(event)s" + +msgid "Error executing command" +msgstr "Komut çalıştırırken hata" + +#, python-format +msgid "Error fetching extended attributes for extension '%s'" +msgstr " '%s' uzantısına dair özellikler getirilirken hata oluştu." 
+ +#, python-format +msgid "Error importing interface driver '%(driver)s': %(inner)s" +msgstr " '%(driver)s': %(inner)s arayüz sürücüsü dahil edilirken hata oluştu" + +msgid "Error in agent event loop" +msgstr "Ajan olay döngüsünde hata" + +#, python-format +msgid "Error in agent loop. Devices info: %s" +msgstr "Ajan döngüsünde hata. Aygıt bilgisi: %s" + +#, python-format +msgid "Error loading plugin by class, %s" +msgstr "Plugin yüklenirken %s sınıfı tarafından bir hata oluştu!" + +#, python-format +msgid "Error loading plugin by name, %s" +msgstr "%s isimli plugin yüklenirken bir hata oluştu!" + +#, python-format +msgid "Error loading provider '%(provider)s' for service %(service_type)s" +msgstr "" +"%(service_type)s servisi için '%(provider)s' sağlayıcısını yüklemede hata" + +#, python-format +msgid "Error received from ovsdb monitor: %s" +msgstr "ovsdb monitör den hata alındı: %s" + +#, python-format +msgid "Error response returned from nova: %s" +msgstr "Nova'dan hata yanıtı döndü: %s" + +#, python-format +msgid "Error unable to destroy namespace: %s" +msgstr "Hata, isim uzayı: %s silinemedi" + +#, python-format +msgid "Error while deleting router %s" +msgstr "Yönlendirici %s silinirken hata" + +#, python-format +msgid "Error while handling pidfile: %s" +msgstr "%s pid dosyası işlenirken bir hata oluştu" + +msgid "Error while processing VIF ports" +msgstr "VIF bağlantı noktaları işlenirken hata" + +msgid "Error while synchronizing tunnels" +msgstr "Tüneller eş zamanlanırken hata" + +#, python-format +msgid "Error while writing HA state for %s" +msgstr "%s için HA durumu yazılırken hata" + +msgid "Error, plugin is not set" +msgstr "Hata, eklenti ayarlanmamış" + +#, python-format +msgid "Error, unable to destroy IPset: %s" +msgstr "Hata, IPset: %s silinemedi" + +#, python-format +msgid "Error, unable to remove iptables rule for IPset: %s" +msgstr "Hata, IPset: %s için iptables kuralı kaldırılamıyor" + +#, python-format +msgid "Error: Could not reach server: 
%(url)s Exception: %(excp)s." +msgstr "Hata. Sunucuya erişilemedi: %(url)s İstisna: %(excp)s." + +#, python-format +msgid "" +"Exceeded %s second limit waiting for address to leave the tentative state." +msgstr "" +"Adresin belirsiz durumdan çıkması için %s saniye bekleme sınırı aşıldı." + +#, python-format +msgid "" +"Exceeded maximum binding levels attempting to bind port %(port)s on host " +"%(host)s" +msgstr "" +"%(host)s istemcisi üzerinde %(port)s bağlantı noktasına bağlanma girişiminde " +"azami bağlanma seviyesi aşıldı" + +#, python-format +msgid "Exception auto-deleting port %s" +msgstr "%s bağlanı noktasını otomatik silme sırasında istisna" + +#, python-format +msgid "Exception auto-deleting subnet %s" +msgstr "%s alt ağını otomatik silme sırasında istisna" + +#, python-format +msgid "Exception deleting fixed_ip from port %s" +msgstr "%s bağlantı noktasından fixed_ip silinirken istisna" + +msgid "Exception encountered during network rescheduling" +msgstr "Ağ yeniden zamanlama sırasında istisna oluştu" + +msgid "Exception encountered during router rescheduling." +msgstr "Yönlendirici yeniden zamanlama sırasında istisna oluştu." + +#, python-format +msgid "Exception loading extension: %s" +msgstr "Uzantı yükleme hatası: %s" + +msgid "Exception occurs when timer stops" +msgstr "Zamanlayıcı durmaya çalışırken hata oluşur." 
+ +msgid "Exception occurs when waiting for timer" +msgstr "Zamanlayıcıyı beklerken hata oluşur" + +msgid "Exiting agent as programmed in check_child_processes_actions" +msgstr "" +"check_child_processes_actions deki programlanan ajan/işlevden çıkılıyor " + +#, python-format +msgid "" +"Exiting agent because of a malfunction with the %(service)s process " +"identified by uuid %(uuid)s" +msgstr "" +"%(uuid)s ile tanımlanan %(service)s işlemlerden bir uyumsuzluk hatasından " +"dolayı çıkılıyor" + +#, python-format +msgid "Extension driver '%(name)s' failed in %(method)s" +msgstr "Eklenti sürücüsü '%(name)s' %(method)s içerisinde başarısız" + +#, python-format +msgid "Extension path '%s' doesn't exist!" +msgstr "'%s' Uzantı dizini bulunamıyor." + +#, python-format +msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" +msgstr "fw: %(fwid)s için %(func_name)s içinde FWaaS RPC hatası" + +#, python-format +msgid "FWaaS RPC info call failed for '%s'." +msgstr "'%s' için FWaaS RPC bilgi çağrısı başarısız" + +#, python-format +msgid "Failed creating vxlan interface for %(segmentation_id)s" +msgstr "%(segmentation_id)s için vxlan arayüzü oluşturma başarısız" + +#, python-format +msgid "Failed deleting egress connection state of floatingip %s" +msgstr "" +"%s floatingip bağlantısının çıkış sevye durumu silinmeye çalışılırken bir " +"hata ile karşılaştı." + +#, python-format +msgid "Failed deleting ingress connection state of floatingip %s" +msgstr "" +"%s floatingip bağlantısının giris sevye durumu silinmeye çalışılırken bir " +"hata ile karşılaştı." + +msgid "Failed executing ip command" +msgstr "IP comutu çalıştırılamadı" + +msgid "Failed fwaas process services sync" +msgstr "fwaas süreç servisleri eş zamanlama başarısız" + +msgid "Failed on Agent configuration parse. Agent terminated!" +msgstr "Ajan yapılandırma aşamasında başarısız olundu. Ajan sonlandırıldı!" + +#, python-format +msgid "Failed on Agent initialisation : %s. Agent terminated!" 
+msgstr "Ajan ilklendirme başarısız: %s. Ajan sonlandırıldı!" + +msgid "Failed reporting state!" +msgstr "Raporlama durumu sağlanamıyor." + +#, python-format +msgid "" +"Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" +msgstr "" +"%(ns)s bilinirlik alanında bulunan %(iface)s deki %(addr)s ne gereksiz/ ARP " +"gönderilemedi." + +msgid "Failed synchronizing routers" +msgstr "Yönlendiricileri eş zamanlama başarısız" + +msgid "Failed synchronizing routers due to RPC error" +msgstr "RPC hatasından dolayı yönlendirici senkronizasyonu sağlanamıyor" + +#, python-format +msgid "Failed to bind port %(port)s on host %(host)s" +msgstr "" +"%(host)s istemcisi üzerindeki %(port)s bağlantı noktasına bağlanılamadı" + +#, python-format +msgid "Failed to commit binding results for %(port)s after %(max)s tries" +msgstr "" +"%(port)s için bağlama sonuçlarını gönderme %(max)s denemeden sonra başarısız " +"oldu" + +msgid "" +"Failed to create OVS patch port. Cannot have tunneling enabled on this " +"agent, since this version of OVS does not support tunnels or patch ports. " +"Agent terminated!" +msgstr "" +"OVS yama bağlantı noktası oluşturma başarısız. Bu ajanda tünelleme " +"etkinleştirilemez, çünkü bu OVS sürümü tünelleri ya da yama bağlantı " +"noktalarını desteklemiyor. Ajan sonlandırıldı!" 
+ +msgid "Failed to create floatingip" +msgstr "Değişken ip oluşturma başarısız" + +msgid "Failed to create router" +msgstr "Yönlendirici oluşturma başarısız" + +msgid "Failed to create subnet, deleting it from neutron" +msgstr "Alt ağ oluşturma başarısız, neutron'dan siliniyor" + +#, python-format +msgid "Failed to destroy stale namespace %s" +msgstr "Vadesi geçmiş isim uzayı %s silinemedi" + +#, python-format +msgid "Failed to fetch router information for '%s'" +msgstr "%s icin yönlendirici bilgisine erisilemiyor" + +#, python-format +msgid "Failed to get devices for %s" +msgstr "%s için aygıtları alma başarısız" + +#, python-format +msgid "Failed to get traffic counters, router: %s" +msgstr "Trafik sayaçları alınamadı, yönlendirici: %s" + +#, python-format +msgid "" +"Failed to import required modules. Ensure that the python-openvswitch " +"package is installed. Error: %s" +msgstr "" +"Gerekli modülleri içe aktarma başarısız. python-openvswitch paketinin kurulu " +"olduğuna emin olun. Hata: %s" + +#, python-format +msgid "Failed to notify nova on events: %s" +msgstr "Nova şu olaylar üzerine bilgilendirilemiyor: %s" + +msgid "Failed to parse network_vlan_ranges. Service terminated!" +msgstr "network_vlan_ranges ayrıştırma başarısız. Servis sonlandırıldı!" + +msgid "Failed to parse supported PCI vendor devices" +msgstr "Desteklenen PCI satıcı aygıtları ayrıştırma başarısız" + +msgid "Failed to parse tunnel_id_ranges. Service terminated!" +msgstr "tunnel_id_ranges ayrıştırma başarısız. Servis sonlandırıldı!" + +msgid "Failed to parse vni_ranges. Service terminated!" +msgstr "vni_ranges ayrıştırma başarısız. Servis sonlandırıldı!" 
+ +#, python-format +msgid "Failed to process compatible router '%s'" +msgstr "Uyumlu '%s' yönlendirici bilgisi işlenemiyor" + +#, python-format +msgid "Failed to process or handle event for line %s" +msgstr "%s satırı için olay ele alınamıyor ya da işlenemiyor" + +#, python-format +msgid "Failed to release segment '%s' because network type is not supported." +msgstr "'%s' dilimi bırakılamadı çünkü ağ türü desteklenmiyor." + +#, python-format +msgid "Failed to reschedule router %s" +msgstr "Yönlendirici %s yeniden zamanlama başarısız" + +#, python-format +msgid "Failed to schedule network %s" +msgstr "Ağ %s zamanlama başarısız" + +#, python-format +msgid "Failed to set device %s state" +msgstr "%s aygıtı durumu ayarlama başarısız" + +#, python-format +msgid "Failed to set-up %(type)s tunnel port to %(ip)s" +msgstr "%(ip)s'ye %(type)s tünel bağlantı noktası kurulumu başarısız" + +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "Bilinirlik alanı silme hatası: %s" + +#, python-format +msgid "Failed unplugging interface '%s'" +msgstr "%s arayuzu devre dışı bırakılamadı." + +msgid "Failure applying iptables rules" +msgstr "Iptables kuralları uygulanırken başarısız olundu" + +#, python-format +msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" +msgstr "fw: %(fwid)s için %(func_name)s için Güvenlik Duvarı Hatası" + +#, python-format +msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" +msgstr "" +"fw: %(fwid)s için %(fwmsg)s fw durumunda Güvenlik Duvarı Sürücüsü Hatası" + +msgid "Fork failed" +msgstr "Fork yapılırken hata ile karşılaşıldı." + +#, python-format +msgid "" +"IPTablesManager.apply failed to apply the following set of iptables rules:\n" +"%s" +msgstr "" +"IPTablesManager.apply aşağıdakı iptables bilgileri uygulanamadı\n" +"%s" + +#, python-format +msgid "" +"Interface %(interface)s for physical network %(physical_network)s does not " +"exist. Agent terminated!" 
+msgstr "" +"%(physical_network)s fiziksel ağı için %(interface)s arayüzü mevcut değil. " +"Ajan sonlandırıldı!" + +msgid "Interface monitor is not active" +msgstr "Arayüz izleme etkin değil" + +msgid "Internal error" +msgstr "İçsel hata" + +#, python-format +msgid "InvalidContentType: %s" +msgstr "UyumsuzİçerikTipi: %s" + +#, python-format +msgid "" +"It was impossible to process the following extensions: %s because of missing " +"requirements." +msgstr "" +"Belirtilen uzantılar çalıştırılması mümkün olamıyor: %s dair eksik " +"ihtiyaclardan dolayı." + +#, python-format +msgid "Login Failed: %s" +msgstr "Giriş Başarısız: %s" + +#, python-format +msgid "MAC generation error after %s attempts" +msgstr "%s denemeden sonra MAC üretme hatası" + +#, python-format +msgid "MalformedRequestBody: %s" +msgstr "BozukİstekGövdesi: %s" + +#, python-format +msgid "Mechanism driver %s failed in bind_port" +msgstr "Mekanizma sürücüsü %s bind_port başarısız" + +#, python-format +msgid "Mechanism driver '%(name)s' failed in %(method)s" +msgstr "Mekanizma sürücüsü '%(name)s' %(method)s içinde başarısız oldu" + +#, python-format +msgid "" +"Message received from the host: %(host)s during the registration of " +"%(agent_name)s has a timestamp: %(agent_time)s. This differs from the " +"current server timestamp: %(serv_time)s by %(diff)s seconds, which is more " +"than the threshold agent downtime: %(threshold)s." +msgstr "" +"%(agent_name)s kaydı sırasında %(host)s istemcisinden alınan iletinin " +"%(agent_time)s zaman damgası var. Bu mevcut sunucu zaman damgası: " +"%(serv_time)s ile %(diff)s saniye farklı, ki bu %(threshold)s eşik ajan " +"aksama süresinden fazla." 
+ +msgid "Missing subnet/agent_gateway_port" +msgstr "Eksik subnet/agent_gateway_port" + +#, python-format +msgid "Multiple ports have port_id starting with %s" +msgstr "Birden çok bağlantı noktası %s port_id ile başlıyor" + +#, python-format +msgid "NETCONF error: %s" +msgstr "NETCONF hatası: %s" + +#, python-format +msgid "Network %s has no segments" +msgstr "%s ağının dilimi yok" + +#, python-format +msgid "Network %s info call failed." +msgstr " %s ağ bilgi çağırısı yapılamıyor." + +#, python-format +msgid "" +"No FloatingIP agent gateway port returned from server for 'network-id': %s" +msgstr "" +"Sunucudan 'network-id': %s için DeğişkenIP ajan geçit bağlantı noktası " +"dönmedi" + +#, python-format +msgid "No Host supplied to bind DVR Port %s" +msgstr "%s DVR Bağlantı noktasına bağlanma için istemci sağlanmadı" + +msgid "No known API applications configured." +msgstr "Hiçi bir tanımlı API uygulaması konfigüre edilmedi." + +#, python-format +msgid "No local VLAN available for net-id=%s" +msgstr "net-id=%s için uygun yerel VLAN yok" + +#, python-format +msgid "No mapping for physical network %s" +msgstr "%s fiziksel ağı için eşleştirme yok" + +msgid "No plugin for L3 routing registered to handle router scheduling" +msgstr "" +"Yönlendirici zamanlamayı işlemesi için L3 yönlendirme için kaydedilmiş " +"eklenti yok" + +msgid "" +"No plugin for L3 routing registered! Will reply to l3 agent with empty " +"router dictionary." +msgstr "" +"L3 yönlendirme için kaydedilmiş eklenti yok. l3 ajanına boş yönlendirici " +"sözlüğüyle yanıt verilecek." + +#, python-format +msgid "" +"No plugin for L3 routing registered. Cannot notify agents with the message %s" +msgstr "" +"L3 yönlendirme için eklenti kaydedilmemiş. 
Ajanlar %s iletisiyle " +"bilgilendirilemiyor" + +msgid "No tunnel_ip specified, cannot delete tunnels" +msgstr "tunnel_ip belirtilmemiş, tüneller silinemiyor" + +msgid "No tunnel_type specified, cannot create tunnels" +msgstr "tunnel_type belirtilmemiş, tünel oluşturulamıyor" + +msgid "No tunnel_type specified, cannot delete tunnels" +msgstr "tunnel_type belirtilmemiş, tüneller silinemiyor" + +#, python-format +msgid "No type driver for external network_type: %s. Service terminated!" +msgstr "Harici network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!" + +#, python-format +msgid "No type driver for tenant network_type: %s. Service terminated!" +msgstr "Kiracı network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!" + +msgid "No valid Segmentation ID to perform UCAST test." +msgstr "UCAST testi yapmak için geçerli Dilimlendirme ID'si yok." + +#, python-format +msgid "Not enough candidates, a HA router needs at least %s agents" +msgstr "Yeterli aday yok, bir HA yönlendirici en az %s ajana ihtiyaç duyar" + +msgid "" +"Nova notifications are enabled, but novaclient is not installed. Either " +"disable nova notifications or install python-novaclient." +msgstr "" +"Nova iletileri etkin, ama novaclient kurulu değil. Ya nova iletilerini " +"kapatın ya da python-novaclient kurun." + +#, python-format +msgid "OVS flows could not be applied on bridge %s" +msgstr "OVS akışları %s köprüsüne uygulanamıyor." + +#, python-format +msgid "OVSDB Error: %s" +msgstr "OVSDB Hatası: %s" + +#, python-format +msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" +msgstr "" +"physical_interface_mappings ayrıştırma başarısız: %s. Ajan sonlandırıldı!" + +#, python-format +msgid "Pidfile %s already exist. Daemon already running?" +msgstr "%s Pid zaten mevcut. Servis zaten calisiyor?" + +#, python-format +msgid "Policy check error while calling %s!" +msgstr "%s cağrılırken politika doğrulama hatası oluştu!" 
+ +#, python-format +msgid "Port %(port)s does not exist on %(bridge)s!" +msgstr "Bağlantı noktası %(port)s %(bridge)s köprüsü üzerinde mevcut değil!" + +#, python-format +msgid "Port %s does not exist" +msgstr "Bağlantı noktası %s mevcut değil" + +#, python-format +msgid "" +"Refusing to bind port for segment ID %(id)s, segment %(seg)s, phys net " +"%(physnet)s, and network type %(nettype)s" +msgstr "" +"Dilim ID %(id)s, dilim %(seg)s, fiziksel ağ %(physnet)s, ve ağ türü " +"%(nettype)s için bağlantı noktası bağlantısı reddedildi" + +#, python-format +msgid "Removing incompatible router '%s'" +msgstr "Uygunsuz '%s' yönlendirici bilgisi kaldırılıyor" + +#, python-format +msgid "" +"Request %(method)s %(uri)s body = %(body)s failed with status %(status)s. " +"Reason: %(reason)s)" +msgstr "" +"İstek %(method)s %(uri)s body = %(body)s %(status)s durumu ile başarısız " +"oldu. Sebep: %(reason)s)" + +#, python-format +msgid "Request failed from Controller side with Status=%s" +msgstr "İstek kontrolcü tarafından Durum=%s ile başarısız oldu" + +#, python-format +msgid "Response is Null, Request timed out: %(method)s to %(uri)s" +msgstr "Yanıt NULL, İstek zaman aşımına uğradı: %(uri)s ye %(method)s" + +msgid "Retrying after 1 second..." +msgstr "1 saniye sonra tekrar deneniyor..." + +msgid "Router id is required if not using namespaces." +msgstr "" +"Eğer tanım alanı kullanmıyorsanız bir yönlendirici bilgisi belirtmeniz " +"gerekmektedir, " + +msgid "RuntimeError in obtaining namespace list for namespace cleanup." +msgstr "" +"İsim uzayı temizliği için isim uzayı listesi elde edilirken RuntimeError." + +msgid "" +"SdnvePluginV2._add_router_interface_only: failed to add the interface in the " +"roll back. 
of a remove_router_interface operation" +msgstr "" +"SdnvePluginV2._add_router_interface_only: arayüzün bir " +"remove_router_interface işleminden geri dönüş içinde eklenmesi başarısız oldu" + +#, python-format +msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" +msgstr "" +"%(port)s bağlantı noktası için serileştirilmiş profil DB değeri '%(value)s' " +"geçersiz" + +#, python-format +msgid "" +"Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" +msgstr "" +"%(port)s bağlantı noktası için serileştirilmiş vif_details DB değeri " +"'%(value)s' geçersiz" + +#, python-format +msgid "The external network bridge '%s' does not exist" +msgstr "%s harici ağ geçidi mevcut degil" + +#, python-format +msgid "The following routers have not physical match: %s" +msgstr "Şu yönlendiriciler fiziksel eşleşmeye sahip değil: %s" + +#, python-format +msgid "" +"The installed version of dnsmasq is too old. Please update to at least " +"version %s." +msgstr "Yüklü dnsmasq sürümü çok eski. Lütfen en az %s sürümüne güncelleyin." + +msgid "" +"The user that is executing neutron does not have permissions to read the " +"namespaces. Enable the use_helper_for_ns_read configuration option." +msgstr "" +"Neutron'u çalıştıran kullanıcının isim uzaylarını okuma yetkisi yok. " +"use_helper_for_ns_read yapılandırma seçeneğini etkinleştirin." + +#, python-format +msgid "Timed out retrieving ofport on port %(pname)s. Exception: %(exception)s" +msgstr "" +"%(pname)s portu üzerindeki ofportların çekilmesi zamana aşımına uğrandı. " +"Hata: %(exception)s" + +#, python-format +msgid "" +"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s' is " +"already registered for type '%(type)s'" +msgstr "" +"Tür sürücüsü '%(new_driver)s' atlandı çünkü tür sürücüsü '%(old_driver)s' " +"'%(type)s' türü için zaten kaydedilmiş" + +#, python-format +msgid "Unable to %(action)s dhcp for %(net_id)s." 
+msgstr "%(net_id)s için %(action)s dhcp de yapılamıyor. " + +#, python-format +msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" +msgstr "%(interface)s %(bridge_name)s e eklenemedi. İstisna: %(e)s" + +#, python-format +msgid "Unable to add vxlan interface for network %s" +msgstr "%s ağı için vxlan arayüzü eklenemedi" + +#, python-format +msgid "Unable to convert value in %s" +msgstr "%s degeri dönüştürülemiyor" + +#, python-format +msgid "Unable to delete port '%(pname)s' on switch. Exception: %(exp)s" +msgstr "" +"Anahtar üzerindeki '%(pname)s' bağlantı noktası silinemiyor. İstisna: %(exp)s" + +#, python-format +msgid "Unable to establish connection with Controller %s" +msgstr "Kontrolcü %s ile bağlantı kurulamadı" + +#, python-format +msgid "Unable to execute %(cmd)s." +msgstr " %(cmd)s çalıştırılamıyor." + +#, python-format +msgid "Unable to execute %(cmd)s. Exception: %(exception)s" +msgstr "%(cmd)s çalıştırılamadı. Hata: %(exception)s" + +#, python-format +msgid "Unable to find agent %s." +msgstr "%s ajanı bulunamıyor." + +#, python-format +msgid "Unable to generate mac address after %s attempts" +msgstr "%s denemeden sonra mac adresi üretilemedi" + +#, python-format +msgid "Unable to listen on %(host)s:%(port)s" +msgstr "%(host)s:%(port)s dinlenemiyor" + +msgid "Unable to obtain MAC address for unique ID. Agent terminated!" +msgstr "Benzersiz ID için MAC adresi elde edilemedi. Ajan sonlandırıldı!" + +#, python-format +msgid "Unable to parse route \"%s\"" +msgstr "\"%s\" rotası ayrıştırılamadı" + +#, python-format +msgid "Unable to process HA router %s without HA port" +msgstr "HA bağlantısı olmadan HA yönlendiricisi %s işlenemiyor" + +#, python-format +msgid "Unable to sync network state on deleted network %s" +msgstr "Silinmiş %s ağları için senkronizasyon sağlanamıyor" + +msgid "Unable to sync network state." +msgstr "Ağ durumu senkronize edilemiyor." 
+ +#, python-format +msgid "Unable to undo add for %(resource)s %(id)s" +msgstr "%(resource)s %(id)s için ekleme geri alınamıyor" + +msgid "Unexpected error." +msgstr "Beklenmeyen hata." + +#, python-format +msgid "" +"Unexpected exception occurred while removing network %(net)s from agent " +"%(agent)s" +msgstr "" +"%(net)s ağı %(agent)s ajanından kaldırılırken beklenmedik istisna oluştu" + +#, python-format +msgid "Unexpected exception while checking supported feature via command: %s" +msgstr "" +"Şu komutla desteklenen özellik kontrolü yapılırken beklenmedik istisna: %s" + +msgid "Unexpected exception while checking supported ip link command" +msgstr "Desteklenen ip bağlantısı komutu kontrol edilirken beklenmedik istisna" + +msgid "Unhandled exception occurred" +msgstr "Ele alınmayan istisna oluştu" + +#, python-format +msgid "Unknown network_type %(network_type)s for network %(network_id)s." +msgstr "%(network_id)s ağı için bilinmeyen network_type %(network_type)s." + +msgid "Unrecoverable error: please check log for details." +msgstr "Düzeltilemeyen hata: Lütfen detaylar için loglara bakınız." + +#, python-format +msgid "Update floating ip failed with error %s" +msgstr "Değişken ip güncelleme %s hatasıyla başarısız oldu" + +#, python-format +msgid "Update router failed in SDN-VE with error %s" +msgstr "Yönlendirici güncelleme SDN-VE'de %s hatasıyla başarısız oldu" + +#, python-format +msgid "Update router-add-interface failed in SDN-VE with error %s" +msgstr "router-add-interface güncelleme SDN-VE'de %s hatasıyla başarısız oldu" + +#, python-format +msgid "Update router-remove-interface failed : %s" +msgstr "router-remove-interface güncelleme başarısız: %s" + +#, python-format +msgid "" +"Will not send event %(method)s for network %(net_id)s: no agent available. " +"Payload: %(payload)s" +msgstr "" +"%(net_id)s ağı için %(method)s oalyı gönderilmeyecek: uygun ajan yok. 
" +"Fayadalı yük: %(payload)s" + +#, python-format +msgid "_bind_port_if_needed failed, deleting port '%s'" +msgstr "_bind_port_if_needed başarısız, '%s' bağlantı noktası siliniyor" + +#, python-format +msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'" +msgstr "" +"_bind_port_if_needed başarısız. '%s' toplu oluşturmasından tüm bağlantı " +"noktaları siliniyor" + +msgid "login failed" +msgstr "giriş başarısız" + +#, python-format +msgid "" +"mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " +"'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s" +msgstr "" +"mechanism_manager.create_%(res)s_postcommit %(res)s: '%(failed_id)s' için " +"başarısız. %(res)ss %(resource_ids)s siliniyor" + +#, python-format +msgid "" +"mechanism_manager.create_network_postcommit failed, deleting network '%s'" +msgstr "" +"mechanism_manager.create_network_postcommit başarısız, '%s' ağı siliniyor" + +#, python-format +msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" +msgstr "" +"mechanism_manager.create_port_postcommit başarısız, '%s' bağlantı noktası " +"siliniyor" + +#, python-format +msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" +msgstr "" +"mechanism_manager.create_subnet_postcommit başarısız, alt ağ '%s' siliniyor" + +msgid "mechanism_manager.delete_network_postcommit failed" +msgstr "mechanism_manager.delete_network_postcommit başarısız" + +#, python-format +msgid "mechanism_manager.delete_port_postcommit failed for port %s" +msgstr "" +"mechanism_manager.delete_port_postcommit %s bağlantı noktası için başarısız" + +msgid "mechanism_manager.delete_subnet_postcommit failed" +msgstr "mechanism_manager.delete_subnet_postcommit başarısız" + +#, python-format +msgid "" +"process_ancillary_network_ports - iteration:%d - failure while retrieving " +"port details from server" +msgstr "" +"process_ancillary_network_ports - yineleme:%d - sunucudan bağlantı noktası " +"detaylarını alma 
başarısız" + +#, python-format +msgid "" +"process_network_ports - iteration:%d - failure while retrieving port details " +"from server" +msgstr "" +"process_network_ports - yineleme:%d - sunucudan bağlantı noktası detaylarını " +"alma başarısız" + +#, python-format +msgid "request: Request failed from Controller side :%s" +msgstr "istek: İstek Kontrolcü tarafında başarısız oldu: %s" + +#, python-format +msgid "respawning %(service)s for uuid %(uuid)s" +msgstr "uuid %(uuid)s icin %(service)s yeniden başlatılıyor." + +#, python-format +msgid "tunnel_type %s not supported by agent" +msgstr "tunnel_type %s ajan tarafından desteklenmiyor" diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000000..aef1bcafc05 --- /dev/null +++ b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,684 @@ +# Translations template for neutron. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +# ADİL REŞİT DURSUN , 2015 +# Alper Çiftçi , 2015 +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-08-21 06:07+0000\n" +"PO-Revision-Date: 2015-08-21 01:06+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/neutron/" +"language/tr_TR/)\n" +"Language: tr_TR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.0\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#, python-format +msgid "%(action)s failed (client error): %(exc)s" +msgstr "%(action)s başarısız (istemci hatası): %(exc)s" + +#, python-format +msgid "%(method)s %(url)s" +msgstr "%(method)s %(url)s" + +#, python-format +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" +"%(plugin_key)s: %(args)s bağımsız değişkenlerine sahip %(function_name)s " +"atlandı" + +#, python-format +msgid "%(prog)s version %(version)s" +msgstr "%(prog)s sürüm %(version)s" + +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "%(type)s ID aralığı: %(range)s" + +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "%(url)s hata döndürdü: %(exception)s" + +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s HTTP %(status)d ile geri döndü" + +#, python-format +msgid "%d probe(s) deleted" +msgstr "%d sonda silindi" + +#, python-format +msgid "" +"Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "%(network_id)s ağı için %(network_type)s türünde %(id)s dilimi eklendi" + +#, python-format +msgid "Adding %s to list of bridges." +msgstr "%s köprü listesine ekleniyor." 
+ +#, python-format +msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" +msgstr "Ağ %(net)s %(host)s istemcisi üzerinde %(agent)s ajanına ekleniyor" + +#, python-format +msgid "Agent %s already present" +msgstr "Ajan %s zaten mevcut" + +#, python-format +msgid "Agent Gateway port does not exist, so create one: %s" +msgstr "Ajan geçit bağlantı noktası mevcut değil, bir tane oluştur: %s" + +msgid "Agent caught SIGHUP, resetting." +msgstr "Ajan SIGHUP yakaladı, sıfırlanıyor." + +msgid "Agent caught SIGTERM, quitting daemon loop." +msgstr "Ajan SIGTERM yakaladı, artalan işlemi döngüsünden çıkılıyor." + +msgid "Agent initialised successfully, now running... " +msgstr "Ajan başarıyla ilklendirildi, şimdi çalıştırılıyor... " + +msgid "Agent initialized successfully, now running... " +msgstr "Ajan başarıyla ilklendirildi, şimdi çalıştırılıyor... " + +msgid "Agent out of sync with plugin!" +msgstr "Ajan ve eklenti uyumsuz!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "Ajan tüneli eklentiyle uyumsuz!" 
+ +#, python-format +msgid "Allocated vlan (%d) from the pool" +msgstr "Havuzdan vlan (%d) ayrıldı" + +msgid "" +"Allow sorting is enabled because native pagination requires native sorting" +msgstr "" +"Sıralamaya izin verme etkin çünkü doğal sayfalama doğal sıralamaya ihtiyaç " +"duyar" + +#, python-format +msgid "Allowable flat physical_network names: %s" +msgstr "İzin verilebilecek düz fiziksel ağ isimleri: %s" + +msgid "Arbitrary flat physical_network names allowed" +msgstr "Rastgele seçilmiş düz fiziksel ağ isimlerine izin verilmez" + +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "%(vlan_id)s net-id=%(net_uuid)s için yerel olarak atanıyor" + +#, python-format +msgid "Attachment %s removed" +msgstr "Eklenti %s kaldırıldı" + +#, python-format +msgid "" +"Attempt %(count)s to allocate a VRID in the network %(network)s for the " +"router %(router)s" +msgstr "" +"%(network)s ağında %(router)s yönlendiricisi için VRID ayırmak için girişim " +"%(count)s" + +#, python-format +msgid "Attempt %(count)s to bind port %(port)s" +msgstr "%(port)s bağlantı noktası bağlama için girişim %(count)s" + +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" +"Filtrelenmiş %r olmayan bağlantı noktası filtresi kaldırılmaya çalışıldı" + +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "%s filtrelenmemiş bağlantı noktası filtresi güncellenmeye çalışıldı" + +msgid "Bad resource for forming a create request" +msgstr "Oluşturma isteği oluşturmak için kötü kaynak" + +msgid "Bad resource for forming a delete request" +msgstr "Silme isteği oluşturmak için kötü kaynak" + +msgid "Bad resource for forming a list request" +msgstr "Liste isteği oluşturmak için kötü kaynak" + +msgid "Bad resource for forming a show request" +msgstr "Gösterme isteği oluşturmak için kötü kaynak" + +msgid "Bad resource for forming a update request" +msgstr "Güncelleme isteği oluşturmak 
için kötü kaynak" + +#, python-format +msgid "" +"Binding info for port %s was not found, it might have been deleted already." +msgstr "" +"Bağlantı noktası %s için bağlama bilgisi bulunamadı, zaten silinmiş olabilir." + +#, python-format +msgid "" +"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " +"in port's address IP versions" +msgstr "" +"Dhcp seçeneği %(opt)s uygulanamıyor çünkü ip_version %(version)d bağlantı " +"noktasının adres IP sürümleri içinde değil" + +#, python-format +msgid "Centralizing distributed router %s is not supported" +msgstr "Dağıtık yönlendirici %s merkezileştirme desteklenmiyor" + +#, python-format +msgid "Cleaning bridge: %s" +msgstr "Köprü temizleniyor: %s" + +#, python-format +msgid "Clearing orphaned ARP spoofing entries for devices %s" +msgstr "Aygıtlar %s için sahipsiz ARP aldatma girdileri temizleniyor" + +msgid "" +"ConfDriver is used as quota_driver because the loaded plugin does not " +"support 'quotas' table." +msgstr "" +"Yüklenen eklenti 'quotas' tablosunu desteklemediğinden ConfDriver " +"quota_driver olarak kullanılıyor." 
+ +#, python-format +msgid "Config paste file: %s" +msgstr "Yapılandırma yapıştırma dosyası: %s" + +#, python-format +msgid "Configured extension driver names: %s" +msgstr "Yapılandırılan eklenti sürücü isimleri: %s" + +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "Yapılandırılan mekanizma sürücü isimleri: %s" + +#, python-format +msgid "Configured type driver names: %s" +msgstr "Tür sürücü isimleri yapılandırıldı: %s" + +#, python-format +msgid "Controller IPs: %s" +msgstr "Kontrolcü IP'si: %s" + +msgid "DHCP agent started" +msgstr "DHCP ajanı başlatıldı" + +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "%s servis türü için varsayılan sağlayıcı belirtilmemiş" + +#, python-format +msgid "Deleting port: %s" +msgstr "Bağlantı noktası siliniyor: %s" + +#, python-format +msgid "Destroying IPset: %s" +msgstr "IPset siliniyor: %s" + +#, python-format +msgid "Destroying IPsets with prefix: %s" +msgstr "Şu öneke sahip IPset'ler siliniyor: %s" + +#, python-format +msgid "Device %s already exists" +msgstr "Aygıt %s zaten mevcut" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Aygıt %s eklentide tanımlanmamış" + +#, python-format +msgid "Device with MAC %s not defined on plugin" +msgstr "%s MAC'ine sahip aygıt eklentide tanımlanmadı" + +msgid "Disabled allowed-address-pairs extension." +msgstr "allowed-address-pairs eklentisi kapatıldı." + +msgid "Disabled security-group extension." +msgstr "Güvenlik grubu eklentisi kapatıldı." + +msgid "Disabled vlantransparent extension." +msgstr "vlantransparent eklentisi kapalı." 
+ +#, python-format +msgid "Exclude Devices: %s" +msgstr "Aygıtları Hariç Tut: %s" + +#, python-format +msgid "" +"Failed to schedule network %s, no eligible agents or it might be already " +"scheduled by another server" +msgstr "" +"%s ağı zamanlanamadı, uygun ajan yok veya başka bir sunucu tarafından zaten " +"zamanlanmış olabilir" + +msgid "Fake SDNVE controller initialized" +msgstr "Sahte SDNVE kontrolcüsü ilklendirildi" + +msgid "Fake SDNVE controller: check and create tenant" +msgstr "Sahte SDNVE kontrolcüsü: kiracıyı kontrol et ve oluştur" + +msgid "Fake SDNVE controller: create" +msgstr "Sahte SDNVE kontrolcüsü: oluştur" + +msgid "Fake SDNVE controller: delete" +msgstr "Sahte SDNVE kontrolcüsü: sil" + +msgid "Fake SDNVE controller: get controller" +msgstr "Sahte SDNVE kontrolcüsü: kontrolcüyü al" + +msgid "Fake SDNVE controller: get tenant by id" +msgstr "Sahte SDNVE kontrolcüsü: id ile kiracı al" + +msgid "Fake SDNVE controller: list" +msgstr "Sahte SDNVE kontrolcüsü: listele" + +msgid "Fake SDNVE controller: show" +msgstr "Sahte SDNVE kontrolcüsü: göster" + +msgid "Fake SDNVE controller: update" +msgstr "Sahte SDNVE kontrolcüsü: güncelle" + +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "Havuzda geçersiz IP adresi bulundu: %(start)s - %(end)s:" + +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Kesişen aralıklar bulundu: %(l_range)s and %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "Alt ağ CIDR'den büyük havuz bulundu:%(start)s - %(end)s" + +#, python-format +msgid "" +"Found port (%(port_id)s, %(ip)s) having IP allocation on subnet %(subnet)s, " +"cannot delete" +msgstr "" +"%(subnet)s alt ağında IP ayrılmış bağlantı noktası (%(port_id)s, %(ip)s) " +"bulundu, silinemez" + +#, python-format +msgid "Got %(alias)s extension from driver '%(drv)s'" +msgstr "'%(drv)s' sürücüsünden %(alias)s eklentisi alındı" + +#, 
python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP istisnası fırlatıldı: %s" + +#, python-format +msgid "" +"Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s after " +"%(delta)s" +msgstr "" +"%(host)s istemcisi, uuid %(uuid)s üstündeki %(type)s ajandan %(delta)s sonra " +"kalp atışı alındı" + +msgid "IPset cleanup completed successfully" +msgstr "IPset temizliği başarıyla tamamlandı" + +msgid "IPv6 is not enabled on this system." +msgstr "IPv6 bu sistemde etkin değil." + +msgid "Initializing CRD client... " +msgstr "CRD istemcisi ilklendiriliyor... " + +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "'%s' türü için sürücü ilklendiriliyor" + +#, python-format +msgid "Initializing extension driver '%s'" +msgstr "Eklenti sürücüsü ilklendiriliyor '%s'" + +msgid "Initializing extension manager." +msgstr "Genişletme yöneticisi başlatılıyor" + +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "Mekanizma sürücüsü ilklendiriliyor '%s'" + +#, python-format +msgid "Interface mappings: %s" +msgstr "Arayüz eşleştirmeleri: %s" + +#, python-format +msgid "L2 Agent operating in DVR Mode with MAC %s" +msgstr "L2 Ajanı %s MAC'i ile DVR Kipinde çalışıyor" + +msgid "L3 agent started" +msgstr "L3 ajanı başlatıldı" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "LinuxBridge Ajanı RPC Artalan İşlemleri Başlatıldı!" + +#, python-format +msgid "Loaded extension driver names: %s" +msgstr "Yüklenen eklenti sürücü isimleri: %s" + +#, python-format +msgid "Loaded extension: %s" +msgstr "Yüklenen bölüm: %s" + +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "Yüklenen mekanizma sürücü isimleri: %s" + +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "quota_driver yüklendi: %s." 
+ +#, python-format +msgid "Loaded type driver names: %s" +msgstr "Tür sürücü isimleri yüklendi: %s" + +#, python-format +msgid "Loading Metering driver %s" +msgstr "Ölçme sürücüsü %s yükleniyor" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Eklenti Yükleniyor: %s" + +#, python-format +msgid "Loading core plugin: %s" +msgstr "Çekirdek eklenti yükleniyor: %s" + +#, python-format +msgid "Loading interface driver %s" +msgstr "Arayüz sürücüsü %s yükleniyor" + +msgid "Logging enabled!" +msgstr "Günlükleme etkin!" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "Döngü yinelemsi aralığı aştı (%(polling_interval)s ile %(elapsed)s)!" + +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "ML2 FlatTypeDriver ilklendirmesi tamamlandı" + +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "ML2 LocalTypeDriver ilklendirmesi tamamlandı" + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "Fiziksel ağ %(physical_network)s %(bridge)s köprüsüne eşleştiriliyor" + +#, python-format +msgid "" +"Mapping physical network %(physical_network)s to interface %(interface)s" +msgstr "" +"%(physical_network)s fiziksel ağını %(interface)s arayüzüne eşleştiriyor" + +msgid "Modular L2 Plugin initialization complete" +msgstr "Modüler L2 Eklentisi ilklendirme tamamlandı" + +msgid "NVSD Agent initialized successfully, now running... " +msgstr "NVSD Ajanı başarıyla ilklendirildi, şimdi çalıştırılıyor... " + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Ağ VLAN aralığı: %s" + +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "Neutron servisi başlatıldı, %(host)s:%(port)s üzerinde dinliyor" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "Hiçbir %s Eklenti yüklenmedi" + +#, python-format +msgid "No device with MAC %s defined on agent." +msgstr "Ajanda %s MAC'ine sahip bir aygıt tanımlanmamış." 
+ +msgid "No ip allocation set" +msgstr "Ip ayırma ayarlanmamış" + +msgid "No ports here to refresh firewall" +msgstr "Burda güvenlik duvarını tazelemek için bağlantı noktası yok" + +#, python-format +msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" +msgstr "Eşleşmeyen kiracı ve ağ türleri: %(ttype)s %(ntype)s" + +#, python-format +msgid "Nova event response: %s" +msgstr "Nova olay yanıtı: %s" + +#, python-format +msgid "" +"Number of active agents lower than max_l3_agents_per_router. L3 agents " +"available: %s" +msgstr "" +"Etkin ajan sayısı max_l3_agents_per_router'den küçük. Kullanılabilir L3 " +"ajanları: %s" + +msgid "OVS cleanup completed successfully" +msgstr "OVS temizliği başarıyla tamamlandı" + +#, python-format +msgid "Physical Devices mappings: %s" +msgstr "Fiziksel Aygıtların eşleştirmeleri: %s" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Bağlantı noktası %(device)s güncellendi. Detaylar: %(details)s" + +#, python-format +msgid "Port %(port_id)s not present in bridge %(br_name)s" +msgstr "Bağlantı noktası %(port_id)s %(br_name)s köprüsünde mevcut değil" + +#, python-format +msgid "Port %s updated." +msgstr "Bağlantı noktası %s güncellendi." + +#, python-format +msgid "Port %s was deleted concurrently" +msgstr "Bağlantı noktası %s eş zamanlı olarak silindi" + +#, python-format +msgid "" +"Port %s was not found on the integration bridge and will therefore not be " +"processed" +msgstr "" +"Bağlantı noktası %s tümleştirme köprüsünde bulunamadı ve bu yüzden " +"işlenmeyecek" + +#, python-format +msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" +msgstr "" +"'%(port_name)s' bağlantı noktası '%(vlan_tag)d' vlan etiketini kaybetti!" 
+ +msgid "PortSecurityExtensionDriver initialization complete" +msgstr "PortSecurityExtensionDriver ilklendirme tamamlandı" + +#, python-format +msgid "Ports %s removed" +msgstr "Portlar %s silindi" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Aygıtlar için filtreler hazırlanıyor %s" + +#, python-format +msgid "Process runs with uid/gid: %(uid)s/%(gid)s" +msgstr "Süreç şu uid/gid ile çalışıyor: %(uid)s/%(gid)s" + +msgid "Provider rule updated" +msgstr "Sağlayıcı kuralı güncellendi" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC agent_id: %s" + +msgid "RPC was already started in parent process by plugin." +msgstr "RPC üst süreçte eklenti tarafından zaten başlatılmıştı." + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "vlan = %(vlan_id)s'in net-id = %(net_uuid)s'den iades isteniyor" + +msgid "Refresh firewall rules" +msgstr "Güvenlik duvarı kurallarını tazele" + +#, python-format +msgid "Registered extension drivers: %s" +msgstr "Eklenti sürücüleri kaydedildi: %s" + +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "Kaydedilen mekanizma sürücüleri: %s" + +#, python-format +msgid "Registered types: %s" +msgstr "Kaydedilen türler: %s" + +#, python-format +msgid "Remove device filter for %r" +msgstr "%r için aygıt filtresini kaldır" + +#, python-format +msgid "Removing device with mac_address %s" +msgstr "%s mac_address'e sahip aygıt kaldırılıyor" + +#, python-format +msgid "Removing iptables rule for IPset: %s" +msgstr "IPset için iptables kuralı siliniyor: %s" + +#, python-format +msgid "Router %(router_id)s transitioned to %(state)s" +msgstr "Yönlendirici %(router_id)s %(state)s durumuna geçti" + +#, python-format +msgid "" +"Router %s is not managed by this agent. It was possibly deleted concurrently." +msgstr "" +"%s yönlendiricisi bu ajan tarafından yönetilmiyor. Muhtemelen eş zamanlı " +"olarak silindi." + +msgid "SNAT already bound to a service node." 
+msgstr "SNAT zaten bir servis düğümüne bağlı." + +#, python-format +msgid "SNAT interface port list does not exist, so create one: %s" +msgstr "" +"SNAT arayüzü bağlantı noktası listesi mevcut değil, bir tane oluştur: %s" + +msgid "SRIOV NIC Agent RPC Daemon Started!" +msgstr "SRIOV NIC Ajanı RPC Artalan İşlemleri Başlatıldı!" + +#, python-format +msgid "Scheduling unhosted network %s" +msgstr "Sunulmamış ağ %s zamanlanıyor" + +#, python-format +msgid "Security group member updated %r" +msgstr "Güvenlik grubu üyesi güncellendi %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Güvenlik grubu kuralı güncellendi %r" + +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "Servis %s çekirdek eklenti tarafından destekleniyor" + +msgid "Set a new controller if needed." +msgstr "Gerekirse yeni bir kontrolcü ayarla." + +#, python-format +msgid "Set the controller to a new controller: %s" +msgstr "Kontrolcüyü yeni kontrolcüye ayarla: %s" + +#, python-format +msgid "" +"Skipping ARP spoofing rules for port '%s' because it has port security " +"disabled" +msgstr "" +"'%s' bağlantı noktası için ARP aldatma kuralları atlanıyor çünkü bağlanı " +"noktası güvenliği kapalı" + +#, python-format +msgid "" +"Skipping method %s as firewall is disabled or configured as " +"NoopFirewallDriver." +msgstr "" +"Güvenlik duvarı kapalı ya da NoopFirewallDriver olarak yapılandırıldığından " +"%s metodu atlanıyor." + +msgid "" +"Skipping period L3 agent status check because automatic router rescheduling " +"is disabled." +msgstr "" +"Devre L3 ajan durum kontrolü atlanıyor çünkü otomatik yönlendirici yeniden " +"zamanlama kapalı." + +msgid "" +"Skipping periodic DHCP agent status check because automatic network " +"rescheduling is disabled." +msgstr "" +"Aralıklı DHCP ajan durum kontrolü atlanıyor çünkü otomatik ağ yeniden " +"zamanlama kapalı." 
+ +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "Bağlantı noktası %s atlanıyor çünkü üzerinde yapılandırılmış IP yok" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "Belirtilen IP adresleri alt ağ IP sürümüyle eşleşmiyor" + +msgid "Stopping linuxbridge agent." +msgstr "Linuxbridge ajanı durduruluyor." + +#, python-format +msgid "Subnet %s was deleted concurrently" +msgstr "Alt ağ %s eş zamanlı olarak silindi" + +msgid "Synchronizing state" +msgstr "Durum eşzamanlandırılıyor" + +msgid "Synchronizing state complete" +msgstr "Durum eş zamanlandırma tamamlandı" + +#, python-format +msgid "Tenant network_types: %s" +msgstr "Kiracı network_types: %s" + +#, python-format +msgid "The IP addr of available SDN-VE controllers: %s" +msgstr "Kullanılabilir SDN-VE kontrolcülerinin IP adresi: %s" + +#, python-format +msgid "The SDN-VE controller IP address: %s" +msgstr "SDN-VE kontrolcüsü IP adresi: %s" + +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"CIDR: %(new_cidr)s için doğrulama başarısız - %(subnet_id)s (CIDR: %(cidr)s) " +"ile çakışıyor" + +msgid "VlanTypeDriver initialization complete" +msgstr "VlanTypeDriver ilklendirme tamamlandı" + +#, python-format +msgid "agent_updated by server side %s!" +msgstr "ajan sunucu tarafında güncellendi %s!" + +#, python-format +msgid "port_unbound(): net_uuid %s not in local_vlan_map" +msgstr "port_unbound(): net_uuid %s local_vlan_map içinde değil" diff --git a/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po new file mode 100644 index 00000000000..cb78796f756 --- /dev/null +++ b/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po @@ -0,0 +1,527 @@ +# Translations template for neutron. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +# ADİL REŞİT DURSUN , 2015 +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-08-21 06:08+0000\n" +"PO-Revision-Date: 2015-08-21 01:06+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/neutron/" +"language/tr_TR/)\n" +"Language: tr_TR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.0\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#, python-format +msgid "%(agent_type)s agent %(agent_id)s is not active" +msgstr "%(agent_type)s ajanı %(agent_id)s etkin değil" + +#, python-format +msgid "" +"%(port_num)d router ports found on the metadata access network. Only the " +"port %(port_id)s, for router %(router_id)s will be considered" +msgstr "" +"Metadata erişim ağında %(port_num)d yönlendirici bağlantı noktası bulundu. " +"Yalnızca %(port_id)s bağlantı noktası, %(router_id)s yönlendiricisi için " +"değerlendirilecek" + +#, python-format +msgid "%(type)s tunnel %(id)s not found" +msgstr "%(type)s tünel %(id)s bulunamadı" + +msgid "A concurrent port creation has occurred" +msgstr "Eş zamanlı bağlantı noktası oluşturma meydana geldi" + +#, python-format +msgid "" +"Action %(action)s for network %(net_id)s could not complete successfully: " +"%(reason)s" +msgstr "" +"%(net_id)s ağı için %(action)s eylemi başarıyla tamamlanamadı: %(reason)s" + +#, python-format +msgid "Action %s not supported" +msgstr "%s eylemi desteklenmiyor" + +#, python-format +msgid "Attempted to get traffic counters of chain %s which does not exist" +msgstr "%s zincirinin mevcut olmayan trafik sayaçları alınmaya çalışıldı" + +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "Var olmayan %s zinciri kaldırılmaya çalışılıyor" + +#, python-format +msgid "Attempting to bind with dead agent: %s" +msgstr "Ölü ajanla bağlama deneniyor: %s" 
+ +msgid "" +"Authenticating to nova using nova_admin_* options is deprecated. This should " +"be done using an auth plugin, like password" +msgstr "" +"Nova'ya nova_admin_* seçeneklerini kullanarak kimlik doğrulama artık " +"kullanılmıyor. Bu parola gibi bir yetkilendirme eklentisi kullanılarak " +"yapılmalı" + +#, python-format +msgid "Cannot find vf index for pci slot %s" +msgstr "%s pci yuvası için vf indisi bulunamıyor" + +#, python-format +msgid "Cannot find vfs %(vfs)s in device %(dev_name)s" +msgstr "%(dev_name)s aygıtında vfs %(vfs)s bulunamıyor" + +#, python-format +msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid." +msgstr "" +"%(host)s istemcisi üstündeki %(agent_type)s ajanı için yapılandırma geçersiz." + +#, python-format +msgid "Could not expand segment %s" +msgstr "Dilim %s genişletilemedi" + +#, python-format +msgid "" +"Creating an interface named %(name)s exceeds the %(limit)d character " +"limitation. It was shortened to %(new_name)s to fit." +msgstr "" +"%(name)s isimli bir arayüz oluşturmak %(limit)d karakter sınırını aşar. " +"Sığması için %(new_name)s'e kısaltıldı." + +#, python-format +msgid "DHCP agent %s is not active" +msgstr "DHCP ajanı %s etkin değil" + +msgid "DVR functionality requires a server upgrade." +msgstr "DVR işlevselliği sunucu yükseltmesi gerektiriyor." 
+ +#, python-format +msgid "Device %(device)s requested by agent %(agent_id)s not found in database" +msgstr "" +"%(agent_id)s ajanı tarafından istenen %(device)s aygıtı veri tabanında " +"bulunamadı" + +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network %(network_id)s " +"not bound, vif_type: %(vif_type)s" +msgstr "" +"%(network_id)s ağı üstündeki %(agent_id)s ajanı tarafından istenen " +"%(device)s aygıtı bağlı değil, vif_type: %(vif_type)s" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Aygıt %s eklenti üzerinde tanımlanmamış" + +#, python-format +msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" +msgstr "%(file)s içinde beklenen isim \"%(ext_name)s\" bulunamadı" + +msgid "Driver configuration doesn't match with enable_security_group" +msgstr "Sürücü yapılandırması enable_security_group ile eşleşmiyor" + +#, python-format +msgid "Endpoint with ip %s already exists" +msgstr "%s ip'sine sahip son uç zaten mevcut" + +#, python-format +msgid "Extension %s not supported by any of loaded plugins" +msgstr "Eklenti %s yüklenen hiçbir eklenti tarafından desteklenmiyor" + +#, python-format +msgid "Extension file %(f)s wasn't loaded due to %(exception)s" +msgstr "Eklenti dosyası %(f)s %(exception)s sebebiyle yüklenmedi" + +#, python-format +msgid "Failed to delete namespace %s" +msgstr "%s isim uzayı silme başarısız" + +#, python-format +msgid "Failed trying to delete interface: %s" +msgstr "Arayüzü silme denemesi başarısız: %s" + +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "Bilinirlik alanı silme hatası: %s" + +#, python-format +msgid "Found failed openvswitch port: %s" +msgstr "Başarısız olmuş openvswitch bağlantı noktası bulundu: %s" + +#, python-format +msgid "Found not yet ready openvswitch port: %s" +msgstr "Henüz hazır olmayan openvswitch bağlantı noktası bulundu: %s" + +#, python-format +msgid "Ignoring admin_state_up=False for router=%r. 
Overriding with True" +msgstr "" +"router=%r için admin_state_up=False atlanıyor. True ile üzerine yazılıyor" + +#, python-format +msgid "" +"In _notify_port_updated(), no bound segment for port %(port_id)s on network " +"%(network_id)s" +msgstr "" +"_notify_port_updated() içinde, %(network_id)s ağı üzerindeki %(port_id)s " +"bağlantı noktası için bağlı dilim yok" + +#, python-format +msgid "Info for router %s was not found. Performing router cleanup" +msgstr "" +"%s yönlendiricisi için bilgi bulunamadı. Yönlendirici temizliği " +"gerçekleştiriliyor" + +#, python-format +msgid "Interface %s not found in the heleos back-end, likely already deleted" +msgstr "Arayüz %s heleos arka-ucunda bulunamadı, muhtemelen zaten silinmiş" + +msgid "Invalid Interface ID, will lead to incorrect tap device name" +msgstr "Geçersiz arayüz kimliği, geçersiz tap aygıt ismine yol açacak" + +msgid "Invalid Network ID, will lead to incorrect bridge name" +msgstr "Geçersiz Ağ ID'si, geçersiz köprü ismine yol açacak" + +#, python-format +msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" +msgstr "" +"Geçersiz Dilimlendirme kimliği: %s, geçersiz vxlan aygıt ismine sebep olacak" + +msgid "Invalid VLAN ID, will lead to incorrect subinterface name" +msgstr "Geçersiz VLAN ID'si, geçersiz alt arayüz ismine yol açacak" + +#, python-format +msgid "Invalid remote IP: %s" +msgstr "Geçersiz uzak IP: %s" + +#, python-format +msgid "" +"Invalid value for pagination_max_limit: %s. It should be an integer greater " +"to 0" +msgstr "" +"pagination_max_limit: %s için geçersiz değer. 0'dan büyük bir tam sayı olmalı" + +#, python-format +msgid "" +"L2 agent could not get DVR MAC address from server. Retrying. Detailed " +"message: %s" +msgstr "" +"L2 ajanı sunucudan DVR MAC adresini alamadı. Tekrar deneniyor. 
Detaylı " +"ileti: %s" + +#, python-format +msgid "Loaded plugins do not implement extension %s interface" +msgstr "Yüklü eklentiler eklenti %s arayüzünü uygulamıyor" + +#, python-format +msgid "Network %s could not be found, it might have been deleted concurrently." +msgstr "%s ağı bulunamadı, eş zamanlı olarak silinmiş olabilir." + +#, python-format +msgid "Network %s has been deleted." +msgstr "Ağ %s silindi." + +#, python-format +msgid "" +"Network %s may have been deleted and its resources may have already been " +"disposed." +msgstr "Ağ %s silinmiş ve kaynakları ortadan kaldırılmış olabilir." + +msgid "" +"Neutron server does not support state report. State report for this agent " +"will be disabled." +msgstr "" +"Neutron sunucusu durum raporu desteklemiyor. Bu ajan için durum raporu " +"kapatılacak." + +msgid "No DHCP agents available, skipping rescheduling" +msgstr "Uygun DHCP ajanı yok, yeniden zamanlama atlanıyor" + +#, python-format +msgid "No L3 agents can host the router %s" +msgstr "Hiçbir L3 ajanı %s yönlendiricisini sunamaz" + +msgid "No Token, Re-login" +msgstr "Jeton yok, Yeniden-giriş" + +msgid "No active L3 agents" +msgstr "Etkin L3 ajanı yok" + +msgid "No active L3 agents found for SNAT" +msgstr "SNAT için etkin L3 ajanı bulunamadı" + +#, python-format +msgid "No flat network found on physical network %s" +msgstr "Fiziksel ağ %s üzerinde düz ağ bulunamadı" + +msgid "No more DHCP agents" +msgstr "Daha fazla DHCP ajanı yok" + +msgid "No policy profile populated from VSM" +msgstr "VSM'den herhangi bir ilke profili doldurulmadı" + +#, python-format +msgid "No routers compatible with L3 agent configuration on host %s" +msgstr "" +"Hiçbir yönlendirici %s istemcisi üzerindeki L3 ajanı yapılandırmasıyla " +"uyumlu değil" + +#, python-format +msgid "No valid gateway port on subnet %s is found for IPv6 RA" +msgstr "" +"IPv6 RA için %s alt ağı üzerinde geçerli ağ geçidi bağlantı noktası " +"bulunamadı" + +#, python-format +msgid "No vlan_id %(vlan_id)s 
found on physical network %(physical_network)s" +msgstr "%(physical_network)s fiziksel ağında vlan_id %(vlan_id)s bulunamadı" + +#, python-format +msgid "Nova event: %s returned with failed status" +msgstr "Nova olayı: %s başarısız durum döndürdü" + +#, python-format +msgid "Nova returned NotFound for event: %s" +msgstr "Nova %s eylemi için NotFound döndürdü" + +msgid "" +"OVS is dead. OVSNeutronAgent will keep running and checking OVS status " +"periodically." +msgstr "" +"OVS ölü. OVSNeutronAgent çalışmaya devam edip OVS durumunu aralıklarla " +"kontrol edecek." + +msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports." +msgstr "" +"OVS yeniden başlatıldı. OVSNeutronAgent köprüleri sıfırlayacak ve bağlantı " +"noktalarını kurtaracak." + +#, python-format +msgid "" +"Only %(active)d of %(total)d DHCP agents associated with network " +"'%(net_id)s' are marked as active, so notifications may be sent to inactive " +"agents." +msgstr "" +"'%(net_id)s' ağıyla ilişkilendirilmiş %(total)d DHCP ajanından yalnızca " +"%(active)d kadarı etkin olarak işaretlenmiş, yani iletiler etkin olmayan " +"ajanlara gönderilebilir." + +#, python-format +msgid "" +"Option \"%(option)s\" must be supported by command \"%(command)s\" to enable " +"%(mode)s mode" +msgstr "" +"\"%(option)s\" seçeneği %(mode)s kipini etkinleştirmek için \"%(command)s\" " +"komutuyla desteklenmeli" + +#, python-format +msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment" +msgstr "" +"%(agent)s ajanı tarafından güncellenen %(port)s bağlantı noktası herhangi " +"bir dilime bağlı değil" + +#, python-format +msgid "Port %s not found during update" +msgstr "%s bağlantı noktası güncelleme sırasında bulunamadı" + +msgid "Port ID not set! Nova will not be notified of port status change." +msgstr "" +"Bağlantı noktası kimliği ayarlanmamış! Nova bağlantı noktası durumu " +"değişikliğinde bilgilendirilmeyecek." 
+ +msgid "" +"Reading service_providers from legacy location in neutron.conf, and ignoring " +"values in neutron_*aas.conf files; this override will be going away soon." +msgstr "" +"service_providers neutron.conf'daki eski yerinden okunuyor, ve neutron_*aas." +"conf dosyalarındaki değerler atlanıyor; bu üzerine yazma yakında gidecek." + +msgid "" +"Registering resources to apply quota limits to using the quota_items option " +"is deprecated as of Liberty.Resource REST controllers should take care of " +"registering resources with the quota engine." +msgstr "" +"quota_items seçeneğini kullanarak kota sınırlarını uygulamak için kaynak " +"kaydetme Liberty itibariyle kullanılmıyor. Kaynakların kota motoruyla " +"kaydında kaynak REST kontrolcüleri kullanılmalı." + +#, python-format +msgid "" +"Removing network %(network)s from agent %(agent)s because the agent did not " +"report to the server in the last %(dead_time)s seconds." +msgstr "" +"%(network)s ağı %(agent)s ajanından çıkarılıyor çünkü ajan sunucuya son " +"%(dead_time)s saniye rapor vermedi." + +#, python-format +msgid "" +"Rescheduling router %(router)s from agent %(agent)s because the agent did " +"not report to the server in the last %(dead_time)s seconds." +msgstr "" +"Yönlendirici %(router)s %(agent)s ajanından yeniden zamanlanıyor çünkü ajan " +"sunucuya son %(dead_time)s saniye rapor vermedi." + +msgid "" +"Security group agent binding currently not set. This should be set by the " +"end of the init process." +msgstr "" +"Güvenlik grubu ajan bağlama şu an ayarlanmış değil. Bu init sürecinin " +"sonunda ayarlanmış olmalı." + +msgid "Server does not support metadata RPC, fallback to using neutron client" +msgstr "Sunucu metadata RPC desteklemiyor, neutron istemcisine geri dönülüyor" + +#, python-format +msgid "" +"The configured driver %(driver)s has been moved, automatically using " +"%(new_driver)s instead. 
Please update your config files, as this automatic " +"fixup will be removed in a future release." +msgstr "" +"Yapılandırılan sürücü %(driver)s taşınmış, yerine otomatik olarak " +"%(new_driver)s kullanılıyor. Lütfen yapılandırma dosyalarınızı güncelleyin, " +"çünkü bu otomatik düzeltme ileri sürümlerde kaldırılacak." + +msgid "" +"The remote metadata server responded with Forbidden. This response usually " +"occurs when shared secrets do not match." +msgstr "" +"Uzak metadata sunucu Yasaklı yanıtı döndü. Bu yanıt genellikle paylaşılan " +"gizler eşleşmediğinde oluşur." + +#, python-format +msgid "The router %s had no physical representation, likely already deleted" +msgstr "%s yönlendiricisinin fiziksel temsili yoktu, muhtemelen zaten silinmiş" + +msgid "" +"The user that is executing neutron can read the namespaces without using the " +"root_helper. Disable the use_helper_for_ns_read option to avoid a " +"performance impact." +msgstr "" +"Neutron'u çalıştıran kullanıcı root_helper kullanmadan isim uzaylarını " +"okuyabilir. Performansı etkilememesi için use_helper_for_ns_read seçeneğini " +"kapatın." + +#, python-format +msgid "" +"Time since last %s agent reschedule check has exceeded the interval between " +"checks. Waiting before check to allow agents to send a heartbeat in case " +"there was a clock adjustment." +msgstr "" +"Son %s ajan yeniden zamanlama kontrolünden sonra geçen zaman kontroller " +"arası zaman aralığını aştı. Bir saat ayarlama yapılmış olması durumunu " +"hesaba katmak için ajanların kalp atışı gönderebilmesi için kontrolden önce " +"bekleniyor." + +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r %(top)r" +msgstr "" +"Mevcut olmayan kural silinmeye çalışıldı: %(chain)r %(rule)r %(wrap)r %(top)r" + +msgid "Tunnel synchronization requires a server upgrade." +msgstr "Tünel eş zamanlama sunucu yükseltmesi gerektiriyor." 
+ +#, python-format +msgid "" +"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " +"current state; please check that the network and/or its subnet(s) still " +"exist." +msgstr "" +"%(net_id)s için %(action)s dhcp yapılamadı: mevcut durumuyla ilgili bir " +"çatışma var; lütfen ağ ve/veya alt ağ(lar)ının hala mevcut olduğunu kontrol " +"edin." + +#, python-format +msgid "Unable to configure IP address for floating IP: %s" +msgstr "Değişken IP için IP adresi yapılandırılamıyor: %s" + +#, python-format +msgid "Unable to find data type descriptor for attribute %s" +msgstr "%s özniteliği için veri türü tanımlayıcısı bulunamadı" + +msgid "Unable to retrieve the agent ip, check the agent configuration." +msgstr "Ajan ip'si alınamıyor, ajan yapılandırmasını kontrol edin." + +#, python-format +msgid "" +"Unable to schedule network %s: no agents available; will retry on subsequent " +"port and subnet creation events." +msgstr "" +"Ağ %s zamanlanamadı: hiçbir ajan uygun değil; sonraki bağlantı noktası " +"üzerinden ve alt ağ oluşturma olayları tekrar denenecek." + +#, python-format +msgid "Updating lease expiration is now deprecated. Issued from host %s." +msgstr "" +"Kira sona erme tarihlerini güncelleme artık kullanılmıyor. %s istemcisinden " +"yayınlandı." 
+ +#, python-format +msgid "" +"VIF port: %s has no ofport configured, and might not be able to transmit" +msgstr "" +"VIF bağlantı noktası: %s'in yapılandırılmış bir ofport'u yok, aktarım " +"yapamayabilir" + +msgid "VXLAN is enabled, a valid local_ip must be provided" +msgstr "VXLAN etkin, geçerli bir local_ip sağlanmalı" + +#, python-format +msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s" +msgstr "aygıt pci uyuşmazlığı: %(device_mac)s - %(pci_slot)s" + +#, python-format +msgid "failed to parse vf link show line %(line)s: for %(device)s" +msgstr "" +"vf bağlantısı gösteri satırı %(line)s: %(device)s için ayrıştırma başarısız" + +#, python-format +msgid "" +"l3-agent cannot check service plugins enabled at the neutron server when " +"startup due to RPC error. It happens when the server does not support this " +"RPC API. If the error is UnsupportedVersion you can ignore this warning. " +"Detail message: %s" +msgstr "" +"RPC hatası sebebiyle l3-agent açılışta neutron sunucusundaki neutron servis " +"eklentilerinin etkinliğini kontrol edemiyor. Bu durum sunucu bu RPC API'sini " +"desteklemediğinde olabilir. Hata UnsupportedVersion ise bu uyarıyı göz ardı " +"edebilirsiniz. Detaylı ileti: %s" + +#, python-format +msgid "" +"l3-agent cannot check service plugins enabled on the neutron server. " +"Retrying. Detail message: %s" +msgstr "" +"l3-agent neutron sunucusunda etkin servis eklentilerini kontrol edemiyor. " +"Tekrar deneniyor. Detaylı ileti: %s" + +#, python-format +msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" +msgstr "VIF: %(vif)s için ofport: %(ofport)s pozitif tam sayı değil" + +msgid "" +"security_group_info_for_devices rpc call not supported by the server, " +"falling back to old security_group_rules_for_devices which scales worse." +msgstr "" +"security_group_info_for_devices rpc çağrısı sunucu tarafından " +"desteklenmiyor, daha kötü ölçeklenen eski security_group_rules_for_devices'e " +"dönülüyor." 
+ +#, python-format +msgid "unable to modify mac_address of ACTIVE port %s" +msgstr "%s ETKİN bağlantı noktasının mac_address'i değiştirilemiyor" + +#, python-format +msgid "vlan_id %(vlan)s on physical network %(network)s not found" +msgstr "%(network)s fiziksel ağındaki vlan_id %(vlan)s bulunamadı" + +#, python-format +msgid "vxlan_id %s not found" +msgstr "vxlan_id %s bulunamadı" diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index f902b2b62df..95f40a46a49 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -35,6 +35,7 @@ LOG = logging.getLogger(__name__) VIF_UNPLUGGED = 'network-vif-unplugged' VIF_PLUGGED = 'network-vif-plugged' +VIF_DELETED = 'network-vif-deleted' NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', constants.PORT_STATUS_ERROR: 'failed', constants.PORT_STATUS_DOWN: 'completed'} @@ -121,6 +122,11 @@ class Notifier(object): return {'name': 'network-changed', 'server_uuid': device_id} + def _get_port_delete_event(self, port): + return {'server_uuid': port['device_id'], + 'name': VIF_DELETED, + 'tag': port['id']} + @property def _plugin(self): # NOTE(arosen): this cannot be set in __init__ currently since @@ -160,7 +166,7 @@ class Notifier(object): def create_port_changed_event(self, action, original_obj, returned_obj): port = None - if action == 'update_port': + if action in ['update_port', 'delete_port']: port = returned_obj['port'] elif action in ['update_floatingip', 'create_floatingip', @@ -178,7 +184,10 @@ class Notifier(object): port = self._plugin.get_port(ctx, port_id) if port and self._is_compute_port(port): - return self._get_network_changed_event(port['device_id']) + if action == 'delete_port': + return self._get_port_delete_event(port) + else: + return self._get_network_changed_event(port['device_id']) def record_port_status_changed(self, port, current_port_status, previous_port_status, initiator): diff --git a/neutron/plugins/nec/__init__.py 
b/neutron/objects/__init__.py similarity index 100% rename from neutron/plugins/nec/__init__.py rename to neutron/objects/__init__.py diff --git a/neutron/objects/base.py b/neutron/objects/base.py new file mode 100644 index 00000000000..371fd896d13 --- /dev/null +++ b/neutron/objects/base.py @@ -0,0 +1,167 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from oslo_db import exception as obj_exc +from oslo_utils import reflection +from oslo_versionedobjects import base as obj_base +import six + +from neutron.common import exceptions +from neutron.db import api as db_api + + +class NeutronObjectUpdateForbidden(exceptions.NeutronException): + message = _("Unable to update the following object fields: %(fields)s") + + +class NeutronDbObjectDuplicateEntry(exceptions.Conflict): + message = _("Failed to create a duplicate %(object_type)s: " + "for attribute(s) %(attributes)s with value(s) %(values)s") + + def __init__(self, object_class, db_exception): + super(NeutronDbObjectDuplicateEntry, self).__init__( + object_type=reflection.get_class_name(object_class, + fully_qualified=False), + attributes=db_exception.columns, + values=db_exception.value) + + +def get_updatable_fields(cls, fields): + fields = fields.copy() + for field in cls.fields_no_update: + if field in fields: + del fields[field] + return fields + + +@six.add_metaclass(abc.ABCMeta) +class NeutronObject(obj_base.VersionedObject, + obj_base.VersionedObjectDictCompat, + 
obj_base.ComparableVersionedObject): + + synthetic_fields = [] + + def __init__(self, context=None, **kwargs): + super(NeutronObject, self).__init__(context, **kwargs) + self.obj_set_defaults() + + def to_dict(self): + return dict(self.items()) + + @classmethod + def clean_obj_from_primitive(cls, primitive, context=None): + obj = cls.obj_from_primitive(primitive, context) + obj.obj_reset_changes() + return obj + + @classmethod + def get_by_id(cls, context, id): + raise NotImplementedError() + + @classmethod + def validate_filters(cls, **kwargs): + bad_filters = [key for key in kwargs + if key not in cls.fields or key in cls.synthetic_fields] + if bad_filters: + bad_filters = ', '.join(bad_filters) + msg = _("'%s' is not supported for filtering") % bad_filters + raise exceptions.InvalidInput(error_message=msg) + + @classmethod + @abc.abstractmethod + def get_objects(cls, context, **kwargs): + raise NotImplementedError() + + def create(self): + raise NotImplementedError() + + def update(self): + raise NotImplementedError() + + def delete(self): + raise NotImplementedError() + + +class NeutronDbObject(NeutronObject): + + # should be overridden for all persistent objects + db_model = None + + fields_no_update = [] + + def from_db_object(self, *objs): + for field in self.fields: + for db_obj in objs: + if field in db_obj: + setattr(self, field, db_obj[field]) + break + self.obj_reset_changes() + + @classmethod + def get_by_id(cls, context, id): + db_obj = db_api.get_object(context, cls.db_model, id=id) + if db_obj: + obj = cls(context, **db_obj) + obj.obj_reset_changes() + return obj + + @classmethod + def get_objects(cls, context, **kwargs): + cls.validate_filters(**kwargs) + db_objs = db_api.get_objects(context, cls.db_model, **kwargs) + objs = [cls(context, **db_obj) for db_obj in db_objs] + for obj in objs: + obj.obj_reset_changes() + return objs + + def _get_changed_persistent_fields(self): + fields = self.obj_get_changes() + for field in self.synthetic_fields: + 
if field in fields: + del fields[field] + return fields + + def _validate_changed_fields(self, fields): + fields = fields.copy() + # We won't allow id update anyway, so let's pop it out not to trigger + # update on id field touched by the consumer + fields.pop('id', None) + + forbidden_updates = set(self.fields_no_update) & set(fields.keys()) + if forbidden_updates: + raise NeutronObjectUpdateForbidden(fields=forbidden_updates) + + return fields + + def create(self): + fields = self._get_changed_persistent_fields() + try: + db_obj = db_api.create_object(self._context, self.db_model, fields) + except obj_exc.DBDuplicateEntry as db_exc: + raise NeutronDbObjectDuplicateEntry(object_class=self.__class__, + db_exception=db_exc) + + self.from_db_object(db_obj) + + def update(self): + updates = self._get_changed_persistent_fields() + updates = self._validate_changed_fields(updates) + + if updates: + db_obj = db_api.update_object(self._context, self.db_model, + self.id, updates) + self.from_db_object(self, db_obj) + + def delete(self): + db_api.delete_object(self._context, self.db_model, self.id) diff --git a/neutron/plugins/nec/db/__init__.py b/neutron/objects/qos/__init__.py similarity index 100% rename from neutron/plugins/nec/db/__init__.py rename to neutron/objects/qos/__init__.py diff --git a/neutron/objects/qos/policy.py b/neutron/objects/qos/policy.py new file mode 100644 index 00000000000..258512221fe --- /dev/null +++ b/neutron/objects/qos/policy.py @@ -0,0 +1,163 @@ +# Copyright 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields + +from neutron.common import exceptions +from neutron.db import api as db_api +from neutron.db.qos import api as qos_db_api +from neutron.db.qos import models as qos_db_model +from neutron.objects import base +from neutron.objects.qos import rule as rule_obj_impl + + +@obj_base.VersionedObjectRegistry.register +class QosPolicy(base.NeutronDbObject): + + db_model = qos_db_model.QosPolicy + + port_binding_model = qos_db_model.QosPortPolicyBinding + network_binding_model = qos_db_model.QosNetworkPolicyBinding + + fields = { + 'id': obj_fields.UUIDField(), + 'tenant_id': obj_fields.UUIDField(), + 'name': obj_fields.StringField(), + 'description': obj_fields.StringField(), + 'shared': obj_fields.BooleanField(default=False), + 'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True), + } + + fields_no_update = ['id', 'tenant_id'] + + synthetic_fields = ['rules'] + + def to_dict(self): + dict_ = super(QosPolicy, self).to_dict() + if 'rules' in dict_: + dict_['rules'] = [rule.to_dict() for rule in dict_['rules']] + return dict_ + + def obj_load_attr(self, attrname): + if attrname != 'rules': + raise exceptions.ObjectActionError( + action='obj_load_attr', reason='unable to load %s' % attrname) + + if not hasattr(self, attrname): + self.reload_rules() + + def reload_rules(self): + rules = rule_obj_impl.get_rules(self._context, self.id) + setattr(self, 'rules', rules) + self.obj_reset_changes(['rules']) + + @staticmethod + def _is_policy_accessible(context, db_obj): + #TODO(QoS): Look at I3426b13eede8bfa29729cf3efea3419fb91175c4 for + # other possible solutions to this. 
+ return (context.is_admin or + db_obj.shared or + db_obj.tenant_id == context.tenant_id) + + @classmethod + def get_by_id(cls, context, id): + # We want to get the policy regardless of its tenant id. We'll make + # sure the tenant has permission to access the policy later on. + admin_context = context.elevated() + with db_api.autonested_transaction(admin_context.session): + policy_obj = super(QosPolicy, cls).get_by_id(admin_context, id) + if (not policy_obj or + not cls._is_policy_accessible(context, policy_obj)): + return + + policy_obj.reload_rules() + return policy_obj + + @classmethod + def get_objects(cls, context, **kwargs): + # We want to get the policy regardless of its tenant id. We'll make + # sure the tenant has permission to access the policy later on. + admin_context = context.elevated() + with db_api.autonested_transaction(admin_context.session): + objs = super(QosPolicy, cls).get_objects(admin_context, + **kwargs) + result = [] + for obj in objs: + if not cls._is_policy_accessible(context, obj): + continue + obj.reload_rules() + result.append(obj) + return result + + @classmethod + def _get_object_policy(cls, context, model, **kwargs): + with db_api.autonested_transaction(context.session): + binding_db_obj = db_api.get_object(context, model, **kwargs) + if binding_db_obj: + return cls.get_by_id(context, binding_db_obj['policy_id']) + + @classmethod + def get_network_policy(cls, context, network_id): + return cls._get_object_policy(context, cls.network_binding_model, + network_id=network_id) + + @classmethod + def get_port_policy(cls, context, port_id): + return cls._get_object_policy(context, cls.port_binding_model, + port_id=port_id) + + # TODO(QoS): Consider extending base to trigger registered methods for us + def create(self): + with db_api.autonested_transaction(self._context.session): + super(QosPolicy, self).create() + self.reload_rules() + + def delete(self): + models = ( + ('network', self.network_binding_model), + ('port', 
self.port_binding_model) + ) + with db_api.autonested_transaction(self._context.session): + for object_type, model in models: + binding_db_obj = db_api.get_object(self._context, model, + policy_id=self.id) + if binding_db_obj: + raise exceptions.QosPolicyInUse( + policy_id=self.id, + object_type=object_type, + object_id=binding_db_obj['%s_id' % object_type]) + + super(QosPolicy, self).delete() + + def attach_network(self, network_id): + qos_db_api.create_policy_network_binding(self._context, + policy_id=self.id, + network_id=network_id) + + def attach_port(self, port_id): + qos_db_api.create_policy_port_binding(self._context, + policy_id=self.id, + port_id=port_id) + + def detach_network(self, network_id): + qos_db_api.delete_policy_network_binding(self._context, + policy_id=self.id, + network_id=network_id) + + def detach_port(self, port_id): + qos_db_api.delete_policy_port_binding(self._context, + policy_id=self.id, + port_id=port_id) diff --git a/neutron/objects/qos/rule.py b/neutron/objects/qos/rule.py new file mode 100644 index 00000000000..4398c7004ee --- /dev/null +++ b/neutron/objects/qos/rule.py @@ -0,0 +1,71 @@ +# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import sys + +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields +import six + +from neutron.common import utils +from neutron.db import api as db_api +from neutron.db.qos import models as qos_db_model +from neutron.objects import base +from neutron.services.qos import qos_consts + + +def get_rules(context, qos_policy_id): + all_rules = [] + with db_api.autonested_transaction(context.session): + for rule_type in qos_consts.VALID_RULE_TYPES: + rule_cls_name = 'Qos%sRule' % utils.camelize(rule_type) + rule_cls = getattr(sys.modules[__name__], rule_cls_name) + + rules = rule_cls.get_objects(context, qos_policy_id=qos_policy_id) + all_rules.extend(rules) + return all_rules + + +@six.add_metaclass(abc.ABCMeta) +class QosRule(base.NeutronDbObject): + + fields = { + 'id': obj_fields.UUIDField(), + 'qos_policy_id': obj_fields.UUIDField() + } + + fields_no_update = ['id', 'qos_policy_id'] + + # should be redefined in subclasses + rule_type = None + + def to_dict(self): + dict_ = super(QosRule, self).to_dict() + dict_['type'] = self.rule_type + return dict_ + + +@obj_base.VersionedObjectRegistry.register +class QosBandwidthLimitRule(QosRule): + + db_model = qos_db_model.QosBandwidthLimitRule + + fields = { + 'max_kbps': obj_fields.IntegerField(nullable=True), + 'max_burst_kbps': obj_fields.IntegerField(nullable=True) + } + + rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT diff --git a/neutron/objects/qos/rule_type.py b/neutron/objects/qos/rule_type.py new file mode 100644 index 00000000000..fb0754b9394 --- /dev/null +++ b/neutron/objects/qos/rule_type.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields + +from neutron import manager +from neutron.objects import base +from neutron.services.qos import qos_consts + + +class RuleTypeField(obj_fields.BaseEnumField): + + def __init__(self, **kwargs): + self.AUTO_TYPE = obj_fields.Enum( + valid_values=qos_consts.VALID_RULE_TYPES) + super(RuleTypeField, self).__init__(**kwargs) + + +@obj_base.VersionedObjectRegistry.register +class QosRuleType(base.NeutronObject): + + fields = { + 'type': RuleTypeField(), + } + + # we don't receive context because we don't need db access at all + @classmethod + def get_objects(cls, **kwargs): + cls.validate_filters(**kwargs) + core_plugin = manager.NeutronManager.get_plugin() + return [cls(type=type_) + for type_ in core_plugin.supported_qos_rule_types] diff --git a/neutron/plugins/cisco/l2device_plugin_base.py b/neutron/plugins/cisco/l2device_plugin_base.py index 56fd91f199d..660e5f8d05e 100644 --- a/neutron/plugins/cisco/l2device_plugin_base.py +++ b/neutron/plugins/cisco/l2device_plugin_base.py @@ -160,9 +160,9 @@ class L2DevicePluginBase(object): fn_obj = base.__dict__[method] if inspect.isfunction(fn_obj): abstract_fn_obj = cls.__dict__[method] - arg_count = fn_obj.func_code.co_argcount + arg_count = fn_obj.__code__.co_argcount expected_arg_count = \ - abstract_fn_obj.func_code.co_argcount + abstract_fn_obj.__code__.co_argcount method_ok = arg_count == expected_arg_count if method_ok: continue diff --git a/neutron/plugins/common/constants.py 
b/neutron/plugins/common/constants.py index edf52f5932b..65a0fb3e55d 100644 --- a/neutron/plugins/common/constants.py +++ b/neutron/plugins/common/constants.py @@ -23,6 +23,7 @@ VPN = "VPN" METERING = "METERING" L3_ROUTER_NAT = "L3_ROUTER_NAT" FLAVORS = "FLAVORS" +QOS = "QOS" # Maps extension alias to service type EXT_TO_SERVICE_MAPPING = { @@ -33,7 +34,8 @@ EXT_TO_SERVICE_MAPPING = { 'vpnaas': VPN, 'metering': METERING, 'router': L3_ROUTER_NAT, - 'flavors': FLAVORS + 'flavors': FLAVORS, + 'qos': QOS, } # Service operation status constants @@ -54,6 +56,7 @@ ACTIVE_PENDING_STATUSES = ( # Network Type constants TYPE_FLAT = 'flat' +TYPE_GENEVE = 'geneve' TYPE_GRE = 'gre' TYPE_LOCAL = 'local' TYPE_VXLAN = 'vxlan' @@ -66,6 +69,10 @@ TYPE_NONE = 'none' MIN_VLAN_TAG = 1 MAX_VLAN_TAG = 4094 +# For Geneve Tunnel +MIN_GENEVE_VNI = 1 +MAX_GENEVE_VNI = 2 ** 24 - 1 + # For GRE Tunnel MIN_GRE_ID = 1 MAX_GRE_ID = 2 ** 32 - 1 @@ -76,5 +83,6 @@ MAX_VXLAN_VNI = 2 ** 24 - 1 VXLAN_UDP_PORT = 4789 # Network Type MTU overhead +GENEVE_ENCAP_MIN_OVERHEAD = 50 GRE_ENCAP_OVERHEAD = 42 VXLAN_ENCAP_OVERHEAD = 50 diff --git a/neutron/plugins/common/utils.py b/neutron/plugins/common/utils.py index 40ca2cffd35..61e7c164f24 100644 --- a/neutron/plugins/common/utils.py +++ b/neutron/plugins/common/utils.py @@ -16,6 +16,9 @@ Common utilities and helper functions for Openstack Networking Plugins. 
""" +import webob.exc + +from neutron.api.v2 import attributes from neutron.common import exceptions as n_exc from neutron.plugins.common import constants as p_const @@ -32,10 +35,15 @@ def is_valid_vxlan_vni(vni): return p_const.MIN_VXLAN_VNI <= vni <= p_const.MAX_VXLAN_VNI +def is_valid_geneve_vni(vni): + return p_const.MIN_GENEVE_VNI <= vni <= p_const.MAX_GENEVE_VNI + + def verify_tunnel_range(tunnel_range, tunnel_type): """Raise an exception for invalid tunnel range or malformed range.""" mappings = {p_const.TYPE_GRE: is_valid_gre_id, - p_const.TYPE_VXLAN: is_valid_vxlan_vni} + p_const.TYPE_VXLAN: is_valid_vxlan_vni, + p_const.TYPE_GENEVE: is_valid_geneve_vni} if tunnel_type in mappings: for ident in tunnel_range: if not mappings[tunnel_type](ident): @@ -96,3 +104,37 @@ def in_pending_status(status): return status in (p_const.PENDING_CREATE, p_const.PENDING_UPDATE, p_const.PENDING_DELETE) + + +def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[attr_name] + try: + attributes.populate_tenant_id(context, res_dict, attr_info, True) + attributes.verify_attributes(res_dict, attr_info) + except webob.exc.HTTPBadRequest as e: + # convert webob exception into ValueError as these functions are + # for internal use. webob exception doesn't make sense. 
+ raise ValueError(e.detail) + attributes.fill_default_value(attr_info, res_dict, + check_allow_post=check_allow_post) + attributes.convert_value(attr_info, res_dict) + return res_dict + + +def create_network(core_plugin, context, net): + net_data = _fixup_res_dict(context, attributes.NETWORKS, + net.get('network', {})) + return core_plugin.create_network(context, {'network': net_data}) + + +def create_subnet(core_plugin, context, subnet): + subnet_data = _fixup_res_dict(context, attributes.SUBNETS, + subnet.get('subnet', {})) + return core_plugin.create_subnet(context, {'subnet': subnet_data}) + + +def create_port(core_plugin, context, port, check_allow_post=True): + port_data = _fixup_res_dict(context, attributes.PORTS, + port.get('port', {}), + check_allow_post=check_allow_post) + return core_plugin.create_port(context, {'port': port_data}) diff --git a/neutron/plugins/hyperv/agent/l2_agent.py b/neutron/plugins/hyperv/agent/l2_agent.py index 5b6a8f31dec..956e1ec38fa 100644 --- a/neutron/plugins/hyperv/agent/l2_agent.py +++ b/neutron/plugins/hyperv/agent/l2_agent.py @@ -29,6 +29,7 @@ from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context from neutron.i18n import _LE +from neutron.plugins.ml2.drivers.hyperv import constants as h_const LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -82,7 +83,7 @@ class HyperVNeutronAgent(hyperv_neutron_agent.HyperVNeutronAgentMixin): 'host': CONF.host, 'topic': n_const.L2_AGENT_TOPIC, 'configurations': configurations, - 'agent_type': n_const.AGENT_TYPE_HYPERV, + 'agent_type': h_const.AGENT_TYPE_HYPERV, 'start_flag': True} def _report_state(self): diff --git a/neutron/plugins/ibm/README b/neutron/plugins/ibm/README deleted file mode 100644 index 732fd777689..00000000000 --- a/neutron/plugins/ibm/README +++ /dev/null @@ -1,6 +0,0 @@ -IBM SDN-VE Neutron Plugin - -This plugin implements Neutron v2 APIs. 
- -For more details on how to use it please refer to the following page: -http://wiki.openstack.org/wiki/IBM-Neutron diff --git a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py deleted file mode 100644 index a9827c52e14..00000000000 --- a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -from oslo_service import loopingcall -import six - -from neutron.agent.common import ovs_lib -from neutron.agent.linux import ip_lib -from neutron.agent import rpc as agent_rpc -from neutron.common import config as common_config -from neutron.common import constants as n_const -from neutron.common import topics -from neutron.common import utils as n_utils -from neutron.i18n import _LE, _LI -from neutron import context -from neutron.plugins.ibm.common import constants - - -LOG = logging.getLogger(__name__) -cfg.CONF.import_group('SDNVE', 'neutron.plugins.ibm.common.config') -cfg.CONF.import_group('SDNVE_AGENT', 'neutron.plugins.ibm.common.config') - - -class SdnvePluginApi(agent_rpc.PluginApi): - - def sdnve_info(self, context, info): - cctxt = self.client.prepare() - return cctxt.call(context, 'sdnve_info', info=info) - - 
-class SdnveNeutronAgent(object): - - target = oslo_messaging.Target(version='1.1') - - def __init__(self, integ_br, interface_mappings, - info, polling_interval, - controller_ip, reset_br, out_of_band): - '''The agent initialization. - - Sets the following parameters and sets up the integration - bridge and physical interfaces if need be. - :param integ_br: name of the integration bridge. - :param interface_mappings: interfaces to physical networks. - :param info: local IP address of this hypervisor. - :param polling_interval: interval (secs) to poll DB. - :param controller_ip: Ip address of SDN-VE controller. - ''' - - super(SdnveNeutronAgent, self).__init__() - self.int_bridge_name = integ_br - self.controller_ip = controller_ip - self.interface_mappings = interface_mappings - self.polling_interval = polling_interval - self.info = info - self.reset_br = reset_br - self.out_of_band = out_of_band - - self.agent_state = { - 'binary': 'neutron-sdnve-agent', - 'host': cfg.CONF.host, - 'topic': n_const.L2_AGENT_TOPIC, - 'configurations': {'interface_mappings': interface_mappings, - 'reset_br': self.reset_br, - 'out_of_band': self.out_of_band, - 'controller_ip': self.controller_ip}, - 'agent_type': n_const.AGENT_TYPE_SDNVE, - 'start_flag': True} - - if self.int_bridge_name: - self.int_br = self.setup_integration_br(integ_br, reset_br, - out_of_band, - self.controller_ip) - self.setup_physical_interfaces(self.interface_mappings) - else: - self.int_br = None - - self.setup_rpc() - - def _report_state(self): - try: - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_LE("Failed reporting state!")) - - def setup_rpc(self): - if self.int_br: - mac = self.int_br.get_local_port_mac() - self.agent_id = '%s%s' % ('sdnve', (mac.replace(":", ""))) - else: - nameaddr = socket.gethostbyname(socket.gethostname()) - self.agent_id = '%s%s' % ('sdnve_', (nameaddr.replace(".", "_"))) - - 
self.topic = topics.AGENT - self.plugin_rpc = SdnvePluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - - self.context = context.get_admin_context_without_session() - self.endpoints = [self] - consumers = [[constants.INFO, topics.UPDATE]] - - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - if self.polling_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=self.polling_interval) - - # Plugin calls the agents through the following - def info_update(self, context, **kwargs): - LOG.debug("info_update received") - info = kwargs.get('info', {}) - new_controller = info.get('new_controller') - out_of_band = info.get('out_of_band') - if self.int_br and new_controller: - LOG.debug("info_update received. New controller " - "is to be set to: %s", new_controller) - self.int_br.set_controller(["tcp:" + new_controller]) - if out_of_band: - LOG.debug("info_update received. New controller " - "is set to be out of band") - self.int_br.set_db_attribute("Controller", - self.int_bridge_name, - "connection-mode", - "out-of-band") - - def setup_integration_br(self, bridge_name, reset_br, out_of_band, - controller_ip=None): - '''Sets up the integration bridge. - - Create the bridge and remove all existing flows if reset_br is True. - Otherwise, creates the bridge if not already existing. - :param bridge_name: the name of the integration bridge. - :param reset_br: A boolean to rest the bridge if True. - :param out_of_band: A boolean indicating controller is out of band. - :param controller_ip: IP address to use as the bridge controller. 
- :returns: the integration bridge - ''' - - int_br = ovs_lib.OVSBridge(bridge_name) - if reset_br: - int_br.reset_bridge() - int_br.remove_all_flows() - else: - int_br.create() - - # set the controller - if controller_ip: - int_br.set_controller(["tcp:" + controller_ip]) - if out_of_band: - int_br.set_db_attribute("Controller", bridge_name, - "connection-mode", "out-of-band") - - return int_br - - def setup_physical_interfaces(self, interface_mappings): - '''Sets up the physical network interfaces. - - Link physical interfaces to the integration bridge. - :param interface_mappings: map physical net names to interface names. - ''' - - for physical_network, interface in six.iteritems(interface_mappings): - LOG.info(_LI("Mapping physical network %(physical_network)s to " - "interface %(interface)s"), - {'physical_network': physical_network, - 'interface': interface}) - # Connect the physical interface to the bridge - if not ip_lib.device_exists(interface): - LOG.error(_LE("Interface %(interface)s for physical network " - "%(physical_network)s does not exist. Agent " - "terminated!"), - {'physical_network': physical_network, - 'interface': interface}) - raise SystemExit(1) - self.int_br.add_port(interface) - - def sdnve_info(self): - details = self.plugin_rpc.sdnve_info( - self.context, - {'info': self.info}) - return details - - def rpc_loop(self): - - while True: - start = time.time() - LOG.debug("Agent in the rpc loop.") - - # sleep till end of polling interval - elapsed = (time.time() - start) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.info(_LI("Loop iteration exceeded interval " - "(%(polling_interval)s vs. 
%(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - - def daemon_loop(self): - self.rpc_loop() - - -def create_agent_config_map(config): - interface_mappings = n_utils.parse_mappings( - config.SDNVE.interface_mappings) - - controller_ips = config.SDNVE.controller_ips - LOG.info(_LI("Controller IPs: %s"), controller_ips) - controller_ip = controller_ips[0] - - return { - 'integ_br': config.SDNVE.integration_bridge, - 'interface_mappings': interface_mappings, - 'controller_ip': controller_ip, - 'info': config.SDNVE.info, - 'polling_interval': config.SDNVE_AGENT.polling_interval, - 'reset_br': config.SDNVE.reset_bridge, - 'out_of_band': config.SDNVE.out_of_band} - - -def main(): - cfg.CONF.register_opts(ip_lib.OPTS) - common_config.init(sys.argv[1:]) - common_config.setup_logging() - - try: - agent_config = create_agent_config_map(cfg.CONF) - except ValueError as e: - LOG.exception(_LE("%s Agent terminated!"), e) - raise SystemExit(1) - - plugin = SdnveNeutronAgent(**agent_config) - - # Start everything. - LOG.info(_LI("Agent initialized successfully, now running... ")) - plugin.daemon_loop() diff --git a/neutron/plugins/ibm/common/config.py b/neutron/plugins/ibm/common/config.py deleted file mode 100644 index 73580bca763..00000000000 --- a/neutron/plugins/ibm/common/config.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_config import cfg - - -DEFAULT_INTERFACE_MAPPINGS = [] -DEFAULT_CONTROLLER_IPS = ['127.0.0.1'] - -sdnve_opts = [ - cfg.BoolOpt('use_fake_controller', default=False, - help=_("Whether to use a fake controller.")), - cfg.StrOpt('base_url', default='/one/nb/v2/', - help=_("Base URL for SDN-VE controller REST API.")), - cfg.ListOpt('controller_ips', default=DEFAULT_CONTROLLER_IPS, - help=_("List of IP addresses of SDN-VE controller(s).")), - cfg.StrOpt('info', default='sdnve_info_string', - help=_("SDN-VE RPC subject.")), - cfg.StrOpt('port', default='8443', - help=_("SDN-VE controller port number.")), - cfg.StrOpt('format', default='json', - help=_("SDN-VE request/response format.")), - cfg.StrOpt('userid', default='admin', - help=_("SDN-VE administrator user ID.")), - cfg.StrOpt('password', default='admin', secret=True, - help=_("SDN-VE administrator password.")), - cfg.StrOpt('integration_bridge', - help=_("Integration bridge to use.")), - cfg.BoolOpt('reset_bridge', default=True, - help=_("Whether to reset the integration bridge before use.")), - cfg.BoolOpt('out_of_band', default=True, - help=_("Indicating if controller is out of band or not.")), - cfg.ListOpt('interface_mappings', - default=DEFAULT_INTERFACE_MAPPINGS, - help=_("List of : " - "mappings.")), - cfg.StrOpt('default_tenant_type', default='OVERLAY', - help=_("Tenant type: OVERLAY (default) or OF.")), - cfg.StrOpt('overlay_signature', default='SDNVE-OVERLAY', - help=_("The string in tenant description that indicates " - "the tenant is a OVERLAY tenant.")), - cfg.StrOpt('of_signature', default='SDNVE-OF', - help=_("The string in tenant description that indicates " - "the tenant is a OF tenant.")), -] - -sdnve_agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("Agent polling interval if necessary.")), - cfg.BoolOpt('rpc', default=True, - help=_("Whether to use rpc.")), - -] - - -cfg.CONF.register_opts(sdnve_opts, "SDNVE") -cfg.CONF.register_opts(sdnve_agent_opts, 
"SDNVE_AGENT") diff --git a/neutron/plugins/ibm/sdnve_api.py b/neutron/plugins/ibm/sdnve_api.py deleted file mode 100644 index 63546d30394..00000000000 --- a/neutron/plugins/ibm/sdnve_api.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from six.moves import http_client as httplib - -import httplib2 -from keystoneclient.v2_0 import client as keyclient -from oslo_config import cfg -from oslo_log import log as logging -from six.moves.urllib import parse - -from neutron.api.v2 import attributes -from neutron.common import utils -from neutron.i18n import _LE, _LI -from neutron.plugins.ibm.common import config # noqa -from neutron.plugins.ibm.common import constants -from neutron import wsgi - -LOG = logging.getLogger(__name__) - -SDNVE_VERSION = '2.0' -SDNVE_ACTION_PREFIX = '/sdnve' -SDNVE_RETRIES = 0 -SDNVE_RETRIY_INTERVAL = 1 -SDNVE_TENANT_TYPE_OVERLAY = u'DOVE' -SDNVE_URL = 'https://%s:%s%s' - - -class RequestHandler(object): - '''Handles processing requests to and responses from controller.''' - - def __init__(self, controller_ips=None, port=None, ssl=None, - base_url=None, userid=None, password=None, - timeout=10, formats=None): - '''Initializes the RequestHandler for communication with controller - - Following keyword arguments are used; if not specified, default - values are used. - :param port: Username for authentication. - :param timeout: Time out for http requests. 
- :param userid: User id for accessing controller. - :param password: Password for accessing the controller. - :param base_url: The base url for the controller. - :param controller_ips: List of controller IP addresses. - :param formats: Supported formats. - ''' - self.port = port or cfg.CONF.SDNVE.port - self.timeout = timeout - self._s_meta = None - self.connection = None - self.httpclient = httplib2.Http( - disable_ssl_certificate_validation=True) - self.cookie = None - - userid = userid or cfg.CONF.SDNVE.userid - password = password or cfg.CONF.SDNVE.password - if (userid and password): - self.httpclient.add_credentials(userid, password) - - self.base_url = base_url or cfg.CONF.SDNVE.base_url - self.controller_ips = controller_ips or cfg.CONF.SDNVE.controller_ips - - LOG.info(_LI("The IP addr of available SDN-VE controllers: %s"), - self.controller_ips) - self.controller_ip = self.controller_ips[0] - LOG.info(_LI("The SDN-VE controller IP address: %s"), - self.controller_ip) - - self.new_controller = False - self.format = formats or cfg.CONF.SDNVE.format - - self.version = SDNVE_VERSION - self.action_prefix = SDNVE_ACTION_PREFIX - self.retries = SDNVE_RETRIES - self.retry_interval = SDNVE_RETRIY_INTERVAL - - def serialize(self, data): - '''Serializes a dictionary with a single key.''' - - if isinstance(data, dict): - return wsgi.Serializer().serialize(data, self.content_type()) - elif data: - raise TypeError(_("unable to serialize object type: '%s'") % - type(data)) - - def deserialize(self, data, status_code): - '''Deserializes an xml or json string into a dictionary.''' - - # NOTE(mb): Temporary fix for backend controller requirement - data = data.replace("router_external", "router:external") - - if status_code == httplib.NO_CONTENT: - return data - try: - deserialized_data = wsgi.Serializer( - metadata=self._s_meta).deserialize(data, self.content_type()) - deserialized_data = deserialized_data['body'] - except Exception: - deserialized_data = data - - return 
deserialized_data - - def content_type(self, format=None): - '''Returns the mime-type for either 'xml' or 'json'.''' - - return 'application/%s' % (format or self.format) - - def delete(self, url, body=None, headers=None, params=None): - return self.do_request("DELETE", url, body=body, - headers=headers, params=params) - - def get(self, url, body=None, headers=None, params=None): - return self.do_request("GET", url, body=body, - headers=headers, params=params) - - def post(self, url, body=None, headers=None, params=None): - return self.do_request("POST", url, body=body, - headers=headers, params=params) - - def put(self, url, body=None, headers=None, params=None): - return self.do_request("PUT", url, body=body, - headers=headers, params=params) - - def do_request(self, method, url, body=None, headers=None, - params=None, connection_type=None): - - status_code = -1 - replybody_deserialized = '' - - if body: - body = self.serialize(body) - - self.headers = headers or {'Content-Type': self.content_type()} - if self.cookie: - self.headers['cookie'] = self.cookie - - if self.controller_ip != self.controller_ips[0]: - controllers = [self.controller_ip] - else: - controllers = [] - controllers.extend(self.controller_ips) - - for controller_ip in controllers: - serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url) - myurl = serverurl + url - if params and isinstance(params, dict): - myurl += '?' + parse.urlencode(params, doseq=1) - - try: - LOG.debug("Sending request to SDN-VE. url: " - "%(myurl)s method: %(method)s body: " - "%(body)s header: %(header)s ", - {'myurl': myurl, 'method': method, - 'body': body, 'header': self.headers}) - resp, replybody = self.httpclient.request( - myurl, method=method, body=body, headers=self.headers) - LOG.debug("Response recd from SDN-VE. 
resp: %(resp)s " - "body: %(body)s", - {'resp': resp.status, 'body': replybody}) - status_code = resp.status - - except Exception as e: - LOG.error(_LE("Error: Could not reach server: %(url)s " - "Exception: %(excp)s."), - {'url': myurl, 'excp': e}) - self.cookie = None - continue - - if status_code not in constants.HTTP_ACCEPTABLE: - LOG.debug("Error message: %(reply)s -- Status: %(status)s", - {'reply': replybody, 'status': status_code}) - else: - LOG.debug("Received response status: %s", status_code) - - if resp.get('set-cookie'): - self.cookie = resp['set-cookie'] - replybody_deserialized = self.deserialize( - replybody, - status_code) - LOG.debug("Deserialized body: %s", replybody_deserialized) - if controller_ip != self.controller_ip: - # bcast the change of controller - self.new_controller = True - self.controller_ip = controller_ip - - return (status_code, replybody_deserialized) - - return (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)') - - -class Client(RequestHandler): - '''Client for SDNVE controller.''' - - def __init__(self): - '''Initialize a new SDNVE client.''' - super(Client, self).__init__() - - self.keystoneclient = KeystoneClient() - - resource_path = { - 'network': "ln/networks/", - 'subnet': "ln/subnets/", - 'port': "ln/ports/", - 'tenant': "ln/tenants/", - 'router': "ln/routers/", - 'floatingip': "ln/floatingips/", - } - - def process_request(self, body): - '''Processes requests according to requirements of controller.''' - if self.format == 'json': - body = dict( - (k.replace(':', '_'), v) for k, v in body.items() - if attributes.is_attr_set(v)) - return body - - def sdnve_list(self, resource, **params): - '''Fetches a list of resources.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_LI("Bad resource for forming a list request")) - return 0, '' - - return self.get(res, params=params) - - def sdnve_show(self, resource, specific, **params): - '''Fetches information of a certain resource.''' - - res = 
self.resource_path.get(resource, None) - if not res: - LOG.info(_LI("Bad resource for forming a show request")) - return 0, '' - - return self.get(res + specific, params=params) - - def sdnve_create(self, resource, body): - '''Creates a new resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_LI("Bad resource for forming a create request")) - return 0, '' - - body = self.process_request(body) - status, data = self.post(res, body=body) - return (status, data) - - def sdnve_update(self, resource, specific, body=None): - '''Updates a resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_LI("Bad resource for forming a update request")) - return 0, '' - - body = self.process_request(body) - return self.put(res + specific, body=body) - - def sdnve_delete(self, resource, specific): - '''Deletes the specified resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_LI("Bad resource for forming a delete request")) - return 0, '' - - return self.delete(res + specific) - - def _tenant_id_conversion(self, osid): - return osid - - def sdnve_get_tenant_byid(self, os_tenant_id): - sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) - resp, content = self.sdnve_show('tenant', sdnve_tenant_id) - if resp in constants.HTTP_ACCEPTABLE: - tenant_id = content.get('id') - tenant_type = content.get('network_type') - if tenant_type == SDNVE_TENANT_TYPE_OVERLAY: - tenant_type = constants.TENANT_TYPE_OVERLAY - return tenant_id, tenant_type - return None, None - - def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None): - - if not os_tenant_id: - return - tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id) - if tenant_id: - if not network_type: - return tenant_id - if tenant_type != network_type: - LOG.info(_LI("Non matching tenant and network types: " - "%(ttype)s %(ntype)s"), - {'ttype': tenant_type, 'ntype': network_type}) - return - return tenant_id - - 
# Have to create a new tenant - sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) - if not network_type: - network_type = self.keystoneclient.get_tenant_type(os_tenant_id) - if network_type == constants.TENANT_TYPE_OVERLAY: - network_type = SDNVE_TENANT_TYPE_OVERLAY - - pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " + - self.keystoneclient.get_tenant_name(os_tenant_id)) - - res, content = self.sdnve_create('tenant', - {'id': sdnve_tenant_id, - 'name': os_tenant_id, - 'network_type': network_type, - 'description': pinn_desc}) - if res not in constants.HTTP_ACCEPTABLE: - return - - return sdnve_tenant_id - - def sdnve_get_controller(self): - if self.new_controller: - self.new_controller = False - return self.controller_ip - - -class KeystoneClient(object): - - def __init__(self, username=None, tenant_name=None, password=None, - auth_url=None): - - keystone_conf = cfg.CONF.keystone_authtoken - - username = username or keystone_conf.admin_user - tenant_name = tenant_name or keystone_conf.admin_tenant_name - password = password or keystone_conf.admin_password - # FIXME(ihrachys): plugins should not construct keystone URL - # from configuration file and should instead rely on service - # catalog contents - auth_url = auth_url or utils.get_keystone_url(keystone_conf) - - self.overlay_signature = cfg.CONF.SDNVE.overlay_signature - self.of_signature = cfg.CONF.SDNVE.of_signature - self.default_tenant_type = cfg.CONF.SDNVE.default_tenant_type - - self.client = keyclient.Client(username=username, - password=password, - tenant_name=tenant_name, - auth_url=auth_url) - - def get_tenant_byid(self, id): - - try: - return self.client.tenants.get(id) - except Exception: - LOG.exception(_LE("Did not find tenant: %r"), id) - - def get_tenant_type(self, id): - - tenant = self.get_tenant_byid(id) - if tenant: - description = tenant.description - if description: - if (description.find(self.overlay_signature) >= 0): - return constants.TENANT_TYPE_OVERLAY - if 
(description.find(self.of_signature) >= 0): - return constants.TENANT_TYPE_OF - return self.default_tenant_type - - def get_tenant_name(self, id): - - tenant = self.get_tenant_byid(id) - if tenant: - return tenant.name - return 'not found' diff --git a/neutron/plugins/ibm/sdnve_api_fake.py b/neutron/plugins/ibm/sdnve_api_fake.py deleted file mode 100644 index a6c0aeedfc8..00000000000 --- a/neutron/plugins/ibm/sdnve_api_fake.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from neutron.i18n import _LI -from neutron.plugins.ibm.common import constants - -LOG = logging.getLogger(__name__) - -HTTP_OK = 200 - - -class FakeClient(object): - - '''Fake Client for SDNVE controller.''' - - def __init__(self, **kwargs): - LOG.info(_LI('Fake SDNVE controller initialized')) - - def sdnve_list(self, resource, **_params): - LOG.info(_LI('Fake SDNVE controller: list')) - return (HTTP_OK, None) - - def sdnve_show(self, resource, specific, **_params): - LOG.info(_LI('Fake SDNVE controller: show')) - return (HTTP_OK, None) - - def sdnve_create(self, resource, body): - LOG.info(_LI('Fake SDNVE controller: create')) - return (HTTP_OK, None) - - def sdnve_update(self, resource, specific, body=None): - LOG.info(_LI('Fake SDNVE controller: update')) - return (HTTP_OK, None) - - def sdnve_delete(self, resource, specific): - LOG.info(_LI('Fake SDNVE controller: delete')) - return (HTTP_OK, None) - - def sdnve_get_tenant_byid(self, id): - LOG.info(_LI('Fake SDNVE controller: get tenant by id')) - return id, constants.TENANT_TYPE_OF - - def sdnve_check_and_create_tenant(self, id, network_type=None): - LOG.info(_LI('Fake SDNVE controller: check and create tenant')) - return id - - def sdnve_get_controller(self): - LOG.info(_LI('Fake SDNVE controller: get controller')) - return None diff --git a/neutron/plugins/ibm/sdnve_neutron_plugin.py b/neutron/plugins/ibm/sdnve_neutron_plugin.py deleted file mode 100644 index ac4ae1a3bc6..00000000000 --- a/neutron/plugins/ibm/sdnve_neutron_plugin.py +++ /dev/null @@ -1,678 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import functools - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -from oslo_utils import excutils - -from neutron.common import constants as n_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc as n_rpc -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import l3_gwmode_db -from neutron.db import portbindings_db -from neutron.extensions import portbindings -from neutron.i18n import _LE, _LI, _LW -from neutron.plugins.ibm.common import config # noqa -from neutron.plugins.ibm.common import constants -from neutron.plugins.ibm.common import exceptions as sdnve_exc -from neutron.plugins.ibm import sdnve_api as sdnve -from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake - -LOG = logging.getLogger(__name__) - - -class SdnveRpcCallbacks(object): - - def __init__(self, notifier): - self.notifier = notifier # used to notify the agent - - def sdnve_info(self, rpc_context, **kwargs): - '''Update new information.''' - info = kwargs.get('info') - # Notify all other listening agents - self.notifier.info_update(rpc_context, info) - return info - - -class AgentNotifierApi(object): - '''Agent side of the SDN-VE rpc API.''' - - def __init__(self, topic): - target = oslo_messaging.Target(topic=topic, version='1.0') - self.client = n_rpc.get_client(target) - self.topic_info_update = topics.get_topic_name(topic, - constants.INFO, - topics.UPDATE) - - def info_update(self, 
context, info): - cctxt = self.client.prepare(topic=self.topic_info_update, fanout=True) - cctxt.cast(context, 'info_update', info=info) - - -def _ha(func): - '''Supports the high availability feature of the controller.''' - - @functools.wraps(func) - def hawrapper(self, *args, **kwargs): - '''This wrapper sets the new controller if necessary - - When a controller is detected to be not responding, and a - new controller is chosen to be used in its place, this decorator - makes sure the existing integration bridges are set to point - to the new controller by calling the set_controller method. - ''' - ret_func = func(self, *args, **kwargs) - self.set_controller(args[0]) - return ret_func - return hawrapper - - -class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - portbindings_db.PortBindingMixin, - l3_gwmode_db.L3_NAT_db_mixin, - agents_db.AgentDbMixin, - ): - - ''' - Implement the Neutron abstractions using SDN-VE SDN Controller. - ''' - - __native_bulk_support = False - __native_pagination_support = False - __native_sorting_support = False - - supported_extension_aliases = ["binding", "router", "external-net", - "agent", "quotas"] - - def __init__(self, configfile=None): - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}} - - super(SdnvePluginV2, self).__init__() - self.setup_rpc() - self.sdnve_controller_select() - if self.fake_controller: - self.sdnve_client = sdnve_fake.FakeClient() - else: - self.sdnve_client = sdnve.Client() - - def sdnve_controller_select(self): - self.fake_controller = cfg.CONF.SDNVE.use_fake_controller - - def setup_rpc(self): - # RPC support - self.topic = topics.PLUGIN - self.conn = n_rpc.create_connection(new=True) - self.notifier = AgentNotifierApi(topics.AGENT) - self.endpoints = [SdnveRpcCallbacks(self.notifier), - agents_db.AgentExtRpcCallback()] - self.conn.create_consumer(self.topic, 
self.endpoints, - fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _update_base_binding_dict(self, tenant_type): - if tenant_type == constants.TENANT_TYPE_OVERLAY: - self.base_binding_dict[ - portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE - if tenant_type == constants.TENANT_TYPE_OF: - self.base_binding_dict[ - portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS - - def set_controller(self, context): - LOG.info(_LI("Set a new controller if needed.")) - new_controller = self.sdnve_client.sdnve_get_controller() - if new_controller: - self.notifier.info_update( - context, - {'new_controller': new_controller}) - LOG.info(_LI("Set the controller to a new controller: %s"), - new_controller) - - def _process_request(self, request, current): - new_request = dict( - (k, v) for k, v in request.items() - if v != current.get(k)) - - msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s") - LOG.debug(msg, {'orig': request, 'new': new_request}) - return new_request - - # - # Network - # - - @_ha - def create_network(self, context, network): - LOG.debug("Create network in progress: %r", network) - session = context.session - - tenant_id = self._get_tenant_id_for_create(context, network['network']) - # Create a new SDN-VE tenant if need be - sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( - tenant_id) - if sdnve_tenant is None: - raise sdnve_exc.SdnveException( - msg=_('Create net failed: no SDN-VE tenant.')) - - with session.begin(subtransactions=True): - net = super(SdnvePluginV2, self).create_network(context, network) - self._process_l3_create(context, net, network['network']) - - # Create SDN-VE network - (res, data) = self.sdnve_client.sdnve_create('network', net) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_network(context, net['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create net failed in SDN-VE: %s') % res)) - - LOG.debug("Created network: %s", 
net['id']) - return net - - @_ha - def update_network(self, context, id, network): - LOG.debug("Update network in progress: %r", network) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_network = super(SdnvePluginV2, self).get_network( - context, id) - processed_request['network'] = self._process_request( - network['network'], original_network) - net = super(SdnvePluginV2, self).update_network( - context, id, network) - self._process_l3_update(context, net, network['network']) - - if processed_request['network']: - (res, data) = self.sdnve_client.sdnve_update( - 'network', id, processed_request['network']) - if res not in constants.HTTP_ACCEPTABLE: - net = super(SdnvePluginV2, self).update_network( - context, id, {'network': original_network}) - raise sdnve_exc.SdnveException( - msg=(_('Update net failed in SDN-VE: %s') % res)) - - return net - - @_ha - def delete_network(self, context, id): - LOG.debug("Delete network in progress: %s", id) - session = context.session - - with session.begin(subtransactions=True): - self._process_l3_delete(context, id) - super(SdnvePluginV2, self).delete_network(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('network', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _LE("Delete net failed after deleting the network in DB: %s"), - res) - - @_ha - def get_network(self, context, id, fields=None): - LOG.debug("Get network in progress: %s", id) - return super(SdnvePluginV2, self).get_network(context, id, fields) - - @_ha - def get_networks(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - LOG.debug("Get networks in progress") - return super(SdnvePluginV2, self).get_networks( - context, filters, fields, sorts, limit, marker, page_reverse) - - # - # Port - # - - @_ha - def create_port(self, context, port): - LOG.debug("Create port in progress: %r", port) - session = context.session - - # Set port 
status as 'ACTIVE' to avoid needing the agent - port['port']['status'] = n_const.PORT_STATUS_ACTIVE - port_data = port['port'] - - with session.begin(subtransactions=True): - port = super(SdnvePluginV2, self).create_port(context, port) - if 'id' not in port: - return port - # If the tenant_id is set to '' by create_port, add the id to - # the request being sent to the controller as the controller - # requires a tenant id - tenant_id = port.get('tenant_id') - if not tenant_id: - LOG.debug("Create port does not have tenant id info") - original_network = super(SdnvePluginV2, self).get_network( - context, port['network_id']) - original_tenant_id = original_network['tenant_id'] - port['tenant_id'] = original_tenant_id - LOG.debug( - "Create port does not have tenant id info; " - "obtained is: %s", - port['tenant_id']) - - os_tenant_id = tenant_id - id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( - os_tenant_id) - self._update_base_binding_dict(tenant_type) - self._process_portbindings_create_and_update(context, - port_data, port) - - # NOTE(mb): Remove this block when controller is updated - # Remove the information that the controller does not accept - sdnve_port = port.copy() - sdnve_port.pop('device_id', None) - sdnve_port.pop('device_owner', None) - - (res, data) = self.sdnve_client.sdnve_create('port', sdnve_port) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_port(context, port['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create port failed in SDN-VE: %s') % res)) - - LOG.debug("Created port: %s", port.get('id', 'id not found')) - return port - - @_ha - def update_port(self, context, id, port): - LOG.debug("Update port in progress: %r", port) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_port = super(SdnvePluginV2, self).get_port( - context, id) - processed_request['port'] = self._process_request( - port['port'], original_port) - updated_port = 
super(SdnvePluginV2, self).update_port( - context, id, port) - - os_tenant_id = updated_port['tenant_id'] - id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( - os_tenant_id) - self._update_base_binding_dict(tenant_type) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - - if processed_request['port']: - (res, data) = self.sdnve_client.sdnve_update( - 'port', id, processed_request['port']) - if res not in constants.HTTP_ACCEPTABLE: - updated_port = super(SdnvePluginV2, self).update_port( - context, id, {'port': original_port}) - raise sdnve_exc.SdnveException( - msg=(_('Update port failed in SDN-VE: %s') % res)) - - return updated_port - - @_ha - def delete_port(self, context, id, l3_port_check=True): - LOG.debug("Delete port in progress: %s", id) - - # if needed, check to see if this is a port owned by - # an l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - self.disassociate_floatingips(context, id) - - super(SdnvePluginV2, self).delete_port(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('port', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _LE("Delete port operation failed in SDN-VE " - "after deleting the port from DB: %s"), res) - - # - # Subnet - # - - @_ha - def create_subnet(self, context, subnet): - LOG.debug("Create subnet in progress: %r", subnet) - new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet) - - # Note(mb): Use of null string currently required by controller - sdnve_subnet = new_subnet.copy() - if subnet.get('gateway_ip') is None: - sdnve_subnet['gateway_ip'] = 'null' - (res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_subnet(context, - new_subnet['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create subnet failed in SDN-VE: %s') % res)) - - LOG.debug("Subnet created: %s", 
new_subnet['id']) - - return new_subnet - - @_ha - def update_subnet(self, context, id, subnet): - LOG.debug("Update subnet in progress: %r", subnet) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_subnet = super(SdnvePluginV2, self).get_subnet( - context, id) - processed_request['subnet'] = self._process_request( - subnet['subnet'], original_subnet) - updated_subnet = super(SdnvePluginV2, self).update_subnet( - context, id, subnet) - - if processed_request['subnet']: - # Note(mb): Use of string containing null required by controller - if 'gateway_ip' in processed_request['subnet']: - if processed_request['subnet'].get('gateway_ip') is None: - processed_request['subnet']['gateway_ip'] = 'null' - (res, data) = self.sdnve_client.sdnve_update( - 'subnet', id, processed_request['subnet']) - if res not in constants.HTTP_ACCEPTABLE: - for key in subnet['subnet'].keys(): - subnet['subnet'][key] = original_subnet[key] - super(SdnvePluginV2, self).update_subnet( - context, id, subnet) - raise sdnve_exc.SdnveException( - msg=(_('Update subnet failed in SDN-VE: %s') % res)) - - return updated_subnet - - @_ha - def delete_subnet(self, context, id): - LOG.debug("Delete subnet in progress: %s", id) - super(SdnvePluginV2, self).delete_subnet(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('subnet', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_LE("Delete subnet operation failed in SDN-VE after " - "deleting the subnet from DB: %s"), res) - - # - # Router - # - - @_ha - def create_router(self, context, router): - LOG.debug("Create router in progress: %r", router) - - if router['router']['admin_state_up'] is False: - LOG.warning(_LW('Ignoring admin_state_up=False for router=%r. 
' - 'Overriding with True'), router) - router['router']['admin_state_up'] = True - - tenant_id = self._get_tenant_id_for_create(context, router['router']) - # Create a new SDN-VE tenant if need be - sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( - tenant_id) - if sdnve_tenant is None: - raise sdnve_exc.SdnveException( - msg=_('Create router failed: no SDN-VE tenant.')) - - new_router = super(SdnvePluginV2, self).create_router(context, router) - # Create SDN-VE router - (res, data) = self.sdnve_client.sdnve_create('router', new_router) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_router(context, new_router['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create router failed in SDN-VE: %s') % res)) - - LOG.debug("Router created: %r", new_router) - return new_router - - @_ha - def update_router(self, context, id, router): - LOG.debug("Update router in progress: id=%(id)s " - "router=%(router)r", - {'id': id, 'router': router}) - session = context.session - - processed_request = {} - if not router['router'].get('admin_state_up', True): - raise n_exc.NotImplementedError(_('admin_state_up=False ' - 'routers are not ' - 'supported.')) - - with session.begin(subtransactions=True): - original_router = super(SdnvePluginV2, self).get_router( - context, id) - processed_request['router'] = self._process_request( - router['router'], original_router) - updated_router = super(SdnvePluginV2, self).update_router( - context, id, router) - - if processed_request['router']: - egw = processed_request['router'].get('external_gateway_info') - # Check for existing empty set (different from None) in request - if egw == {}: - processed_request['router'][ - 'external_gateway_info'] = {'network_id': 'null'} - (res, data) = self.sdnve_client.sdnve_update( - 'router', id, processed_request['router']) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).update_router( - context, id, {'router': original_router}) - raise 
sdnve_exc.SdnveException( - msg=(_('Update router failed in SDN-VE: %s') % res)) - - return updated_router - - @_ha - def delete_router(self, context, id): - LOG.debug("Delete router in progress: %s", id) - - super(SdnvePluginV2, self).delete_router(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('router', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _LE("Delete router operation failed in SDN-VE after " - "deleting the router in DB: %s"), res) - - @_ha - def add_router_interface(self, context, router_id, interface_info): - LOG.debug("Add router interface in progress: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r", - {'router_id': router_id, 'interface_info': interface_info}) - - new_interface = super(SdnvePluginV2, self).add_router_interface( - context, router_id, interface_info) - LOG.debug( - "SdnvePluginV2.add_router_interface called. Port info: %s", - new_interface) - request_info = interface_info.copy() - request_info['port_id'] = new_interface['port_id'] - # Add the subnet_id to the request sent to the controller - if 'subnet_id' not in interface_info: - request_info['subnet_id'] = new_interface['subnet_id'] - - (res, data) = self.sdnve_client.sdnve_update( - 'router', router_id + '/add_router_interface', request_info) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).remove_router_interface( - context, router_id, interface_info) - raise sdnve_exc.SdnveException( - msg=(_('Update router-add-interface failed in SDN-VE: %s') % - res)) - - LOG.debug("Added router interface: %r", new_interface) - return new_interface - - def _add_router_interface_only(self, context, router_id, interface_info): - LOG.debug("Add router interface only called: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r", - {'router_id': router_id, 'interface_info': interface_info}) - - port_id = interface_info.get('port_id') - if port_id: - (res, data) = self.sdnve_client.sdnve_update( - 'router', 
router_id + '/add_router_interface', interface_info) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_LE("SdnvePluginV2._add_router_interface_only: " - "failed to add the interface in the roll back." - " of a remove_router_interface operation")) - - def _find_router_port_by_subnet_id(self, ports, subnet_id): - for p in ports: - subnet_ids = [fip['subnet_id'] for fip in p['fixed_ips']] - if subnet_id in subnet_ids: - return p['id'] - - @_ha - def remove_router_interface(self, context, router_id, interface_info): - LOG.debug("Remove router interface in progress: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r", - {'router_id': router_id, 'interface_info': interface_info}) - - subnet_id = interface_info.get('subnet_id') - port_id = interface_info.get('port_id') - if not subnet_id: - if not port_id: - raise sdnve_exc.BadInputException(msg=_('No port ID')) - myport = super(SdnvePluginV2, self).get_port(context, port_id) - LOG.debug("SdnvePluginV2.remove_router_interface port: %s", - myport) - myfixed_ips = myport.get('fixed_ips') - if not myfixed_ips: - raise sdnve_exc.BadInputException(msg=_('No fixed IP')) - subnet_id = myfixed_ips[0].get('subnet_id') - if subnet_id: - interface_info['subnet_id'] = subnet_id - LOG.debug( - "SdnvePluginV2.remove_router_interface subnet_id: %s", - subnet_id) - else: - if not port_id: - # The backend requires port id info in the request - subnet = super(SdnvePluginV2, self).get_subnet(context, - subnet_id) - df = {'device_id': [router_id], - 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF], - 'network_id': [subnet['network_id']]} - ports = self.get_ports(context, filters=df) - if ports: - pid = self._find_router_port_by_subnet_id(ports, subnet_id) - if not pid: - raise sdnve_exc.SdnveException( - msg=(_('Update router-remove-interface ' - 'failed SDN-VE: subnet %(sid) is not ' - 'associated with any ports on router ' - '%(rid)'), {'sid': subnet_id, - 'rid': router_id})) - interface_info['port_id'] = pid - msg 
= ("SdnvePluginV2.remove_router_interface " - "subnet_id: %(sid)s port_id: %(pid)s") - LOG.debug(msg, {'sid': subnet_id, 'pid': pid}) - - (res, data) = self.sdnve_client.sdnve_update( - 'router', router_id + '/remove_router_interface', interface_info) - - if res not in constants.HTTP_ACCEPTABLE: - raise sdnve_exc.SdnveException( - msg=(_('Update router-remove-interface failed SDN-VE: %s') % - res)) - - session = context.session - with session.begin(subtransactions=True): - try: - if not port_id: - # port_id was not originally given in interface_info, - # so we want to remove the interface by subnet instead - # of port - del interface_info['port_id'] - info = super(SdnvePluginV2, self).remove_router_interface( - context, router_id, interface_info) - except Exception: - with excutils.save_and_reraise_exception(): - self._add_router_interface_only(context, - router_id, interface_info) - - return info - - # - # Floating Ip - # - - @_ha - def create_floatingip(self, context, floatingip): - LOG.debug("Create floatingip in progress: %r", - floatingip) - new_floatingip = super(SdnvePluginV2, self).create_floatingip( - context, floatingip) - - (res, data) = self.sdnve_client.sdnve_create( - 'floatingip', {'floatingip': new_floatingip}) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_floatingip( - context, new_floatingip['id']) - raise sdnve_exc.SdnveException( - msg=(_('Creating floating ip operation failed ' - 'in SDN-VE controller: %s') % res)) - - LOG.debug("Created floatingip : %r", new_floatingip) - return new_floatingip - - @_ha - def update_floatingip(self, context, id, floatingip): - LOG.debug("Update floatingip in progress: %r", floatingip) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_floatingip = super( - SdnvePluginV2, self).get_floatingip(context, id) - processed_request['floatingip'] = self._process_request( - floatingip['floatingip'], original_floatingip) - 
updated_floatingip = super( - SdnvePluginV2, self).update_floatingip(context, id, floatingip) - - if processed_request['floatingip']: - (res, data) = self.sdnve_client.sdnve_update( - 'floatingip', id, - {'floatingip': processed_request['floatingip']}) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).update_floatingip( - context, id, {'floatingip': original_floatingip}) - raise sdnve_exc.SdnveException( - msg=(_('Update floating ip failed in SDN-VE: %s') % res)) - - return updated_floatingip - - @_ha - def delete_floatingip(self, context, id): - LOG.debug("Delete floatingip in progress: %s", id) - super(SdnvePluginV2, self).delete_floatingip(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('floatingip', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_LE("Delete floatingip failed in SDN-VE: %s"), res) diff --git a/neutron/plugins/ml2/config.py b/neutron/plugins/ml2/config.py index 3eb3b2bd4a0..a248c1ceb80 100644 --- a/neutron/plugins/ml2/config.py +++ b/neutron/plugins/ml2/config.py @@ -18,7 +18,7 @@ from oslo_config import cfg ml2_opts = [ cfg.ListOpt('type_drivers', - default=['local', 'flat', 'vlan', 'gre', 'vxlan'], + default=['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve'], help=_("List of network type driver entrypoints to be loaded " "from the neutron.ml2.type_drivers namespace.")), cfg.ListOpt('tenant_network_types', diff --git a/neutron/plugins/ml2/driver_api.py b/neutron/plugins/ml2/driver_api.py index 3284832beeb..c54ab1ba35a 100644 --- a/neutron/plugins/ml2/driver_api.py +++ b/neutron/plugins/ml2/driver_api.py @@ -911,12 +911,14 @@ class ExtensionDriver(object): """ pass - @abc.abstractproperty + @property def extension_alias(self): """Supported extension alias. Return the alias identifying the core API extension supported - by this driver. + by this driver. Do not declare if API extension handling will + be left to a service plugin, and we just need to provide + core resource extension and updates. 
""" pass diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py deleted file mode 100644 index 045499ce326..00000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 - - -class NexusPortBinding(model_base.BASEV2): - """Represents a binding of VM's to nexus ports.""" - - __tablename__ = "cisco_ml2_nexusport_bindings" - - binding_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - port_id = sa.Column(sa.String(255)) - vlan_id = sa.Column(sa.Integer, nullable=False) - vni = sa.Column(sa.Integer) - switch_ip = sa.Column(sa.String(255)) - instance_id = sa.Column(sa.String(255)) - is_provider_vlan = sa.Column(sa.Boolean(), nullable=False, default=False, - server_default=sa.sql.false()) - - def __repr__(self): - """Just the binding, without the id key.""" - return ("" % - (self.port_id, self.vlan_id, self.vni, self.switch_ip, - self.instance_id, - 'True' if self.is_provider_vlan else 'False')) - - def __eq__(self, other): - """Compare only the binding, without the id key.""" - return ( - self.port_id == other.port_id and - self.vlan_id == other.vlan_id and - self.vni == other.vni and - self.switch_ip == other.switch_ip and - 
self.instance_id == other.instance_id and - self.is_provider_vlan == other.is_provider_vlan - ) - - -class NexusNVEBinding(model_base.BASEV2): - """Represents Network Virtualization Endpoint configuration.""" - - __tablename__ = "cisco_ml2_nexus_nve" - - vni = sa.Column(sa.Integer, primary_key=True, nullable=False) - device_id = sa.Column(sa.String(255), primary_key=True) - switch_ip = sa.Column(sa.String(255), primary_key=True) - mcast_group = sa.Column(sa.String(255)) - - def __repr__(self): - return ("" % - (self.vni, self.switch_ip, self.device_id, self.mcast_group)) - - -class NexusVxlanAllocation(model_base.BASEV2): - - __tablename__ = 'ml2_nexus_vxlan_allocations' - - vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False, - server_default=sa.sql.false()) - - -class NexusMcastGroup(model_base.BASEV2, models_v2.HasId): - - __tablename__ = 'ml2_nexus_vxlan_mcast_groups' - - mcast_group = sa.Column(sa.String(64), nullable=False) - associated_vni = sa.Column(sa.Integer, - sa.ForeignKey( - 'ml2_nexus_vxlan_allocations.vxlan_vni', - ondelete="CASCADE"), - nullable=False) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/requirements.txt b/neutron/plugins/ml2/drivers/cisco/nexus/requirements.txt deleted file mode 100644 index ef631a3f2b9..00000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -networking-cisco diff --git a/neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py b/neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py deleted file mode 100644 index 8f88966bd6d..00000000000 --- a/neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2015 Cisco Systems, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const -from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_db -from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_network_driver - -from oslo_log import log as logging - -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.i18n import _LE, _LW -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api - -LOG = logging.getLogger(__name__) - - -class CiscoUcsmMechanismDriver(api.MechanismDriver): - - """ML2 Mechanism Driver for Cisco UCS Manager.""" - - def initialize(self): - self.vif_type = portbindings.VIF_TYPE_802_QBH - self.vif_details = {portbindings.CAP_PORT_FILTER: False} - self.driver = ucsm_network_driver.CiscoUcsmDriver() - self.ucsm_db = ucsm_db.UcsmDbModel() - - def _get_vlanid(self, context): - """Returns vlan_id associated with a bound VLAN segment.""" - segment = context.bottom_bound_segment - if segment and self.check_segment(segment): - return segment.get(api.SEGMENTATION_ID) - - def update_port_precommit(self, context): - """Adds port profile and vlan information to the DB. - - Assign a port profile to this port. To do that: - 1. Get the vlan_id associated with the bound segment - 2. Check if a port profile already exists for this vlan_id - 3. If yes, associate that port profile with this port. - 4. 
If no, create a new port profile with this vlan_id and - associate with this port - """ - LOG.debug("Inside update_port_precommit") - vnic_type = context.current.get(portbindings.VNIC_TYPE, - portbindings.VNIC_NORMAL) - - profile = context.current.get(portbindings.PROFILE, {}) - - if not self.driver.check_vnic_type_and_vendor_info(vnic_type, - profile): - LOG.debug("update_port_precommit encountered a non-SR-IOV port") - return - - # If this is an Intel SR-IOV vnic, then no need to create port - # profile on the UCS manager. So no need to update the DB. - if not self.driver.is_vmfex_port(profile): - LOG.debug("update_port_precommit has nothing to do for this " - "sr-iov port") - return - - vlan_id = self._get_vlanid(context) - - if not vlan_id: - LOG.warn(_LW("update_port_precommit: vlan_id is None.")) - return - - p_profile_name = self.make_profile_name(vlan_id) - LOG.debug("update_port_precommit: Profile: %s, VLAN_id: %d", - p_profile_name, vlan_id) - - # Create a new port profile entry in the db - self.ucsm_db.add_port_profile(p_profile_name, vlan_id) - - def update_port_postcommit(self, context): - """Creates a port profile on UCS Manager. - - Creates a Port Profile for this VLAN if it does not already - exist. - """ - LOG.debug("Inside update_port_postcommit") - vlan_id = self._get_vlanid(context) - - if not vlan_id: - LOG.warn(_LW("update_port_postcommit: vlan_id is None.")) - return - - # Check if UCS Manager needs to create a Port Profile. - # 1. Make sure this is a vm_fex_port.(Port profiles are created - # only for VM-FEX ports.) - # 2. Make sure update_port_precommit added an entry in the DB - # for this port profile - # 3. Make sure that the Port Profile hasn't already been created. 
- - profile = context.current.get(portbindings.PROFILE, {}) - vnic_type = context.current.get(portbindings.VNIC_TYPE, - portbindings.VNIC_NORMAL) - - if (self.driver.check_vnic_type_and_vendor_info(vnic_type, profile) and - self.driver.is_vmfex_port(profile)): - - LOG.debug("update_port_postcommit: VM-FEX port updated for " - "vlan_id %d", vlan_id) - - profile_name = self.ucsm_db.get_port_profile_for_vlan(vlan_id) - if self.ucsm_db.is_port_profile_created(vlan_id): - LOG.debug("update_port_postcommit: Port Profile %s for " - "vlan_id %d already exists. Nothing to do.", - profile_name, vlan_id) - return - - # Ask the UCS Manager driver to create the above Port Profile. - # Connection to the UCS Manager is managed from within the driver. - if self.driver.create_portprofile(profile_name, vlan_id, - vnic_type): - # Port profile created on UCS, record that in the DB. - self.ucsm_db.set_port_profile_created(vlan_id, profile_name) - return - - else: - # Enable vlan-id for this regular Neutron virtual port. - host_id = context.current.get(portbindings.HOST_ID) - LOG.debug("update_port_postcommit: Host_id is %s", host_id) - self.driver.update_serviceprofile(host_id, vlan_id) - - def delete_network_precommit(self, context): - """Delete entry corresponding to Network's VLAN in the DB.""" - - segments = context.network_segments - vlan_id = segments[0]['segmentation_id'] - - if vlan_id: - self.ucsm_db.delete_vlan_entry(vlan_id) - - def delete_network_postcommit(self, context): - """Delete all configuration added to UCS Manager for the vlan_id.""" - - segments = context.network_segments - vlan_id = segments[0]['segmentation_id'] - port_profile = self.make_profile_name(vlan_id) - - if vlan_id: - self.driver.delete_all_config_for_vlan(vlan_id, port_profile) - - def bind_port(self, context): - """Binds port to current network segment. - - Binds port only if the vnic_type is direct or macvtap and - the port is from a supported vendor. 
While binding port set it - in ACTIVE state and provide the Port Profile or Vlan Id as part - vif_details. - """ - vnic_type = context.current.get(portbindings.VNIC_TYPE, - portbindings.VNIC_NORMAL) - - LOG.debug("Attempting to bind port %(port)s with vnic_type " - "%(vnic_type)s on network %(network)s", - {'port': context.current['id'], - 'vnic_type': vnic_type, - 'network': context.network.current['id']}) - - profile = context.current.get(portbindings.PROFILE, {}) - - if not self.driver.check_vnic_type_and_vendor_info(vnic_type, - profile): - return - - for segment in context.network.network_segments: - if self.check_segment(segment): - vlan_id = segment[api.SEGMENTATION_ID] - - if not vlan_id: - LOG.warn(_LW("Bind port: vlan_id is None.")) - return - - LOG.debug("Port binding to Vlan_id: %s", str(vlan_id)) - - # Check if this is a Cisco VM-FEX port or Intel SR_IOV port - if self.driver.is_vmfex_port(profile): - profile_name = self.make_profile_name(vlan_id) - self.vif_details[ - const.VIF_DETAILS_PROFILEID] = profile_name - else: - self.vif_details[ - portbindings.VIF_DETAILS_VLAN] = str(vlan_id) - - context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details, - constants.PORT_STATUS_ACTIVE) - return - - LOG.error(_LE("UCS Mech Driver: Failed binding port ID %(id)s " - "on any segment of network %(network)s"), - {'id': context.current['id'], - 'network': context.network.current['id']}) - - @staticmethod - def check_segment(segment): - network_type = segment[api.NETWORK_TYPE] - return network_type == p_const.TYPE_VLAN - - @staticmethod - def make_profile_name(vlan_id): - return const.PORT_PROFILE_NAME_PREFIX + str(vlan_id) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/type_nexus_vxlan.py b/neutron/plugins/ml2/drivers/hyperv/constants.py similarity index 75% rename from neutron/plugins/ml2/drivers/cisco/nexus/type_nexus_vxlan.py rename to neutron/plugins/ml2/drivers/hyperv/constants.py index 933b1e31798..2783ac4c269 100644 --- 
a/neutron/plugins/ml2/drivers/cisco/nexus/type_nexus_vxlan.py +++ b/neutron/plugins/ml2/drivers/hyperv/constants.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015 Cisco Systems Inc. +# Copyright (c) 2015 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -12,10 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -from networking_cisco.plugins.ml2.drivers.cisco.nexus import type_nexus_vxlan - - -class NexusVxlanTypeDriver(type_nexus_vxlan.NexusVxlanTypeDriver): - pass +AGENT_TYPE_HYPERV = 'HyperV agent' +VIF_TYPE_HYPERV = 'hyperv' diff --git a/neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py b/neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py index 0fa888c6d18..704d91829da 100644 --- a/neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py +++ b/neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py @@ -15,8 +15,8 @@ from hyperv.neutron.ml2 import mech_hyperv -from neutron.common import constants from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers.hyperv import constants as constants from neutron.plugins.ml2.drivers import mech_agent @@ -33,5 +33,5 @@ class HypervMechanismDriver(mech_hyperv.HypervMechanismDriver, def __init__(self): super(HypervMechanismDriver, self).__init__( constants.AGENT_TYPE_HYPERV, - portbindings.VIF_TYPE_HYPERV, + constants.VIF_TYPE_HYPERV, {portbindings.CAP_PORT_FILTER: False}) diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py index 5420e827a7c..2d9c94ac3ba 100644 --- a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -50,7 +50,7 @@ class L2populationMechanismDriver(api.MechanismDriver, port = context.current agent_host = context.host - fdb_entries = self._update_port_down(context, port, agent_host) + fdb_entries = 
self._get_agent_fdb(context, port, agent_host) self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx, fdb_entries) @@ -99,7 +99,7 @@ class L2populationMechanismDriver(api.MechanismDriver, return True - def update_port_postcommit(self, context): + def update_port_precommit(self, context): port = context.current orig = context.original @@ -107,7 +107,12 @@ class L2populationMechanismDriver(api.MechanismDriver, context.status == const.PORT_STATUS_ACTIVE): LOG.warning(_LW("unable to modify mac_address of ACTIVE port " "%s"), port['id']) - raise ml2_exc.MechanismDriverError(method='update_port_postcommit') + raise ml2_exc.MechanismDriverError(method='update_port_precommit') + + def update_port_postcommit(self, context): + port = context.current + orig = context.original + diff_ips = self._get_diff_ips(orig, port) if diff_ips: self._fixed_ips_changed(context, orig, port, diff_ips) @@ -116,7 +121,7 @@ class L2populationMechanismDriver(api.MechanismDriver, self._update_port_up(context) if context.status == const.PORT_STATUS_DOWN: agent_host = context.host - fdb_entries = self._update_port_down( + fdb_entries = self._get_agent_fdb( context, port, agent_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) @@ -132,7 +137,7 @@ class L2populationMechanismDriver(api.MechanismDriver, if context.status == const.PORT_STATUS_ACTIVE: self._update_port_up(context) elif context.status == const.PORT_STATUS_DOWN: - fdb_entries = self._update_port_down( + fdb_entries = self._get_agent_fdb( context, port, context.host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) @@ -142,7 +147,7 @@ class L2populationMechanismDriver(api.MechanismDriver, original_port = orig[0] original_host = orig[1] # this port has been migrated: remove its entries from fdb - fdb_entries = self._update_port_down( + fdb_entries = self._get_agent_fdb( context, original_port, original_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, 
fdb_entries) @@ -259,7 +264,7 @@ class L2populationMechanismDriver(api.MechanismDriver, self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, other_fdb_entries) - def _update_port_down(self, context, port, agent_host): + def _get_agent_fdb(self, context, port, agent_host): port_infos = self._get_port_infos(context, port, agent_host) if not port_infos: return diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py index 4e5f65146cc..aa970af2e47 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py @@ -12,10 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. - -from neutron.plugins.common import constants as p_const - - FLAT_VLAN_ID = -1 LOCAL_VLAN_ID = -2 @@ -23,16 +19,3 @@ LOCAL_VLAN_ID = -2 VXLAN_NONE = 'not_supported' VXLAN_MCAST = 'multicast_flooding' VXLAN_UCAST = 'unicast_flooding' - - -# TODO(rkukura): Eventually remove this function, which provides -# temporary backward compatibility with pre-Havana RPC and DB vlan_id -# encoding. 
-def interpret_vlan_id(vlan_id): - """Return (network_type, segmentation_id) tuple for encoded vlan_id.""" - if vlan_id == LOCAL_VLAN_ID: - return (p_const.TYPE_LOCAL, None) - elif vlan_id == FLAT_VLAN_ID: - return (p_const.TYPE_FLAT, None) - else: - return (p_const.TYPE_VLAN, vlan_id) diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 41503b00e8f..f370b5f9ca2 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -935,13 +935,7 @@ class LinuxBridgeNeutronAgentRPC(service.Service): if device_details['admin_state_up']: # create the networking for the port network_type = device_details.get('network_type') - if network_type: - segmentation_id = device_details.get('segmentation_id') - else: - # compatibility with pre-Havana RPC vlan_id encoding - vlan_id = device_details.get('vlan_id') - (network_type, - segmentation_id) = lconst.interpret_vlan_id(vlan_id) + segmentation_id = device_details.get('segmentation_id') if self.br_mgr.add_interface( device_details['network_id'], network_type, diff --git a/neutron/plugins/ml2/drivers/mech_nuage/driver.py b/neutron/plugins/ml2/drivers/mech_nuage/driver.py deleted file mode 100644 index 971c195b4d3..00000000000 --- a/neutron/plugins/ml2/drivers/mech_nuage/driver.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr -from oslo_config import cfg -from oslo_log import log - -from neutron.common import constants as n_consts -from neutron.extensions import portbindings -from neutron.i18n import _LE -from neutron.plugins.common import constants -from neutron.plugins.ml2 import driver_api as api -from nuage_neutron.plugins.nuage.common import config -from nuage_neutron.plugins.nuage.common import constants as nuage_const -from nuage_neutron.plugins.nuage import plugin - -LOG = log.getLogger(__name__) - - -class NuageMechanismDriver(plugin.NuagePlugin, - api.MechanismDriver): - - def initialize(self): - LOG.debug('Initializing driver') - config.nuage_register_cfg_opts() - self.nuageclient_init() - self.vif_type = portbindings.VIF_TYPE_OVS - self.vif_details = {portbindings.CAP_PORT_FILTER: False} - self.default_np_id = self.nuageclient.get_net_partition_id_by_name( - cfg.CONF.RESTPROXY.default_net_partition_name) - LOG.debug('Initializing complete') - - def create_subnet_postcommit(self, context): - subnet = context.current - net = netaddr.IPNetwork(subnet['cidr']) - params = { - 'netpart_id': self.default_np_id, - 'tenant_id': subnet['tenant_id'], - 'net': net - } - self.nuageclient.create_subnet(subnet, params) - - def delete_subnet_postcommit(self, context): - subnet = context.current - self.nuageclient.delete_subnet(subnet['id']) - - def update_port_postcommit(self, context): - port = context.current - port_prefix = nuage_const.NOVA_PORT_OWNER_PREF - # Check two things prior to proceeding with - # talking to backend. - # 1) binding has happened successfully. - # 2) Its a VM port. 
- if ((not context.original_top_bound_segment and - context.top_bound_segment) and - port['device_owner'].startswith(port_prefix)): - np_name = cfg.CONF.RESTPROXY.default_net_partition_name - self._create_update_port(context._plugin_context, - port, np_name) - - def delete_port_postcommit(self, context): - port = context.current - np_name = cfg.CONF.RESTPROXY.default_net_partition_name - self._delete_nuage_vport(context._plugin_context, - port, np_name) - - def bind_port(self, context): - LOG.debug("Attempting to bind port %(port)s on " - "network %(network)s", - {'port': context.current['id'], - 'network': context.network.current['id']}) - for segment in context.segments_to_bind: - if self._check_segment(segment): - context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details, - status=n_consts.PORT_STATUS_ACTIVE) - LOG.debug("Bound using segment: %s", segment) - return - else: - LOG.error(_LE("Refusing to bind port for segment ID %(id)s, " - "segment %(seg)s, phys net %(physnet)s, and " - "network type %(nettype)s"), - {'id': segment[api.ID], - 'seg': segment[api.SEGMENTATION_ID], - 'physnet': segment[api.PHYSICAL_NETWORK], - 'nettype': segment[api.NETWORK_TYPE]}) - - def _check_segment(self, segment): - """Verify a segment is valid for the Nuage MechanismDriver.""" - network_type = segment[api.NETWORK_TYPE] - return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE, - constants.TYPE_VXLAN, constants.TYPE_VLAN] diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py index 8664769771f..12168883e8b 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py @@ -20,6 +20,7 @@ import re from oslo_log import log as logging import six +from neutron.common import utils from neutron.i18n import _LE, _LW from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions 
as exc @@ -144,11 +145,7 @@ class EmbSwitch(object): @param pci_slot: Virtual Function address """ - vf_index = self.pci_slot_map.get(pci_slot) - if vf_index is None: - LOG.warning(_LW("Cannot find vf index for pci slot %s"), - pci_slot) - raise exc.InvalidPciSlotError(pci_slot=pci_slot) + vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.get_vf_state(vf_index) def set_device_state(self, pci_slot, state): @@ -157,12 +154,48 @@ class EmbSwitch(object): @param pci_slot: Virtual Function address @param state: link state """ + vf_index = self._get_vf_index(pci_slot) + return self.pci_dev_wrapper.set_vf_state(vf_index, state) + + def set_device_max_rate(self, pci_slot, max_kbps): + """Set device max rate. + + @param pci_slot: Virtual Function address + @param max_kbps: device max rate in kbps + """ + vf_index = self._get_vf_index(pci_slot) + #(Note): ip link set max rate in Mbps therefore + #we need to convert the max_kbps to Mbps. + #Zero means to disable the rate so the lowest rate + #available is 1Mbps. 
Floating numbers are not allowed + if max_kbps > 0 and max_kbps < 1000: + max_mbps = 1 + else: + max_mbps = utils.round_val(max_kbps / 1000.0) + + log_dict = { + 'max_rate': max_mbps, + 'max_kbps': max_kbps, + 'vf_index': vf_index + } + if max_kbps % 1000 != 0: + LOG.debug("Maximum rate for SR-IOV ports is counted in Mbps; " + "setting %(max_rate)s Mbps limit for port %(vf_index)s " + "instead of %(max_kbps)s kbps", + log_dict) + else: + LOG.debug("Setting %(max_rate)s Mbps limit for port %(vf_index)s", + log_dict) + + return self.pci_dev_wrapper.set_vf_max_rate(vf_index, max_mbps) + + def _get_vf_index(self, pci_slot): vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: LOG.warning(_LW("Cannot find vf index for pci slot %s"), pci_slot) raise exc.InvalidPciSlotError(pci_slot=pci_slot) - return self.pci_dev_wrapper.set_vf_state(vf_index, state) + return vf_index def set_device_spoofcheck(self, pci_slot, enabled): """Set device spoofchecking @@ -194,16 +227,13 @@ class EmbSwitch(object): class ESwitchManager(object): """Manages logical Embedded Switch entities for physical network.""" - def __init__(self, device_mappings, exclude_devices): - """Constructor. - - Create Embedded Switch logical entities for all given device mappings, - using exclude devices. - """ - self.emb_switches_map = {} - self.pci_slot_map = {} - - self._discover_devices(device_mappings, exclude_devices) + def __new__(cls): + # make it a singleton + if not hasattr(cls, '_instance'): + cls._instance = super(ESwitchManager, cls).__new__(cls) + cls.emb_switches_map = {} + cls.pci_slot_map = {} + return cls._instance def device_exists(self, device_mac, pci_slot): """Verify if device exists. 
@@ -250,6 +280,19 @@ class ESwitchManager(object): return embedded_switch.get_device_state(pci_slot) return False + def set_device_max_rate(self, device_mac, pci_slot, max_kbps): + """Set device max rate + + Sets the device max rate in kbps + @param device_mac: device mac + @param pci_slot: pci slot + @param max_kbps: device max rate in kbps + """ + embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) + if embedded_switch: + embedded_switch.set_device_max_rate(pci_slot, + max_kbps) + def set_device_state(self, device_mac, pci_slot, admin_state_up): """Set device state @@ -276,7 +319,7 @@ class ESwitchManager(object): embedded_switch.set_device_spoofcheck(pci_slot, enabled) - def _discover_devices(self, device_mappings, exclude_devices): + def discover_devices(self, device_mappings, exclude_devices): """Discover which Virtual functions to manage. Discover devices, and create embedded switch object for network device @@ -311,3 +354,17 @@ class ESwitchManager(object): {"device_mac": device_mac, "pci_slot": pci_slot}) embedded_switch = None return embedded_switch + + def get_pci_slot_by_mac(self, device_mac): + """Get pci slot by mac. 
+ + Get pci slot by device mac + @param device_mac: device mac + """ + result = None + for pci_slot, embedded_switch in self.pci_slot_map.items(): + used_device_mac = embedded_switch.get_pci_device(pci_slot) + if used_device_mac == device_mac: + result = pci_slot + break + return result diff --git a/neutron/plugins/nec/extensions/__init__.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from neutron/plugins/nec/extensions/__init__.py rename to neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py new file mode 100755 index 00000000000..8c30817a1ab --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py @@ -0,0 +1,84 @@ +# Copyright 2015 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log as logging + +from neutron.agent.l2.extensions import qos +from neutron.i18n import _LE, _LI, _LW +from neutron.plugins.ml2.drivers.mech_sriov.agent.common import ( + exceptions as exc) +from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm +from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import ( + mech_driver) + +LOG = logging.getLogger(__name__) + + +class QosSRIOVAgentDriver(qos.QosAgentDriver): + + _SUPPORTED_RULES = ( + mech_driver.SriovNicSwitchMechanismDriver.supported_qos_rule_types) + + def __init__(self): + super(QosSRIOVAgentDriver, self).__init__() + self.eswitch_mgr = None + + def initialize(self): + self.eswitch_mgr = esm.ESwitchManager() + + def create(self, port, qos_policy): + self._handle_rules('create', port, qos_policy) + + def update(self, port, qos_policy): + self._handle_rules('update', port, qos_policy) + + def delete(self, port, qos_policy): + # TODO(QoS): consider optimizing flushing of all QoS rules from the + # port by inspecting qos_policy.rules contents + self._delete_bandwidth_limit(port) + + def _handle_rules(self, action, port, qos_policy): + for rule in qos_policy.rules: + if rule.rule_type in self._SUPPORTED_RULES: + handler_name = ("".join(("_", action, "_", rule.rule_type))) + handler = getattr(self, handler_name) + handler(port, rule) + else: + LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: ' + '%(rule_type)s; skipping'), + {'rule_id': rule.id, 'rule_type': rule.rule_type}) + + def _create_bandwidth_limit(self, port, rule): + self._update_bandwidth_limit(port, rule) + + def _update_bandwidth_limit(self, port, rule): + pci_slot = port['profile'].get('pci_slot') + device = port['device'] + self._set_vf_max_rate(device, pci_slot, rule.max_kbps) + + def _delete_bandwidth_limit(self, port): + pci_slot = port['profile'].get('pci_slot') + device = port['device'] + self._set_vf_max_rate(device, pci_slot) + + def _set_vf_max_rate(self, device, pci_slot, 
max_kbps=0): + if self.eswitch_mgr.device_exists(device, pci_slot): + try: + self.eswitch_mgr.set_device_max_rate( + device, pci_slot, max_kbps) + except exc.SriovNicError: + LOG.exception( + _LE("Failed to set device %s max rate"), device) + else: + LOG.info(_LI("No device with MAC %s defined on agent."), device) diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py index 3e7ec1b1449..8f984e0aac4 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py @@ -122,6 +122,21 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): raise exc.IpCommandError(dev_name=self.dev_name, reason=str(e)) + def set_vf_max_rate(self, vf_index, max_tx_rate): + """sets vf max rate. + + @param vf_index: vf index + @param max_tx_rate: vf max tx rate in Mbps + """ + try: + self._as_root([], "link", ("set", self.dev_name, "vf", + str(vf_index), "rate", + str(max_tx_rate))) + except Exception as e: + LOG.exception(_LE("Failed executing ip command")) + raise exc.IpCommandError(dev_name=self.dev_name, + reason=e) + def _get_vf_link_show(self, vf_list, link_show_out): """Get link show output for VFs diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py index e1dd7247bfb..13210aa5152 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py @@ -26,6 +26,7 @@ from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall +from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.common import config as common_config @@ -34,7 +35,7 @@ from neutron.common import topics from neutron.common import utils as n_utils from neutron import context 
from neutron.i18n import _LE, _LI, _LW -from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa +from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm @@ -72,12 +73,13 @@ class SriovNicSwitchAgent(object): polling_interval): self.polling_interval = polling_interval + self.conf = cfg.CONF self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices) configurations = {'device_mappings': physical_devices_mappings} self.agent_state = { 'binary': 'neutron-sriov-nic-agent', - 'host': cfg.CONF.host, + 'host': self.conf.host, 'topic': n_constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH, @@ -92,6 +94,10 @@ class SriovNicSwitchAgent(object): self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, self.sg_plugin_rpc) self._setup_rpc() + self.ext_manager = self._create_agent_extension_manager( + self.connection) + # The initialization is complete; we can start receiving messages + self.connection.consume_in_threads() # Initialize iteration counter self.iter_num = 0 @@ -111,7 +117,8 @@ class SriovNicSwitchAgent(object): [topics.SECURITY_GROUP, topics.UPDATE]] self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, - consumers) + consumers, + start_listening=False) report_interval = cfg.CONF.AGENT.report_interval if report_interval: @@ -129,8 +136,15 @@ class SriovNicSwitchAgent(object): except Exception: LOG.exception(_LE("Failed reporting state!")) + def _create_agent_extension_manager(self, connection): + ext_manager.register_opts(self.conf) + mgr = ext_manager.AgentExtensionsManager(self.conf) + mgr.initialize(connection, 'sriov') + return mgr + def setup_eswitch_mgr(self, device_mappings, exclude_devices={}): - self.eswitch_mgr = esm.ESwitchManager(device_mappings, exclude_devices) + 
self.eswitch_mgr = esm.ESwitchManager() + self.eswitch_mgr.discover_devices(device_mappings, exclude_devices) def scan_devices(self, registered_devices, updated_devices): curr_devices = self.eswitch_mgr.get_assigned_devices() @@ -224,6 +238,7 @@ class SriovNicSwitchAgent(object): profile.get('pci_slot'), device_details['admin_state_up'], spoofcheck) + self.ext_manager.handle_port(self.context, device_details) else: LOG.info(_LI("Device with MAC %s not defined on plugin"), device) @@ -234,6 +249,16 @@ class SriovNicSwitchAgent(object): for device in devices: LOG.info(_LI("Removing device with mac_address %s"), device) try: + pci_slot = self.eswitch_mgr.get_pci_slot_by_mac(device) + if pci_slot: + profile = {'pci_slot': pci_slot} + port = {'device': device, 'profile': profile} + self.ext_manager.delete_port(self.context, port) + else: + LOG.warning(_LW("Failed to find pci slot for device " + "%(device)s; skipping extension port " + "cleanup"), device) + dev_details = self.plugin_rpc.update_device_down(self.context, device, self.agent_id, diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py index 50a95e22683..3f841bcee60 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py @@ -24,9 +24,11 @@ from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ import exceptions as exc +from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) +VIF_TYPE_HW_VEB = 'hw_veb' FLAT_VLAN = 0 sriov_opts = [ @@ -61,9 +63,11 @@ class SriovNicSwitchMechanismDriver(api.MechanismDriver): """ + supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] + def __init__(self, agent_type=constants.AGENT_TYPE_NIC_SWITCH, - vif_type=portbindings.VIF_TYPE_HW_VEB, + 
vif_type=VIF_TYPE_HW_VEB, vif_details={portbindings.CAP_PORT_FILTER: False}, supported_vnic_types=[portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP], diff --git a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py index 9484a61e870..024a6411bc0 100644 --- a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py +++ b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py @@ -16,13 +16,14 @@ from oslo_log import log -from neutron.common import constants as n_const from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import mech_agent LOG = log.getLogger(__name__) +AGENT_TYPE_MLNX = 'Mellanox plugin agent' +VIF_TYPE_IB_HOSTDEV = 'ib_hostdev' class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): @@ -37,8 +38,8 @@ class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): def __init__(self): super(MlnxMechanismDriver, self).__init__( - agent_type=n_const.AGENT_TYPE_MLNX, - vif_type=portbindings.VIF_TYPE_IB_HOSTDEV, + agent_type=AGENT_TYPE_MLNX, + vif_type=VIF_TYPE_IB_HOSTDEV, vif_details={portbindings.CAP_PORT_FILTER: False}, supported_vnic_types=[portbindings.VNIC_DIRECT]) diff --git a/neutron/plugins/ml2/drivers/opendaylight/driver.py b/neutron/plugins/ml2/drivers/opendaylight/driver.py index 28d6931f5a1..05228502514 100644 --- a/neutron/plugins/ml2/drivers/opendaylight/driver.py +++ b/neutron/plugins/ml2/drivers/opendaylight/driver.py @@ -18,9 +18,6 @@ from networking_odl.ml2 import mech_driver from oslo_config import cfg from oslo_log import log -from neutron.common import constants as n_const -from neutron.extensions import portbindings -from neutron.plugins.common import constants from neutron.plugins.ml2 import driver_api as api LOG = log.getLogger(__name__) @@ -59,8 +56,7 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): for opt in required_opts: if not getattr(self, 
opt): raise cfg.RequiredOptError(opt, 'ml2_odl') - self.vif_type = portbindings.VIF_TYPE_OVS - self.vif_details = {portbindings.CAP_PORT_FILTER: True} + self.odl_drv = mech_driver.OpenDaylightDriver() # Postcommit hooks are used to trigger synchronization. @@ -93,33 +89,4 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): self.odl_drv.synchronize('delete', odl_const.ODL_PORTS, context) def bind_port(self, context): - LOG.debug("Attempting to bind port %(port)s on " - "network %(network)s", - {'port': context.current['id'], - 'network': context.network.current['id']}) - for segment in context.segments_to_bind: - if self.check_segment(segment): - context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details, - status=n_const.PORT_STATUS_ACTIVE) - LOG.debug("Bound using segment: %s", segment) - return - else: - LOG.debug("Refusing to bind port for segment ID %(id)s, " - "segment %(seg)s, phys net %(physnet)s, and " - "network type %(nettype)s", - {'id': segment[api.ID], - 'seg': segment[api.SEGMENTATION_ID], - 'physnet': segment[api.PHYSICAL_NETWORK], - 'nettype': segment[api.NETWORK_TYPE]}) - - def check_segment(self, segment): - """Verify a segment is valid for the OpenDaylight MechanismDriver. - - Verify the requested segment is supported by ODL and return True or - False to indicate this to callers. 
- """ - network_type = segment[api.NETWORK_TYPE] - return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE, - constants.TYPE_VXLAN, constants.TYPE_VLAN] + self.odl_drv.bind_port(context) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py index 98b6210f937..56e86f76642 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py @@ -47,6 +47,10 @@ ovs_opts = [ "integration bridge to physical bridges.")), cfg.StrOpt('of_interface', default='ovs-ofctl', choices=['ovs-ofctl'], help=_("OpenFlow interface to use.")), + cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM, + choices=[constants.OVS_DATAPATH_SYSTEM, + constants.OVS_DATAPATH_NETDEV], + help=_("OVS datapath to use.")), ] agent_opts = [ @@ -97,7 +101,10 @@ agent_opts = [ cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. If value is set to 0, rpc " - "timeout won't be changed")) + "timeout won't be changed")), + cfg.BoolOpt('drop_flows_on_start', default=False, + help=_("Reset flow table on start. 
Setting this to True will " + "cause brief traffic interruption.")) ] diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py index 40fa8f0f07f..4643ffe279e 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py @@ -32,7 +32,9 @@ PEER_PHYSICAL_PREFIX = 'phy-' NONEXISTENT_PEER = 'nonexistent-peer' # The different types of tunnels -TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] +TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN, + p_const.TYPE_GENEVE] + # Various tables for DVR use of integration bridge flows LOCAL_SWITCHING = 0 @@ -44,6 +46,8 @@ DVR_PROCESS = 1 PATCH_LV_TO_TUN = 2 GRE_TUN_TO_LV = 3 VXLAN_TUN_TO_LV = 4 +GENEVE_TUN_TO_LV = 6 + DVR_NOT_LEARN = 9 LEARN_FROM_TUN = 10 UCAST_TO_TUN = 20 @@ -67,7 +71,9 @@ ARP_REPLY = '0x2' # Map tunnel types to tables number TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, - p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} + p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV, + p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV} + # The default respawn interval for the ovsdb monitor DEFAULT_OVSDBMON_RESPAWN = 30 @@ -88,3 +94,9 @@ ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' OVS_RESTARTED = 0 OVS_NORMAL = 1 OVS_DEAD = 2 + +EXTENSION_DRIVER_TYPE = 'ovs' + +# ovs datapath types +OVS_DATAPATH_SYSTEM = 'system' +OVS_DATAPATH_NETDEV = 'netdev' diff --git a/neutron/plugins/plumgrid/__init__.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py similarity index 100% rename from neutron/plugins/plumgrid/__init__.py rename to neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py new file mode 100644 index 
00000000000..ce9f2868780 --- /dev/null +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py @@ -0,0 +1,76 @@ +# Copyright (c) 2015 Openstack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +from neutron.agent.common import ovs_lib +from neutron.agent.l2.extensions import qos +from neutron.i18n import _LW +from neutron.plugins.ml2.drivers.openvswitch.mech_driver import ( + mech_openvswitch) + +LOG = logging.getLogger(__name__) + + +class QosOVSAgentDriver(qos.QosAgentDriver): + + _SUPPORTED_RULES = ( + mech_openvswitch.OpenvswitchMechanismDriver.supported_qos_rule_types) + + def __init__(self): + super(QosOVSAgentDriver, self).__init__() + self.br_int_name = cfg.CONF.OVS.integration_bridge + self.br_int = None + + def initialize(self): + self.br_int = ovs_lib.OVSBridge(self.br_int_name) + + def create(self, port, qos_policy): + self._handle_rules('create', port, qos_policy) + + def update(self, port, qos_policy): + self._handle_rules('update', port, qos_policy) + + def delete(self, port, qos_policy): + # TODO(QoS): consider optimizing flushing of all QoS rules from the + # port by inspecting qos_policy.rules contents + self._delete_bandwidth_limit(port) + + def _handle_rules(self, action, port, qos_policy): + for rule in qos_policy.rules: + if rule.rule_type in self._SUPPORTED_RULES: + handler_name = ("".join(("_", action, "_", rule.rule_type))) + handler = 
getattr(self, handler_name) + handler(port, rule) + else: + LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: ' + '%(rule_type)s; skipping'), + {'rule_id': rule.id, 'rule_type': rule.rule_type}) + + def _create_bandwidth_limit(self, port, rule): + self._update_bandwidth_limit(port, rule) + + def _update_bandwidth_limit(self, port, rule): + port_name = port['vif_port'].port_name + max_kbps = rule.max_kbps + max_burst_kbps = rule.max_burst_kbps + + self.br_int.create_egress_bw_limit_for_port(port_name, + max_kbps, + max_burst_kbps) + + def _delete_bandwidth_limit(self, port): + port_name = port['vif_port'].port_name + self.br_int.delete_egress_bw_limit_for_port(port_name) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py index c95a307634b..952513e7176 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py @@ -29,7 +29,6 @@ class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): """openvswitch agent br-int specific logic.""" def setup_default_table(self): - self.delete_flows() self.install_normal() self.setup_canary_table() self.install_drop(table_id=constants.ARP_SPOOF_TABLE) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py index 67c6273e71b..fb2df032ff4 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py @@ -52,78 +52,85 @@ class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, def setup_default_table(self, patch_int_ofport, arp_responder_enabled): # Table 0 (default) will sort incoming traffic depending on in_port - self.add_flow(priority=1, - in_port=patch_int_ofport, - actions="resubmit(,%s)" % - 
constants.PATCH_LV_TO_TUN) - self.add_flow(priority=0, actions="drop") + with self.deferred() as deferred_br: + deferred_br.add_flow(priority=1, + in_port=patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + deferred_br.add_flow(priority=0, actions="drop") - if arp_responder_enabled: - # ARP broadcast-ed request go to the local ARP_RESPONDER table to - # be locally resolved - # REVISIT(yamamoto): arp_op=arp.ARP_REQUEST - self.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=1, - proto='arp', - dl_dst="ff:ff:ff:ff:ff:ff", - actions=("resubmit(,%s)" % - constants.ARP_RESPONDER)) + if arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER + # table to be locally resolved + # REVISIT(yamamoto): arp_op=arp.ARP_REQUEST + deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) - # PATCH_LV_TO_TUN table will handle packets coming from patch_int - # unicasts go to table UCAST_TO_TUN where remote addresses are learnt - self.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are + # learnt + deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions=("resubmit(,%s)" % + constants.UCAST_TO_TUN)) - # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding - self.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles + # flooding + deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + 
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions=("resubmit(,%s)" % + constants.FLOOD_TO_TUN)) - # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id - # for each tunnel type, and resubmit to table LEARN_FROM_TUN where - # remote mac addresses will be learnt - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - self.add_flow(table=constants.TUN_TABLE[tunnel_type], - priority=0, - actions="drop") + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + deferred_br.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, actions="drop") - # LEARN_FROM_TUN table will have a single flow using a learn action to - # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac - # addresses (assumes that lvid has already been set by a previous flow) - learned_flow = ("table=%s," - "priority=1," - "hard_timeout=300," - "NXM_OF_VLAN_TCI[0..11]," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - "load:0->NXM_OF_VLAN_TCI[]," - "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," - "output:NXM_OF_IN_PORT[]" % - constants.UCAST_TO_TUN) - # Once remote mac addresses are learnt, output packet to patch_int - self.add_flow(table=constants.LEARN_FROM_TUN, - priority=1, - actions="learn(%s),output:%s" % - (learned_flow, patch_int_ofport)) + # LEARN_FROM_TUN table will have a single flow using a learn action + # to dynamically set-up flows in UCAST_TO_TUN corresponding to + # remote mac addresses (assumes that lvid has already been set by + # a previous flow) + learned_flow = ("cookie=%(cookie)s," + "table=%(table)s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + {'cookie': self.agent_uuid_stamp, + 'table': constants.UCAST_TO_TUN}) + # 
Once remote mac addresses are learnt, output packet to patch_int + deferred_br.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, patch_int_ofport)) - # Egress unicast will be handled in table UCAST_TO_TUN, where remote - # mac addresses will be learned. For now, just add a default flow that - # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them - # as broadcasts/multicasts - self.add_flow(table=constants.UCAST_TO_TUN, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) + # Egress unicast will be handled in table UCAST_TO_TUN, where + # remote mac addresses will be learned. For now, just add a + # default flow that will resubmit unknown unicasts to table + # FLOOD_TO_TUN to treat them as broadcasts/multicasts + deferred_br.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) - if arp_responder_enabled: - # If none of the ARP entries correspond to the requested IP, the - # broadcast-ed packet is resubmitted to the flooding table - self.add_flow(table=constants.ARP_RESPONDER, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) + if arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, + # the broadcast-ed packet is resubmitted to the flooding table + deferred_br.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, # for now, add a default drop action diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py index 578e3e2196a..e0d5154c39f 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py @@ -14,6 +14,14 @@ # License for the specific 
language governing permissions and limitations # under the License. +import re + +from oslo_log import log as logging + +from neutron.i18n import _LW + +LOG = logging.getLogger(__name__) + # Field name mappings (from Ryu to ovs-ofctl) _keywords = { 'eth_src': 'dl_src', @@ -26,6 +34,10 @@ _keywords = { class OpenFlowSwitchMixin(object): """Mixin to provide common convenient routines for an openflow switch.""" + agent_uuid_stamp = '0x0' + + def set_agent_uuid_stamp(self, val): + self.agent_uuid_stamp = val @staticmethod def _conv_args(kwargs): @@ -37,6 +49,9 @@ class OpenFlowSwitchMixin(object): def dump_flows(self, table_id): return self.dump_flows_for_table(table_id) + def dump_flows_all_tables(self): + return self.dump_all_flows() + def install_goto_next(self, table_id): self.install_goto(table_id=table_id, dest_table_id=table_id + 1) @@ -72,3 +87,36 @@ class OpenFlowSwitchMixin(object): **self._conv_args(kwargs)) else: super(OpenFlowSwitchMixin, self).remove_all_flows() + + def add_flow(self, **kwargs): + kwargs['cookie'] = self.agent_uuid_stamp + super(OpenFlowSwitchMixin, self).add_flow(**self._conv_args(kwargs)) + + def mod_flow(self, **kwargs): + kwargs['cookie'] = self.agent_uuid_stamp + super(OpenFlowSwitchMixin, self).mod_flow(**self._conv_args(kwargs)) + + def _filter_flows(self, flows): + LOG.debug("Agent uuid stamp used to filter flows: %s", + self.agent_uuid_stamp) + cookie_re = re.compile('cookie=(0x[A-Fa-f0-9]*)') + table_re = re.compile('table=([0-9]*)') + for flow in flows: + fl_cookie = cookie_re.search(flow) + if not fl_cookie: + continue + fl_cookie = fl_cookie.group(1) + if int(fl_cookie, 16) != self.agent_uuid_stamp: + fl_table = table_re.search(flow) + if not fl_table: + continue + fl_table = fl_table.group(1) + yield flow, fl_cookie, fl_table + + def cleanup_flows(self): + flows = self.dump_flows_all_tables() + for flow, cookie, table in self._filter_flows(flows): + # deleting a stale flow should be rare. 
+ # it might deserve some attention + LOG.warning(_LW("Deleting flow %s"), flow) + self.delete_flows(cookie=cookie + '/-1', table=table) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py index d6e6c24ac5d..c2b823d94dd 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils @@ -25,6 +26,9 @@ from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants LOG = logging.getLogger(__name__) +cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' + 'agent.common.config') + # A class to represent a DVR-hosted subnet including vif_ports resident on # that subnet @@ -134,6 +138,7 @@ class OVSDVRNeutronAgent(object): self.dvr_mac_address = None if self.enable_distributed_routing: self.get_dvr_mac_address() + self.conf = cfg.CONF def setup_dvr_flows(self): self.setup_dvr_flows_on_integ_br() @@ -205,7 +210,8 @@ class OVSDVRNeutronAgent(object): LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"), self.dvr_mac_address) # Remove existing flows in integration bridge - self.int_br.delete_flows() + if self.conf.AGENT.drop_flows_on_start: + self.int_br.delete_flows() # Add a canary flow to int_br to track OVS restarts self.int_br.setup_canary_table() @@ -373,8 +379,8 @@ class OVSDVRNeutronAgent(object): return else: # set up LocalDVRSubnetMapping available for this subnet - subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, - subnet_uuid) + subnet_info = self.plugin_rpc.get_subnet_for_dvr( + self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: LOG.error(_LE("DVR: Unable to retrieve 
subnet information " "for subnet_id %s"), subnet_uuid) @@ -525,8 +531,8 @@ class OVSDVRNeutronAgent(object): if subnet_uuid not in self.local_dvr_map: # no csnat ports seen on this subnet - create csnat state # for this subnet - subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, - subnet_uuid) + subnet_info = self.plugin_rpc.get_subnet_for_dvr( + self.context, subnet_uuid, fixed_ips=fixed_ips) ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) self.local_dvr_map[subnet_uuid] = ldm else: diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index b0d0ef3d307..6c7999f9d99 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -14,11 +14,12 @@ # under the License. import hashlib -import logging as std_logging import signal import sys import time +import uuid +import functools import netaddr from oslo_config import cfg from oslo_log import log as logging @@ -30,6 +31,7 @@ from six import moves from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils +from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent.linux import ip_lib from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc @@ -57,6 +59,7 @@ cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' # A placeholder for dead vlans. 
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1 +UINT64_BITMASK = (1 << 64) - 1 class _mac_mydialect(netaddr.mac_unix): @@ -171,9 +174,14 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, :param conf: an instance of ConfigOpts ''' super(OVSNeutronAgent, self).__init__() - self.br_int_cls = bridge_classes['br_int'] - self.br_phys_cls = bridge_classes['br_phys'] - self.br_tun_cls = bridge_classes['br_tun'] + self.conf = conf or cfg.CONF + + # init bridge classes with configured datapath type. + self.br_int_cls, self.br_phys_cls, self.br_tun_cls = ( + functools.partial(bridge_classes[b], + datapath_type=self.conf.OVS.datapath_type) + for b in ('br_int', 'br_phys', 'br_tun')) + self.use_veth_interconnection = use_veth_interconnection self.veth_mtu = veth_mtu self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG, @@ -186,7 +194,6 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.enable_distributed_routing = enable_distributed_routing self.arp_responder_enabled = arp_responder and self.l2_pop self.prevent_arp_spoofing = prevent_arp_spoofing - self.conf = conf or cfg.CONF self.agent_state = { 'binary': 'neutron-openvswitch-agent', @@ -216,6 +223,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # Keep track of int_br's device count for use by _report_state() self.int_br_device_count = 0 + self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK + self.int_br = self.br_int_cls(integ_br) self.setup_integration_br() # Stores port update notifications for processing in main rpc loop @@ -225,10 +234,13 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # keeps association between ports and ofports to detect ofport change self.vifname_to_ofport_map = {} self.setup_rpc() + self.init_extension_manager(self.connection) self.bridge_mappings = bridge_mappings self.setup_physical_bridges(self.bridge_mappings) self.local_vlan_map = {} - self.tun_br_ofports = {p_const.TYPE_GRE: {}, + + 
self.tun_br_ofports = {p_const.TYPE_GENEVE: {}, + p_const.TYPE_GRE: {}, p_const.TYPE_VXLAN: {}} self.polling_interval = polling_interval @@ -243,8 +255,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.patch_tun_ofport = constants.OFPORT_INVALID if self.enable_tunneling: # The patch_int_ofport and patch_tun_ofport are updated - # here inside the call to reset_tunnel_br() - self.reset_tunnel_br(tun_br) + # here inside the call to setup_tunnel_br() + self.setup_tunnel_br(tun_br) self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( self.context, @@ -268,7 +280,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, heartbeat.start(interval=report_interval) if self.enable_tunneling: - self.setup_tunnel_br() + self.setup_tunnel_br_flows() self.dvr_agent.setup_dvr_flows() @@ -365,6 +377,13 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, consumers, start_listening=False) + def init_extension_manager(self, connection): + ext_manager.register_opts(self.conf) + self.ext_manager = ( + ext_manager.AgentExtensionsManager(self.conf)) + self.ext_manager.initialize( + connection, constants.EXTENSION_DRIVER_TYPE) + def get_net_uuid(self, vif_id): for network_id, vlan_mapping in six.iteritems(self.local_vlan_map): if vif_id in vlan_mapping.vif_ports: @@ -395,6 +414,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # longer have access to the network self.sg_agent.remove_devices_filter([port_id]) port = self.int_br.get_vif_port_by_id(port_id) + self.ext_manager.delete_port(self.context, + {"vif_port": port, + "port_id": port_id}) if port: # don't log errors since there is a chance someone will be # removing the port from the bridge at the same time @@ -563,7 +585,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, :param net_uuid: the uuid of the network associated with this vlan. 
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', - 'local') + 'local', 'geneve') :param physical_network: the physical network for 'vlan' or 'flat' :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' ''' @@ -861,8 +883,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def setup_integration_br(self): '''Setup the integration bridge. - Delete patch ports and remove all existing flows. ''' + self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp) # Ensure the integration bridge is created. # ovs_lib.OVSBridge.create() will run # ovs-vsctl -- --may-exist add-br BRIDGE_NAME @@ -872,7 +894,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.int_br.setup_controllers(self.conf) self.int_br.delete_port(self.conf.OVS.int_peer_patch_port) - + if self.conf.AGENT.drop_flows_on_start: + self.int_br.delete_flows() self.int_br.setup_default_table() def setup_ancillary_bridges(self, integ_br, tun_br): @@ -901,7 +924,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, ancillary_bridges.append(br) return ancillary_bridges - def reset_tunnel_br(self, tun_br_name=None): + def setup_tunnel_br(self, tun_br_name=None): '''(re)initialize the tunnel bridge. 
Creates tunnel bridge, and links it to the integration bridge @@ -911,15 +934,21 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, ''' if not self.tun_br: self.tun_br = self.br_tun_cls(tun_br_name) + self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp) - self.tun_br.reset_bridge(secure_mode=True) + if not self.tun_br.bridge_exists(self.tun_br.br_name): + self.tun_br.create(secure_mode=True) self.tun_br.setup_controllers(self.conf) - self.patch_tun_ofport = self.int_br.add_patch_port( - self.conf.OVS.int_peer_patch_port, - self.conf.OVS.tun_peer_patch_port) - self.patch_int_ofport = self.tun_br.add_patch_port( - self.conf.OVS.tun_peer_patch_port, - self.conf.OVS.int_peer_patch_port) + if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or + self.patch_tun_ofport == ovs_lib.INVALID_OFPORT): + self.patch_tun_ofport = self.int_br.add_patch_port( + self.conf.OVS.int_peer_patch_port, + self.conf.OVS.tun_peer_patch_port) + if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or + self.patch_int_ofport == ovs_lib.INVALID_OFPORT): + self.patch_int_ofport = self.tun_br.add_patch_port( + self.conf.OVS.tun_peer_patch_port, + self.conf.OVS.int_peer_patch_port) if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, self.patch_int_ofport): LOG.error(_LE("Failed to create OVS patch port. Cannot have " @@ -927,9 +956,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, "version of OVS does not support tunnels or patch " "ports. Agent terminated!")) exit(1) - self.tun_br.delete_flows() + if self.conf.AGENT.drop_flows_on_start: + self.tun_br.delete_flows() - def setup_tunnel_br(self): + def setup_tunnel_br_flows(self): '''Setup the tunnel bridge. Add all flows to the tunnel bridge. 
@@ -997,9 +1027,15 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, bridge) phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX, bridge) - self.int_br.delete_port(int_if_name) - br.delete_port(phys_if_name) + # Interface type of port for physical and integration bridges must + # be same, so check only one of them. + int_type = self.int_br.db_get_val("Interface", int_if_name, "type") if self.use_veth_interconnection: + # Drop ports if the interface types doesn't match the + # configuration value. + if int_type == 'patch': + self.int_br.delete_port(int_if_name) + br.delete_port(phys_if_name) if ip_lib.device_exists(int_if_name): ip_lib.IPDevice(int_if_name).link.delete() # Give udev a chance to process its rules here, to avoid @@ -1011,6 +1047,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, int_ofport = self.int_br.add_port(int_veth) phys_ofport = br.add_port(phys_veth) else: + # Drop ports if the interface type doesn't match the + # configuration value + if int_type == 'veth': + self.int_br.delete_port(int_if_name) + br.delete_port(phys_if_name) # Create patch ports without associating them in order to block # untranslated traffic before association int_ofport = self.int_br.add_patch_port( @@ -1217,6 +1258,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def treat_devices_added_or_updated(self, devices, ovs_restarted): skipped_devices = [] need_binding_devices = [] + security_disabled_devices = [] devices_details_list = ( self.plugin_rpc.get_devices_details_list_and_failed_devices( self.context, @@ -1245,6 +1287,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if 'port_id' in details: LOG.info(_LI("Port %(device)s updated. 
Details: %(details)s"), {'device': device, 'details': details}) + details['vif_port'] = port need_binding = self.treat_vif_port(port, details['port_id'], details['network_id'], details['network_type'], @@ -1255,13 +1298,19 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, details['device_owner'], ovs_restarted) if need_binding: - details['vif_port'] = port need_binding_devices.append(details) + + port_security = details['port_security_enabled'] + has_sgs = 'security_groups' in details + if not port_security or not has_sgs: + security_disabled_devices.append(device) + + self.ext_manager.handle_port(self.context, details) else: LOG.warn(_LW("Device %s not defined on plugin"), device) if (port and port.ofport != -1): self.port_dead(port) - return skipped_devices, need_binding_devices + return skipped_devices, need_binding_devices, security_disabled_devices def treat_ancillary_devices_added(self, devices): devices_details_list = ( @@ -1344,10 +1393,12 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, devices_added_updated = (port_info.get('added', set()) | port_info.get('updated', set())) need_binding_devices = [] + security_disabled_ports = [] if devices_added_updated: start = time.time() try: - skipped_devices, need_binding_devices = ( + (skipped_devices, need_binding_devices, + security_disabled_ports) = ( self.treat_devices_added_or_updated( devices_added_updated, ovs_restarted)) LOG.debug("process_network_ports - iteration:%(iter_num)d - " @@ -1373,7 +1424,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # TODO(salv-orlando): Optimize avoiding applying filters # unnecessarily, (eg: when there are no IP address changes) - self.sg_agent.setup_port_filters(port_info.get('added', set()), + added_ports = port_info.get('added', set()) + if security_disabled_ports: + added_ports -= set(security_disabled_ports) + self.sg_agent.setup_port_filters(added_ports, port_info.get('updated', set())) 
self._bind_devices(need_binding_devices) @@ -1503,6 +1557,15 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 'removed': len(ancillary_port_info.get('removed', []))} return port_stats + def cleanup_stale_flows(self): + if self.iter_num == 0: + bridges = [self.int_br] + if self.enable_tunneling: + bridges.append(self.tun_br) + for bridge in bridges: + LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name) + bridge.cleanup_flows() + def rpc_loop(self, polling_manager=None): if not polling_manager: polling_manager = polling.get_polling_manager( @@ -1531,8 +1594,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.setup_integration_br() self.setup_physical_bridges(self.bridge_mappings) if self.enable_tunneling: - self.reset_tunnel_br() self.setup_tunnel_br() + self.setup_tunnel_br_flows() tunnel_sync = True if self.enable_distributed_routing: self.dvr_agent.reset_ovs_parameters(self.int_br, @@ -1601,6 +1664,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # If treat devices fails - must resync with plugin sync = self.process_network_ports(port_info, ovs_restarted) + self.cleanup_stale_flows() LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "ports processed. 
Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, @@ -1660,7 +1724,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.conf.reload_config_files() config.setup_logging() LOG.debug('Full set of CONF:') - self.conf.log_opt_values(LOG, std_logging.DEBUG) + self.conf.log_opt_values(LOG, logging.DEBUG) self.catch_sighup = False return self.run_daemon_loop @@ -1672,9 +1736,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def _check_agent_configurations(self): if (self.enable_distributed_routing and self.enable_tunneling and not self.l2_pop): - raise ValueError(_("DVR deployments for VXLAN/GRE underlays " - "require L2-pop to be enabled, in both the " - "Agent and Server side.")) + + raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve " + "underlays require L2-pop to be enabled, " + "in both the Agent and Server side.")) def create_agent_config_map(config): @@ -1717,6 +1782,18 @@ def create_agent_config_map(config): return kwargs +def validate_local_ip(local_ip): + """If tunneling is enabled, verify if the ip exists on the agent's host.""" + if not cfg.CONF.AGENT.tunnel_types: + return + + if not ip_lib.IPWrapper().get_device_by_ip(local_ip): + LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'." 
+ " IP couldn't be found on this host's interfaces."), + local_ip) + raise SystemExit(1) + + def prepare_xen_compute(): is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper if is_xen_compute_host: @@ -1733,6 +1810,7 @@ def main(bridge_classes): LOG.exception(_LE("Agent failed to create agent config map")) raise SystemExit(1) prepare_xen_compute() + validate_local_ip(agent_config['local_ip']) try: agent = OVSNeutronAgent(bridge_classes, **agent_config) except (RuntimeError, ValueError) as e: diff --git a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py index 13128a246ba..2ad29dd00b3 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py @@ -20,6 +20,7 @@ from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2.drivers import mech_agent +from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) @@ -34,6 +35,8 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): network. """ + supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] + def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, diff --git a/neutron/plugins/ml2/drivers/type_geneve.py b/neutron/plugins/ml2/drivers/type_geneve.py new file mode 100644 index 00000000000..d8f430aafd2 --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_geneve.py @@ -0,0 +1,103 @@ +# Copyright (c) 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log +import sqlalchemy as sa +from sqlalchemy import sql + +from neutron.common import exceptions as n_exc +from neutron.db import model_base +from neutron.i18n import _LE +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers import type_tunnel + +LOG = log.getLogger(__name__) + +geneve_opts = [ + cfg.ListOpt('vni_ranges', + default=[], + help=_("Comma-separated list of : tuples " + "enumerating ranges of Geneve VNI IDs that are " + "available for tenant network allocation")), + cfg.IntOpt('max_header_size', + default=p_const.GENEVE_ENCAP_MIN_OVERHEAD, + help=_("Geneve encapsulation header size is dynamic, this " + "value is used to calculate the maximum MTU " + "for the driver." 
+ "this is the sum of the sizes of the outer " + "ETH + IP + UDP + GENEVE header sizes")), +] + +cfg.CONF.register_opts(geneve_opts, "ml2_type_geneve") + + +class GeneveAllocation(model_base.BASEV2): + + __tablename__ = 'ml2_geneve_allocations' + + geneve_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False, + server_default=sql.false(), index=True) + + +class GeneveEndpoints(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + + __tablename__ = 'ml2_geneve_endpoints' + __table_args__ = ( + sa.UniqueConstraint('host', + name='unique_ml2_geneve_endpoints0host'), + model_base.BASEV2.__table_args__ + ) + ip_address = sa.Column(sa.String(64), primary_key=True) + host = sa.Column(sa.String(255), nullable=True) + + def __repr__(self): + return "" % self.ip_address + + +class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): + + def __init__(self): + super(GeneveTypeDriver, self).__init__(GeneveAllocation, + GeneveEndpoints) + self.max_encap_size = cfg.CONF.ml2_type_geneve.max_header_size + + def get_type(self): + return p_const.TYPE_GENEVE + + def initialize(self): + try: + self._initialize(cfg.CONF.ml2_type_geneve.vni_ranges) + except n_exc.NetworkTunnelRangeError: + LOG.error(_LE("Failed to parse vni_ranges. 
" + "Service terminated!")) + raise SystemExit() + + def get_endpoints(self): + """Get every geneve endpoints from database.""" + geneve_endpoints = self._get_endpoints() + return [{'ip_address': geneve_endpoint.ip_address, + 'host': geneve_endpoint.host} + for geneve_endpoint in geneve_endpoints] + + def add_endpoint(self, ip, host): + return self._add_endpoint(ip, host) + + def get_mtu(self, physical_network=None): + mtu = super(GeneveTypeDriver, self).get_mtu() + return mtu - self.max_encap_size if mtu else 0 diff --git a/neutron/plugins/ml2/extensions/qos.py b/neutron/plugins/ml2/extensions/qos.py new file mode 100644 index 00000000000..4de7cf653a7 --- /dev/null +++ b/neutron/plugins/ml2/extensions/qos.py @@ -0,0 +1,50 @@ +# Copyright (c) 2015 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class QosExtensionDriver(api.ExtensionDriver):
    """ML2 extension driver wiring QoS policy fields into ports/networks.

    All field processing and extraction is delegated to the core QoS
    resource extension handler; this class only routes the ML2 extension
    hooks to it with the right resource type.
    """

    def initialize(self):
        # Single handler instance shared by all hooks below.
        self.core_ext_handler = qos_core.QosCoreResourceExtension()
        LOG.debug("QosExtensionDriver initialization complete")

    def _process(self, resource_type, context, data, result):
        # Shared create/update path: hand the request fields to the core
        # QoS handler for the given resource type.
        self.core_ext_handler.process_fields(
            context, resource_type, data, result)

    def process_create_network(self, context, data, result):
        self._process(base_core.NETWORK, context, data, result)

    # Network updates are handled identically to creates.
    process_update_network = process_create_network

    def process_create_port(self, context, data, result):
        self._process(base_core.PORT, context, data, result)

    # Port updates are handled identically to creates.
    process_update_port = process_create_port

    def extend_network_dict(self, session, db_data, result):
        fields = self.core_ext_handler.extract_fields(
            base_core.NETWORK, db_data)
        result.update(fields)

    def extend_port_dict(self, session, db_data, result):
        fields = self.core_ext_handler.extract_fields(
            base_core.PORT, db_data)
        result.update(fields)
[driver.name for driver in self.ordered_mech_drivers]) + @property + def supported_qos_rule_types(self): + if not self.ordered_mech_drivers: + return [] + + rule_types = set(qos_consts.VALID_RULE_TYPES) + + # Recalculate on every call to allow drivers determine supported rule + # types dynamically + for driver in self.ordered_mech_drivers: + if hasattr(driver.obj, 'supported_qos_rule_types'): + new_rule_types = \ + rule_types & set(driver.obj.supported_qos_rule_types) + dropped_rule_types = new_rule_types - rule_types + if dropped_rule_types: + LOG.info( + _LI("%(rule_types)s rule types disabled for ml2 " + "because %(driver)s does not support them"), + {'rule_types': ', '.join(dropped_rule_types), + 'driver': driver.name}) + rule_types = new_rule_types + else: + # at least one of drivers does not support QoS, meaning there + # are no rule types supported by all of them + LOG.warn( + _LW("%s does not support QoS; no rule types available"), + driver.name) + return [] + + rule_types = list(rule_types) + LOG.debug("Supported QoS rule types " + "(common subset for all mech drivers): %s", rule_types) + return rule_types + def initialize(self): for driver in self.ordered_mech_drivers: LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name) @@ -698,7 +733,6 @@ class MechanismManager(stevedore.named.NamedExtensionManager): LOG.exception(_LE("Mechanism driver %s failed in " "bind_port"), driver.name) - binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"), {'port': context.current['id'], 'host': binding.host}) @@ -754,9 +788,10 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): exts = [] for driver in self.ordered_ext_drivers: alias = driver.obj.extension_alias - exts.append(alias) - LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"), - {'alias': alias, 'drv': driver.name}) + if alias: + exts.append(alias) + LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"), + 
{'alias': alias, 'drv': driver.name}) return exts def _call_on_ext_drivers(self, method_name, plugin_context, data, result): @@ -810,9 +845,6 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): {'name': driver.name, 'method': method_name}) raise ml2_exc.ExtensionDriverError(driver=driver.name) - LOG.debug("%(method)s succeeded for driver %(driver)s", - {'method': method_name, 'driver': driver.name}) - def extend_network_dict(self, session, base_model, result): """Notify all extension drivers to extend network dictionary.""" self._call_on_dict_driver("extend_network_dict", session, base_model, diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 79741afba7f..4cdf98a40e7 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -31,6 +31,7 @@ from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import metadata_rpc +from neutron.api.rpc.handlers import resources_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.api.v2 import attributes from neutron.callbacks import events @@ -76,6 +77,7 @@ from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc from neutron.quota import resource_registry +from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) @@ -116,7 +118,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, "multi-provider", "allowed-address-pairs", "extra_dhcp_opt", "subnet_allocation", "net-mtu", "vlan-transparent", - "address-scope"] + "address-scope", "dns-integration"] @property def supported_extension_aliases(self): @@ -161,7 +163,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_rpc.DVRServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback(), - metadata_rpc.MetadataRpcCallback() + metadata_rpc.MetadataRpcCallback(), + 
resources_rpc.ResourcesPullRpcCallback() ] def _setup_dhcp(self): @@ -171,6 +174,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, ) self.start_periodic_dhcp_agent_status_check() + @property + def supported_qos_rule_types(self): + return self.mechanism_manager.supported_qos_rule_types + @log_helpers.log_method_call def start_rpc_listeners(self): """Start the RPC loop to let the plugin communicate with agents.""" @@ -259,9 +266,29 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False): - plugin_context = context._plugin_context - port_id = context.current['id'] + # Binding limit does not need to be tunable because no + # more than a couple of attempts should ever be required in + # normal operation. + for count in range(1, MAX_BIND_TRIES + 1): + if count > 1: + # multiple attempts shouldn't happen very often so we log each + # attempt after the 1st. + greenthread.sleep(0) # yield + LOG.info(_LI("Attempt %(count)s to bind port %(port)s"), + {'count': count, 'port': context.current['id']}) + context, need_notify, try_again = self._attempt_binding( + context, need_notify) + if not try_again: + if allow_notify and need_notify: + self._notify_port_updated(context) + return context + LOG.error(_LE("Failed to commit binding results for %(port)s " + "after %(max)s tries"), + {'port': context.current['id'], 'max': MAX_BIND_TRIES}) + return context + + def _attempt_binding(self, context, need_notify): # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is # possible (but unlikely) that the port's state could change @@ -270,57 +297,41 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # thread commits its results, the already committed results are # used. If attributes such as binding:host_id, # binding:profile, or binding:vnic_type are updated - # concurrently, this loop retries binding using the new - # values. 
- count = 0 - while True: - # First, determine whether it is necessary and possible to - # bind the port. - binding = context._binding - if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND + # concurrently, the try_again flag is returned to indicate that + # the commit was unsuccessful. + plugin_context = context._plugin_context + port_id = context.current['id'] + binding = context._binding + try_again = False + # First, determine whether it is necessary and possible to + # bind the port. + if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND or not binding.host): - # We either don't need to bind the port, or can't, so - # notify if needed and return. - if allow_notify and need_notify: - self._notify_port_updated(context) - return context + # We either don't need to bind the port or can't + return context, need_notify, try_again - # Limit binding attempts to avoid any possibility of - # infinite looping and to ensure an error is logged - # instead. This does not need to be tunable because no - # more than a couple attempts should ever be required in - # normal operation. Log at info level if not 1st attempt. - count += 1 - if count > MAX_BIND_TRIES: - LOG.error(_LE("Failed to commit binding results for %(port)s " - "after %(max)s tries"), - {'port': port_id, 'max': MAX_BIND_TRIES}) - return context - if count > 1: - greenthread.sleep(0) # yield - LOG.info(_LI("Attempt %(count)s to bind port %(port)s"), - {'count': count, 'port': port_id}) - - # The port isn't already bound and the necessary - # information is available, so attempt to bind the port. - bind_context = self._bind_port(context) - - # Now try to commit result of attempting to bind the port. - new_context, did_commit = self._commit_port_binding( - plugin_context, port_id, binding, bind_context) - if not new_context: - # The port has been deleted concurrently, so just - # return the unbound result from the initial - # transaction that completed before the deletion. 
- LOG.debug("Port %s has been deleted concurrently", - port_id) - return context - # Need to notify if we succeed and our results were - # committed. - if did_commit and (new_context._binding.vif_type != - portbindings.VIF_TYPE_BINDING_FAILED): - need_notify = True - context = new_context + # The port isn't already bound and the necessary + # information is available, so attempt to bind the port. + bind_context = self._bind_port(context) + # Now try to commit result of attempting to bind the port. + new_context, did_commit = self._commit_port_binding( + plugin_context, port_id, binding, bind_context) + if not new_context: + # The port has been deleted concurrently, so just + # return the unbound result from the initial + # transaction that completed before the deletion. + LOG.debug("Port %s has been deleted concurrently", + port_id) + need_notify = False + return context, need_notify, try_again + # Need to notify if we succeed and our results were + # committed. + if did_commit and (new_context._binding.vif_type != + portbindings.VIF_TYPE_BINDING_FAILED): + need_notify = True + return new_context, need_notify, try_again + try_again = True + return new_context, need_notify, try_again def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous @@ -360,7 +371,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, port_id) if not port_db: # The port has been deleted concurrently. 
- return (None, None) + return (None, False) oport = self._make_port_dict(port_db) port = self._make_port_dict(port_db) network = new_context.network.current @@ -552,6 +563,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, {'res': resource, 'id': obj['result']['id']}) + @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, + retry_on_request=True) def _create_bulk_ml2(self, resource, context, request_items): objects = [] collection = "%ss" % resource @@ -993,6 +1006,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, return result, mech_context + @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, + retry_on_request=True) def create_port(self, context, port): attrs = port[attributes.PORT] result, mech_context = self._create_port_db(context, port) @@ -1119,6 +1134,12 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, original_port[psec.PORTSECURITY] != updated_port[psec.PORTSECURITY]): need_port_update_notify = True + # TODO(QoS): Move out to the extension framework somehow. + # Follow https://review.openstack.org/#/c/169223 for a solution. 
+ if (qos_consts.QOS_POLICY_ID in attrs and + original_port[qos_consts.QOS_POLICY_ID] != + updated_port[qos_consts.QOS_POLICY_ID]): + need_port_update_notify = True if addr_pair.ADDRESS_PAIRS in attrs: need_port_update_notify |= ( @@ -1402,10 +1423,11 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, return self._bind_port_if_needed(port_context) - @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, - retry_on_deadlock=True, - retry_on_request=True) - @db_api.convert_db_exception_to_retry(stale_data=True) + @oslo_db_api.wrap_db_retry( + max_retries=db_api.MAX_RETRIES, + retry_on_deadlock=True, retry_on_request=True, + exception_checker=lambda e: isinstance(e, sa_exc.StaleDataError) + ) def update_port_status(self, context, port_id, status, host=None, network=None): """ diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index 4f5c10848c8..8ae7997a218 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -32,6 +32,7 @@ from neutron.i18n import _LE, _LW from neutron import manager from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_tunnel +from neutron.services.qos import qos_consts # REVISIT(kmestery): Allow the type and mechanism drivers to supply the # mixins and eventually remove the direct dependencies on type_tunnel. 
@@ -108,6 +109,9 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): host, port_context.network.current) + qos_policy_id = (port.get(qos_consts.QOS_POLICY_ID) or + port_context.network._network.get( + qos_consts.QOS_POLICY_ID)) entry = {'device': device, 'network_id': port['network_id'], 'port_id': port['id'], @@ -120,7 +124,10 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): 'device_owner': port['device_owner'], 'allowed_address_pairs': port['allowed_address_pairs'], 'port_security_enabled': port.get(psec.PORTSECURITY, True), + 'qos_policy_id': qos_policy_id, 'profile': port[portbindings.PROFILE]} + if 'security_groups' in port: + entry['security_groups'] = port['security_groups'] LOG.debug("Returning: %s", entry) return entry diff --git a/neutron/plugins/nec/README b/neutron/plugins/nec/README deleted file mode 100644 index 337c2a03cc9..00000000000 --- a/neutron/plugins/nec/README +++ /dev/null @@ -1,11 +0,0 @@ -Neutron NEC OpenFlow Plugin -=========================== - -Neutron plugins for NEC OpenFlow networking products and -Trema Sliceable Switch (reference implementation). - -* Main Page: https://wiki.openstack.org/wiki/Neutron/NEC_OpenFlow_Plugin - -* Repository: - * http://git.openstack.org/cgit/stackforge/networking-nec/ - * https://github.com/stackforge/networking-nec diff --git a/neutron/plugins/nec/config.py b/neutron/plugins/nec/config.py deleted file mode 100644 index be1021a1faf..00000000000 --- a/neutron/plugins/nec/config.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from neutron.agent.common import config - - -ovs_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("Integration bridge to use.")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), -] - -ofc_opts = [ - cfg.StrOpt('host', default='127.0.0.1', - help=_("Host to connect to.")), - cfg.StrOpt('path_prefix', default='', - help=_("Base URL of OFC REST API. " - "It is prepended to each API request.")), - cfg.StrOpt('port', default='8888', - help=_("Port to connect to.")), - cfg.StrOpt('driver', default='trema', - help=_("Driver to use.")), - cfg.BoolOpt('enable_packet_filter', default=True, - help=_("Enable packet filter.")), - cfg.BoolOpt('support_packet_filter_on_ofc_router', default=True, - help=_("Support packet filter on OFC router interface.")), - cfg.BoolOpt('use_ssl', default=False, - help=_("Use SSL to connect.")), - cfg.StrOpt('key_file', - help=_("Location of key file.")), - cfg.StrOpt('cert_file', - help=_("Location of certificate file.")), - cfg.BoolOpt('insecure_ssl', default=False, - help=_("Disable SSL certificate verification.")), - cfg.IntOpt('api_max_attempts', default=3, - help=_("Maximum attempts per OFC API request. " - "NEC plugin retries API request to OFC " - "when OFC returns ServiceUnavailable (503). 
" - "The value must be greater than 0.")), -] - -provider_opts = [ - cfg.StrOpt('default_router_provider', - default='l3-agent', - help=_('Default router provider to use.')), - cfg.ListOpt('router_providers', - default=['l3-agent', 'openflow'], - help=_('List of enabled router providers.')) -] - - -def register_plugin_opts(): - cfg.CONF.register_opts(ofc_opts, "OFC") - cfg.CONF.register_opts(provider_opts, "PROVIDER") - - -def register_agent_opts(): - cfg.CONF.register_opts(agent_opts, "AGENT") - cfg.CONF.register_opts(ovs_opts, "OVS") - config.register_agent_state_opts_helper(cfg.CONF) diff --git a/neutron/plugins/nec/db/models.py b/neutron/plugins/nec/db/models.py deleted file mode 100644 index cce043eeeb8..00000000000 --- a/neutron/plugins/nec/db/models.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.db import l3_db -from neutron.db import model_base -from neutron.db import models_v2 - - -# New mapping tables. 
- - -class OFCId(object): - """Resource ID on OpenFlow Controller.""" - ofc_id = sa.Column(sa.String(255), unique=True, nullable=False) - - -class NeutronId(object): - """Logical ID on Neutron.""" - neutron_id = sa.Column(sa.String(36), primary_key=True) - - -class OFCTenantMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Tenant on OpenFlow Network/Controller.""" - - -class OFCNetworkMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Network on OpenFlow Network/Controller.""" - - -class OFCPortMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Port on OpenFlow Network/Controller.""" - - -class OFCRouterMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a router on OpenFlow Network/Controller.""" - - -class OFCFilterMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Filter on OpenFlow Network/Controller.""" - - -class PortInfo(model_base.BASEV2): - """Represents a Virtual Interface.""" - id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - datapath_id = sa.Column(sa.String(36), nullable=False) - port_no = sa.Column(sa.Integer, nullable=False) - vlan_id = sa.Column(sa.Integer, nullable=False) - mac = sa.Column(sa.String(32), nullable=False) - port = orm.relationship( - models_v2.Port, - backref=orm.backref("portinfo", - lazy='joined', uselist=False, - cascade='delete')) - - -class RouterProvider(models_v2.model_base.BASEV2): - """Represents a binding of router_id to provider.""" - provider = sa.Column(sa.String(255)) - router_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', ondelete="CASCADE"), - primary_key=True) - - router = orm.relationship(l3_db.Router, uselist=False, - backref=orm.backref('provider', uselist=False, - lazy='joined', - cascade='delete')) - - -class PacketFilter(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): - """Represents a packet filter.""" - name = sa.Column(sa.String(255)) - network_id = 
sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - nullable=False) - priority = sa.Column(sa.Integer, nullable=False) - action = sa.Column(sa.String(16), nullable=False) - # condition - in_port = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - nullable=True) - src_mac = sa.Column(sa.String(32), nullable=False) - dst_mac = sa.Column(sa.String(32), nullable=False) - eth_type = sa.Column(sa.Integer, nullable=False) - src_cidr = sa.Column(sa.String(64), nullable=False) - dst_cidr = sa.Column(sa.String(64), nullable=False) - protocol = sa.Column(sa.String(16), nullable=False) - src_port = sa.Column(sa.Integer, nullable=False) - dst_port = sa.Column(sa.Integer, nullable=False) - # status - admin_state_up = sa.Column(sa.Boolean(), nullable=False) - status = sa.Column(sa.String(16), nullable=False) - - network = orm.relationship( - models_v2.Network, - backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), - uselist=False) - in_port_ref = orm.relationship( - models_v2.Port, - backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), - primaryjoin="Port.id==PacketFilter.in_port", - uselist=False) diff --git a/neutron/plugins/nec/extensions/packetfilter.py b/neutron/plugins/nec/extensions/packetfilter.py deleted file mode 100644 index 3d89cf4e25a..00000000000 --- a/neutron/plugins/nec/extensions/packetfilter.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2012-2013 NEC Corporation. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import base -from neutron.common import constants -from neutron.common import exceptions -from neutron import manager -from neutron.quota import resource as quota_resource -from neutron.quota import resource_registry - - -quota_packet_filter_opts = [ - cfg.IntOpt('quota_packet_filter', - default=100, - help=_("Number of packet_filters allowed per tenant, " - "-1 for unlimited")) -] -cfg.CONF.register_opts(quota_packet_filter_opts, 'QUOTAS') - - -class PacketFilterNotFound(exceptions.NotFound): - message = _("PacketFilter %(id)s could not be found") - - -class PacketFilterIpVersionNonSupported(exceptions.BadRequest): - message = _("IP version %(version)s is not supported for %(field)s " - "(%(value)s is specified)") - - -class PacketFilterInvalidPriority(exceptions.BadRequest): - message = _("Packet Filter priority should be %(min)s-%(max)s (included)") - - -class PacketFilterUpdateNotSupported(exceptions.BadRequest): - message = _("%(field)s field cannot be updated") - - -class PacketFilterDuplicatedPriority(exceptions.BadRequest): - message = _("The backend does not support duplicated priority. " - "Priority %(priority)s is in use") - - -class PacketFilterEtherTypeProtocolMismatch(exceptions.Conflict): - message = _("Ether Type '%(eth_type)s' conflicts with protocol " - "'%(protocol)s'. 
Update or clear protocol before " - "changing ether type.") - - -def convert_to_int_dec_and_hex(data): - try: - return int(data, 0) - except (ValueError, TypeError): - pass - try: - return int(data) - except (ValueError, TypeError): - msg = _("'%s' is not a integer") % data - raise exceptions.InvalidInput(error_message=msg) - - -def convert_to_int_or_none(data): - if data is None: - return - return convert_to_int_dec_and_hex(data) - - -PROTO_NAME_ARP = 'arp' -SUPPORTED_PROTOCOLS = [constants.PROTO_NAME_ICMP, - constants.PROTO_NAME_TCP, - constants.PROTO_NAME_UDP, - PROTO_NAME_ARP] -ALLOW_ACTIONS = ['allow', 'accept'] -DROP_ACTIONS = ['drop', 'deny'] -SUPPORTED_ACTIONS = ALLOW_ACTIONS + DROP_ACTIONS - -ALIAS = 'packet-filter' -RESOURCE = 'packet_filter' -COLLECTION = 'packet_filters' -PACKET_FILTER_ACTION_REGEX = '(?i)^(%s)$' % '|'.join(SUPPORTED_ACTIONS) -PACKET_FILTER_PROTOCOL_REGEX = ('(?i)^(%s|0x[0-9a-fA-F]+|[0-9]+|)$' % - '|'.join(SUPPORTED_PROTOCOLS)) -PACKET_FILTER_ATTR_PARAMS = { - 'id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, 'default': '', - 'validate': {'type:string': attributes.NAME_MAX_LEN}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': attributes.TENANT_ID_MAX_LEN}, - 'required_by_policy': True, - 'is_visible': True}, - 'network_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'admin_state_up': {'allow_post': True, 'allow_put': True, - 'default': True, - 'convert_to': attributes.convert_to_boolean, - 'is_visible': True}, - 'status': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'action': {'allow_post': True, 'allow_put': True, - 'validate': {'type:regex': PACKET_FILTER_ACTION_REGEX}, - 'is_visible': True}, - 'priority': {'allow_post': True, 'allow_put': True, - 'convert_to': convert_to_int_dec_and_hex, - 
'is_visible': True}, - 'in_port': {'allow_post': True, 'allow_put': False, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'src_mac': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:mac_address_or_none': None}, - 'is_visible': True}, - 'dst_mac': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:mac_address_or_none': None}, - 'is_visible': True}, - 'eth_type': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, - 'src_cidr': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:subnet_or_none': None}, - 'is_visible': True}, - 'dst_cidr': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:subnet_or_none': None}, - 'is_visible': True}, - 'protocol': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:regex_or_none': - PACKET_FILTER_PROTOCOL_REGEX}, - 'is_visible': True}, - 'src_port': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, - 'dst_port': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, -} -PACKET_FILTER_ATTR_MAP = {COLLECTION: PACKET_FILTER_ATTR_PARAMS} - - -class Packetfilter(extensions.ExtensionDescriptor): - @classmethod - def get_name(cls): - return ALIAS - - @classmethod - def get_alias(cls): - return ALIAS - - @classmethod - def get_description(cls): - return "PacketFilters on OFC" - - @classmethod - def get_updated(cls): - return "2013-07-16T00:00:00+09:00" - - @classmethod - def get_resources(cls): - qresource = quota_resource.CountableResource( 
- RESOURCE, quota_resource._count_resource, 'quota_%s' % RESOURCE) - - resource_registry.register_resource(qresource) - - resource = base.create_resource(COLLECTION, RESOURCE, - manager.NeutronManager.get_plugin(), - PACKET_FILTER_ATTR_PARAMS) - pf_ext = extensions.ResourceExtension( - COLLECTION, resource, attr_map=PACKET_FILTER_ATTR_PARAMS) - return [pf_ext] - - def get_extended_resources(self, version): - if version == "2.0": - return PACKET_FILTER_ATTR_MAP - else: - return {} diff --git a/neutron/plugins/nec/extensions/router_provider.py b/neutron/plugins/nec/extensions/router_provider.py deleted file mode 100644 index 7c7f4afb416..00000000000 --- a/neutron/plugins/nec/extensions/router_provider.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from neutron.api import extensions -from neutron.api.v2 import attributes - - -LOG = logging.getLogger(__name__) - -ROUTER_PROVIDER = 'provider' - -ROUTER_PROVIDER_ATTRIBUTE = { - 'routers': {ROUTER_PROVIDER: - {'allow_post': True, - 'allow_put': False, - 'is_visible': True, - 'default': attributes.ATTR_NOT_SPECIFIED} - } -} - - -class Router_provider(extensions.ExtensionDescriptor): - @classmethod - def get_name(cls): - return "Router Provider" - - @classmethod - def get_alias(cls): - return "router_provider" - - @classmethod - def get_description(cls): - return "Router Provider Support" - - @classmethod - def get_updated(cls): - return "2013-08-20T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return ROUTER_PROVIDER_ATTRIBUTE - else: - return {} diff --git a/neutron/plugins/nec/nec_plugin.py b/neutron/plugins/nec/nec_plugin.py deleted file mode 100644 index c434c711d9b..00000000000 --- a/neutron/plugins/nec/nec_plugin.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2012-2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from networking_nec.plugins.openflow import plugin - -from neutron.plugins.nec import config as nec_config - - -class NECPluginV2(plugin.NECPluginV2Impl): - - _supported_extension_aliases = ["agent", - "allowed-address-pairs", - "binding", - "dhcp_agent_scheduler", - "external-net", - "ext-gw-mode", - "extraroute", - "l3_agent_scheduler", - "packet-filter", - "quotas", - "router", - "router_provider", - "security-group", - ] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - self.setup_extension_aliases(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - nec_config.register_plugin_opts() - super(NECPluginV2, self).__init__() diff --git a/neutron/plugins/nec/requirements.txt b/neutron/plugins/nec/requirements.txt deleted file mode 100644 index fafd1a9c4f0..00000000000 --- a/neutron/plugins/nec/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -networking-nec>=2015.1,<2015.2 diff --git a/neutron/plugins/opencontrail/contrail_plugin.py b/neutron/plugins/opencontrail/contrail_plugin.py index caf97a233ba..b83637d0ae8 100644 --- a/neutron/plugins/opencontrail/contrail_plugin.py +++ b/neutron/plugins/opencontrail/contrail_plugin.py @@ -40,6 +40,7 @@ opencontrail_opts = [ cfg.CONF.register_opts(opencontrail_opts, 'CONTRAIL') +VIF_TYPE_VROUTER = 'vrouter' CONTRAIL_EXCEPTION_MAP = { requests.codes.not_found: c_exc.ContrailNotFoundError, requests.codes.conflict: c_exc.ContrailConflictError, @@ -72,7 +73,7 @@ class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2, """return VIF type and details.""" binding = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER, + portbindings.VIF_TYPE: VIF_TYPE_VROUTER, portbindings.VIF_DETAILS: { # TODO(praneetb): Replace with new VIF security details portbindings.CAP_PORT_FILTER: diff --git a/neutron/plugins/plumgrid/README b/neutron/plugins/plumgrid/README deleted file mode 100644 index 
5fc4050e4cb..00000000000 --- a/neutron/plugins/plumgrid/README +++ /dev/null @@ -1,14 +0,0 @@ -PLUMgrid Neutron Plugin -======================== - -PLUMgrid Neutron Plugin for PLUMgrid Open Networking Suite - -* Full plugin code is available at: - * https://github.com/stackforge/networking-plumgrid - -* PyPI package location: - * https://pypi.python.org/pypi/networking-plumgrid - -* For config, install and other details, please refer to - wiki page: - * http://wiki.openstack.org/PLUMgrid-Neutron diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py b/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py deleted file mode 100644 index e69d6d65348..00000000000 --- a/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from networking_plumgrid.neutron.plugins import plugin - - -class NeutronPluginPLUMgridV2(plugin.NeutronPluginPLUMgridV2): - - supported_extension_aliases = ["binding", "external-net", "extraroute", - "provider", "quotas", "router", - "security-group"] - - def __init__(self): - super(NeutronPluginPLUMgridV2, self).__init__() diff --git a/neutron/plugins/plumgrid/requirements.txt b/neutron/plugins/plumgrid/requirements.txt deleted file mode 100644 index 9d9d8a09cff..00000000000 --- a/neutron/plugins/plumgrid/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -networking-plumgrid diff --git a/neutron/plugins/vmware/README b/neutron/plugins/vmware/README deleted file mode 100644 index fac935450a2..00000000000 --- a/neutron/plugins/vmware/README +++ /dev/null @@ -1,14 +0,0 @@ -VMware Neutron plugins -=========================== - -Neutron plugins for VMware NSX family products - -* For configuration information, supported extensions, and architectural - documentation please refer to the plugin wiki page: - https://wiki.openstack.org/wiki/Neutron/VMware_NSX_plugins - -* Full plugin code available at: - * http://git.openstack.org/cgit/openstack/vmware-nsx - * https://github.com/openstack/vmware-nsx - -* Pypi location: https://pypi.python.org/pypi/vmware-nsx diff --git a/neutron/plugins/vmware/__init__.py b/neutron/plugins/vmware/__init__.py index a6281888841..e69de29bb2d 100644 --- a/neutron/plugins/vmware/__init__.py +++ b/neutron/plugins/vmware/__init__.py @@ -1,3 +0,0 @@ -import os - -NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') diff --git a/neutron/plugins/vmware/common/nsxv_constants.py b/neutron/plugins/vmware/common/nsxv_constants.py deleted file mode 100644 index 3d1ae85f343..00000000000 --- a/neutron/plugins/vmware/common/nsxv_constants.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2015 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Edge size -COMPACT = 'compact' -LARGE = 'large' -XLARGE = 'xlarge' -QUADLARGE = 'quadlarge' - - -# Edge type -SERVICE_EDGE = 'service' -VDR_EDGE = 'vdr' - -# Internal element purpose -INTER_EDGE_PURPOSE = 'inter_edge_net' diff --git a/neutron/plugins/vmware/dbexts/nsx_models.py b/neutron/plugins/vmware/dbexts/nsx_models.py deleted file mode 100644 index 7ca671323fb..00000000000 --- a/neutron/plugins/vmware/dbexts/nsx_models.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2015 VMware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -NSX data models. - -This module defines data models used by the VMware NSX plugin family. 
- -""" - -import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy import sql - -from neutron.db import model_base -from neutron.db import models_v2 - - -class TzNetworkBinding(model_base.BASEV2): - """Represents a binding of a virtual network with a transport zone. - - This model class associates a Neutron network with a transport zone; - optionally a vlan ID might be used if the binding type is 'bridge' - """ - __tablename__ = 'tz_network_bindings' - - # TODO(arosen) - it might be worth while refactoring the how this data - # is stored later so every column does not need to be a primary key. - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - # 'flat', 'vlan', stt' or 'gre' - binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', - name='tz_network_bindings_binding_type'), - nullable=False, primary_key=True) - phy_uuid = sa.Column(sa.String(36), primary_key=True, default='') - vlan_id = sa.Column(sa.Integer, primary_key=True, - autoincrement=False, default=0) - - def __init__(self, network_id, binding_type, phy_uuid, vlan_id): - self.network_id = network_id - self.binding_type = binding_type - self.phy_uuid = phy_uuid - self.vlan_id = vlan_id - - def __repr__(self): - return "" % (self.network_id, - self.binding_type, - self.phy_uuid, - self.vlan_id) - - -class NeutronNsxNetworkMapping(model_base.BASEV2): - """Maps neutron network identifiers to NSX identifiers. - - Because of chained logical switches more than one mapping might exist - for a single Neutron network. - """ - __tablename__ = 'neutron_nsx_network_mappings' - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete='CASCADE'), - primary_key=True) - nsx_id = sa.Column(sa.String(36), primary_key=True) - - -class NeutronNsxSecurityGroupMapping(model_base.BASEV2): - """Backend mappings for Neutron Security Group identifiers. 
- - This class maps a neutron security group identifier to the corresponding - NSX security profile identifier. - """ - - __tablename__ = 'neutron_nsx_security_group_mappings' - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('securitygroups.id', - ondelete="CASCADE"), - primary_key=True) - nsx_id = sa.Column(sa.String(36), primary_key=True) - - -class NeutronNsxPortMapping(model_base.BASEV2): - """Represents the mapping between neutron and nsx port uuids.""" - - __tablename__ = 'neutron_nsx_port_mappings' - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - nsx_switch_id = sa.Column(sa.String(36)) - nsx_port_id = sa.Column(sa.String(36), nullable=False) - - def __init__(self, neutron_id, nsx_switch_id, nsx_port_id): - self.neutron_id = neutron_id - self.nsx_switch_id = nsx_switch_id - self.nsx_port_id = nsx_port_id - - -class NeutronNsxRouterMapping(model_base.BASEV2): - """Maps neutron router identifiers to NSX identifiers.""" - __tablename__ = 'neutron_nsx_router_mappings' - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', ondelete='CASCADE'), - primary_key=True) - nsx_id = sa.Column(sa.String(36)) - - -class MultiProviderNetworks(model_base.BASEV2): - """Networks provisioned through multiprovider extension.""" - - __tablename__ = 'multi_provider_networks' - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - - def __init__(self, network_id): - self.network_id = network_id - - -class NetworkConnection(model_base.BASEV2, models_v2.HasTenant): - """Defines a connection between a network gateway and a network.""" - # We use port_id as the primary key as one can connect a gateway - # to a network in multiple ways (and we cannot use the same port form - # more than a single gateway) - network_gateway_id = sa.Column(sa.String(36), - sa.ForeignKey('networkgateways.id', - ondelete='CASCADE')) - network_id = 
sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete='CASCADE')) - segmentation_type = sa.Column( - sa.Enum('flat', 'vlan', - name='networkconnections_segmentation_type')) - segmentation_id = sa.Column(sa.Integer) - __table_args__ = (sa.UniqueConstraint(network_gateway_id, - segmentation_type, - segmentation_id), - model_base.BASEV2.__table_args__) - # Also, storing port id comes back useful when disconnecting a network - # from a gateway - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete='CASCADE'), - primary_key=True) - - -class NetworkGatewayDeviceReference(model_base.BASEV2): - id = sa.Column(sa.String(36), primary_key=True) - network_gateway_id = sa.Column(sa.String(36), - sa.ForeignKey('networkgateways.id', - ondelete='CASCADE'), - primary_key=True) - interface_name = sa.Column(sa.String(64), primary_key=True) - - -class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - nsx_id = sa.Column(sa.String(36)) - # Optional name for the gateway device - name = sa.Column(sa.String(255)) - # Transport connector type. 
Not using enum as range of - # connector types might vary with backend version - connector_type = sa.Column(sa.String(10)) - # Transport connector IP Address - connector_ip = sa.Column(sa.String(64)) - # operational status - status = sa.Column(sa.String(16)) - - -class NetworkGateway(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Defines the data model for a network gateway.""" - name = sa.Column(sa.String(255)) - # Tenant id is nullable for this resource - tenant_id = sa.Column(sa.String(36)) - default = sa.Column(sa.Boolean()) - devices = orm.relationship(NetworkGatewayDeviceReference, - backref='networkgateways', - cascade='all,delete') - network_connections = orm.relationship(NetworkConnection, lazy='joined') - - -class MacLearningState(model_base.BASEV2): - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False) - - # Add a relationship to the Port model using the backref attribute. - # This will instruct SQLAlchemy to eagerly load this association. 
- port = orm.relationship( - models_v2.Port, - backref=orm.backref("mac_learning_state", lazy='joined', - uselist=False, cascade='delete')) - - -class LsnPort(models_v2.model_base.BASEV2): - - __tablename__ = 'lsn_port' - - lsn_port_id = sa.Column(sa.String(36), primary_key=True) - - lsn_id = sa.Column(sa.String(36), - sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"), - nullable=False) - sub_id = sa.Column(sa.String(36), nullable=False, unique=True) - mac_addr = sa.Column(sa.String(32), nullable=False, unique=True) - - def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): - self.lsn_port_id = lsn_port_id - self.lsn_id = lsn_id - self.sub_id = subnet_id - self.mac_addr = mac_address - - -class Lsn(models_v2.model_base.BASEV2): - __tablename__ = 'lsn' - - lsn_id = sa.Column(sa.String(36), primary_key=True) - net_id = sa.Column(sa.String(36), nullable=False) - - def __init__(self, net_id, lsn_id): - self.net_id = net_id - self.lsn_id = lsn_id - - -class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): - name = sa.Column(sa.String(255)) - default = sa.Column(sa.Boolean, default=False, server_default=sql.false()) - min = sa.Column(sa.Integer, nullable=False) - max = sa.Column(sa.Integer, nullable=True) - qos_marking = sa.Column(sa.Enum('untrusted', 'trusted', - name='qosqueues_qos_marking')) - dscp = sa.Column(sa.Integer) - - -class PortQueueMapping(model_base.BASEV2): - port_id = sa.Column(sa.String(36), - sa.ForeignKey("ports.id", ondelete="CASCADE"), - primary_key=True) - - queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"), - primary_key=True) - - # Add a relationship to the Port model adding a backref which will - # allow SQLAlchemy for eagerly load the queue binding - port = orm.relationship( - models_v2.Port, - backref=orm.backref("qos_queue", uselist=False, - cascade='delete', lazy='joined')) - - -class NetworkQueueMapping(model_base.BASEV2): - network_id = sa.Column(sa.String(36), - sa.ForeignKey("networks.id", 
ondelete="CASCADE"), - primary_key=True) - - queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id", - ondelete="CASCADE")) - - # Add a relationship to the Network model adding a backref which will - # allow SQLAlcremy for eagerly load the queue binding - network = orm.relationship( - models_v2.Network, - backref=orm.backref("qos_queue", uselist=False, - cascade='delete', lazy='joined')) diff --git a/neutron/plugins/vmware/dbexts/nsxv_models.py b/neutron/plugins/vmware/dbexts/nsxv_models.py deleted file mode 100644 index 2edb40061ea..00000000000 --- a/neutron/plugins/vmware/dbexts/nsxv_models.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2015 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.db import l3_db -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.plugins.vmware.common import nsxv_constants - - -class NsxvRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription): - """Represents the mapping between neutron router and vShield Edge.""" - - __tablename__ = 'nsxv_router_bindings' - - # no ForeignKey to routers.id because for now, a router can be removed - # from routers when delete_router is executed, but the binding is only - # removed after the Edge is deleted - router_id = sa.Column(sa.String(36), - primary_key=True) - edge_id = sa.Column(sa.String(36), - nullable=True) - lswitch_id = sa.Column(sa.String(36), - nullable=True) - appliance_size = sa.Column(sa.Enum( - nsxv_constants.COMPACT, - nsxv_constants.LARGE, - nsxv_constants.XLARGE, - nsxv_constants.QUADLARGE, - name='nsxv_router_bindings_appliance_size')) - edge_type = sa.Column(sa.Enum(nsxv_constants.SERVICE_EDGE, - nsxv_constants.VDR_EDGE, - name='nsxv_router_bindings_edge_type')) - - -class NsxvEdgeVnicBinding(model_base.BASEV2): - """Represents mapping between vShield Edge vnic and neutron netowrk.""" - - __tablename__ = 'nsxv_edge_vnic_bindings' - - edge_id = sa.Column(sa.String(36), - primary_key=True) - vnic_index = sa.Column(sa.Integer(), - primary_key=True) - tunnel_index = sa.Column(sa.Integer(), - primary_key=True) - network_id = sa.Column(sa.String(36), nullable=True) - - -class NsxvEdgeDhcpStaticBinding(model_base.BASEV2): - """Represents mapping between mac addr and bindingId.""" - - __tablename__ = 'nsxv_edge_dhcp_static_bindings' - - edge_id = sa.Column(sa.String(36), primary_key=True) - mac_address = sa.Column(sa.String(32), primary_key=True) - binding_id = sa.Column(sa.String(36), nullable=False) - - -class NsxvInternalNetworks(model_base.BASEV2): - """Represents internal networks between NSXV plugin elements.""" - - __tablename__ = 'nsxv_internal_networks' - - 
network_purpose = sa.Column( - sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, - name='nsxv_internal_networks_purpose'), - primary_key=True) - network_id = sa.Column(sa.String(36), - sa.ForeignKey("networks.id", ondelete="CASCADE"), - nullable=True) - - -class NsxvInternalEdges(model_base.BASEV2): - """Represents internal Edge appliances for NSXV plugin operations.""" - - __tablename__ = 'nsxv_internal_edges' - - ext_ip_address = sa.Column(sa.String(64), primary_key=True) - router_id = sa.Column(sa.String(36), nullable=True) - purpose = sa.Column( - sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, - name='nsxv_internal_edges_purpose')) - - -class NsxvSecurityGroupSectionMapping(model_base.BASEV2): - """Backend mappings for Neutron Rule Sections. - - This class maps a neutron security group identifier to the corresponding - NSX layer 3 section. - """ - - __tablename__ = 'nsxv_security_group_section_mappings' - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('securitygroups.id', - ondelete="CASCADE"), - primary_key=True) - ip_section_id = sa.Column(sa.String(100)) - - -class NsxvRuleMapping(model_base.BASEV2): - """Backend mappings for Neutron Rule Sections. - - This class maps a neutron security group identifier to the corresponding - NSX layer 3 and layer 2 sections. 
- """ - - __tablename__ = 'nsxv_rule_mappings' - - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('securitygrouprules.id', - ondelete="CASCADE"), - primary_key=True) - nsx_rule_id = sa.Column(sa.String(36), primary_key=True) - - -class NsxvPortVnicMapping(model_base.BASEV2): - """Maps neutron port to NSXv VM Vnic Id.""" - - __tablename__ = 'nsxv_port_vnic_mappings' - - neutron_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - nsx_id = sa.Column(sa.String(42), primary_key=True) - - -class NsxvRouterExtAttributes(model_base.BASEV2): - """Router attributes managed by NSX plugin extensions.""" - - __tablename__ = 'nsxv_router_ext_attributes' - - router_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', ondelete="CASCADE"), - primary_key=True) - distributed = sa.Column(sa.Boolean, default=False, nullable=False) - router_type = sa.Column( - sa.Enum('shared', 'exclusive', - name='nsxv_router_type'), - default='exclusive', nullable=False) - service_router = sa.Column(sa.Boolean, default=False, nullable=False) - # Add a relationship to the Router model in order to instruct - # SQLAlchemy to eagerly load this association - router = orm.relationship( - l3_db.Router, - backref=orm.backref("nsx_attributes", lazy='joined', - uselist=False, cascade='delete')) - - -class NsxvTzNetworkBinding(model_base.BASEV2): - """Represents a binding of a virtual network with a transport zone. 
- - This model class associates a Neutron network with a transport zone; - optionally a vlan ID might be used if the binding type is 'bridge' - """ - - __tablename__ = 'nsxv_tz_network_bindings' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - binding_type = sa.Column( - sa.Enum('flat', 'vlan', 'portgroup', - name='nsxv_tz_network_bindings_binding_type'), - nullable=False, primary_key=True) - phy_uuid = sa.Column(sa.String(36), primary_key=True, nullable=True) - vlan_id = sa.Column(sa.Integer, primary_key=True, nullable=True, - autoincrement=False) - - def __init__(self, network_id, binding_type, phy_uuid, vlan_id): - self.network_id = network_id - self.binding_type = binding_type - self.phy_uuid = phy_uuid - self.vlan_id = vlan_id - - def __repr__(self): - return "" % (self.network_id, - self.binding_type, - self.phy_uuid, - self.vlan_id) - - -class NsxvPortIndexMapping(model_base.BASEV2): - """Associates attached Neutron ports with the instance VNic index.""" - - __tablename__ = 'nsxv_port_index_mappings' - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - device_id = sa.Column(sa.String(255), nullable=False) - index = sa.Column(sa.Integer, nullable=False) - __table_args__ = (sa.UniqueConstraint(device_id, index), - model_base.BASEV2.__table_args__) - - # Add a relationship to the Port model in order to instruct SQLAlchemy to - # eagerly read port vnic-index - port = orm.relationship( - models_v2.Port, - backref=orm.backref("vnic_index", lazy='joined', - uselist=False, cascade='delete')) - - -class NsxvEdgeFirewallRuleBinding(model_base.BASEV2): - """Mapping between firewall rule and edge firewall rule_id.""" - - __tablename__ = 'nsxv_firewall_rule_bindings' - - rule_id = sa.Column(sa.String(36), - primary_key=True) - edge_id = sa.Column(sa.String(36), primary_key=True) - rule_vse_id = sa.Column(sa.String(36)) - - -class 
NsxvSpoofGuardPolicyNetworkMapping(model_base.BASEV2): - """Mapping between SpoofGuard and neutron networks""" - - __tablename__ = 'nsxv_spoofguard_policy_network_mappings' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete='CASCADE'), - primary_key=True, - nullable=False) - policy_id = sa.Column(sa.String(36), nullable=False) - - -class NsxvVdrDhcpBinding(model_base.BASEV2): - """1:1 mapping between VDR and a DHCP Edge.""" - - __tablename__ = 'nsxv_vdr_dhcp_bindings' - - vdr_router_id = sa.Column(sa.String(36), primary_key=True) - dhcp_router_id = sa.Column(sa.String(36), nullable=False) - dhcp_edge_id = sa.Column(sa.String(36), nullable=False) - - __table_args__ = ( - sa.UniqueConstraint( - dhcp_router_id, - name='unique_nsxv_vdr_dhcp_bindings0dhcp_router_id'), - sa.UniqueConstraint( - dhcp_edge_id, - name='unique_nsxv_vdr_dhcp_bindings0dhcp_edge_id')) diff --git a/neutron/plugins/vmware/dbexts/vcns_models.py b/neutron/plugins/vmware/dbexts/vcns_models.py deleted file mode 100644 index 50447436d5c..00000000000 --- a/neutron/plugins/vmware/dbexts/vcns_models.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 - - -class VcnsRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription): - """Represents the mapping between neutron router and vShield Edge.""" - - __tablename__ = 'vcns_router_bindings' - - # no ForeignKey to routers.id because for now, a router can be removed - # from routers when delete_router is executed, but the binding is only - # removed after the Edge is deleted - router_id = sa.Column(sa.String(36), - primary_key=True) - edge_id = sa.Column(sa.String(16), - nullable=True) - lswitch_id = sa.Column(sa.String(36), - nullable=False) diff --git a/neutron/plugins/vmware/extensions/advancedserviceproviders.py b/neutron/plugins/vmware/extensions/advancedserviceproviders.py deleted file mode 100644 index f82fc3aa675..00000000000 --- a/neutron/plugins/vmware/extensions/advancedserviceproviders.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.api import extensions - -# Attribute Map -ADV_SERVICE_PROVIDERS = 'advanced_service_providers' - - -EXTENDED_ATTRIBUTES_2_0 = { - 'subnets': { - ADV_SERVICE_PROVIDERS: - {'allow_post': False, - 'allow_put': False, - 'is_visible': True, - 'default': None}}} - - -class Advancedserviceproviders(extensions.ExtensionDescriptor): - @classmethod - def get_name(cls): - return "Advanced Service Providers" - - @classmethod - def get_alias(cls): - return "advanced-service-providers" - - @classmethod - def get_description(cls): - return "Id of the advanced service providers attached to the subnet" - - @classmethod - def get_updated(cls): - return "2014-12-11T12:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/extensions/lsn.py b/neutron/plugins/vmware/extensions/lsn.py deleted file mode 100644 index 28ea8a0d79f..00000000000 --- a/neutron/plugins/vmware/extensions/lsn.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron.api import extensions -from neutron.api.v2 import base -from neutron import manager - - -EXT_ALIAS = 'lsn' -COLLECTION_NAME = "%ss" % EXT_ALIAS - -RESOURCE_ATTRIBUTE_MAP = { - COLLECTION_NAME: { - 'network': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, - 'is_visible': True}, - 'report': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'required_by_policy': True, - 'validate': {'type:string': None}, 'is_visible': True}, - }, -} - - -class Lsn(extensions.ExtensionDescriptor): - """Enable LSN configuration for Neutron NSX networks.""" - - @classmethod - def get_name(cls): - return "Logical Service Node configuration" - - @classmethod - def get_alias(cls): - return EXT_ALIAS - - @classmethod - def get_description(cls): - return "Enables configuration of NSX Logical Services Node." - - @classmethod - def get_updated(cls): - return "2013-10-05T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - resource_name = EXT_ALIAS - collection_name = resource_name.replace('_', '-') + "s" - params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) - controller = base.create_resource(collection_name, - resource_name, - plugin, params, allow_bulk=False) - ex = extensions.ResourceExtension(collection_name, controller) - exts.append(ex) - return exts - - def get_extended_resources(self, version): - if version == "2.0": - return RESOURCE_ATTRIBUTE_MAP - else: - return {} diff --git a/neutron/plugins/vmware/extensions/maclearning.py b/neutron/plugins/vmware/extensions/maclearning.py deleted file mode 100644 index c73618ab4b5..00000000000 --- a/neutron/plugins/vmware/extensions/maclearning.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.api import extensions -from neutron.api.v2 import attributes - - -MAC_LEARNING = 'mac_learning_enabled' -EXTENDED_ATTRIBUTES_2_0 = { - 'ports': { - MAC_LEARNING: {'allow_post': True, 'allow_put': True, - 'convert_to': attributes.convert_to_boolean, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - } -} - - -class Maclearning(extensions.ExtensionDescriptor): - """Extension class supporting port mac learning.""" - - @classmethod - def get_name(cls): - return "MAC Learning" - - @classmethod - def get_alias(cls): - return "mac-learning" - - @classmethod - def get_description(cls): - return "Provides MAC learning capabilities." - - @classmethod - def get_updated(cls): - return "2013-05-1T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - return [] - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/extensions/networkgw.py b/neutron/plugins/vmware/extensions/networkgw.py deleted file mode 100644 index aac070360a2..00000000000 --- a/neutron/plugins/vmware/extensions/networkgw.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2013 VMware. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import abc - -from oslo_config import cfg - -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import resource_helper - -GATEWAY_RESOURCE_NAME = "network_gateway" -DEVICE_RESOURCE_NAME = "gateway_device" -# Use dash for alias and collection name -EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-') -NETWORK_GATEWAYS = "%ss" % EXT_ALIAS -GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-') -DEVICE_ID_ATTR = 'id' -IFACE_NAME_ATTR = 'interface_name' - - -# TODO(salv-orlando): This type definition is duplicated into -# openstack/vmware-nsx. This temporary duplication should be removed once the -# plugin decomposition is finished. 
-# Allowed network types for the NSX Plugin -class NetworkTypes(object): - """Allowed provider network types for the NSX Plugin.""" - L3_EXT = 'l3_ext' - STT = 'stt' - GRE = 'gre' - FLAT = 'flat' - VLAN = 'vlan' - BRIDGE = 'bridge' - -# Attribute Map for Network Gateway Resource -# TODO(salvatore-orlando): add admin state as other neutron resources -RESOURCE_ATTRIBUTE_MAP = { - NETWORK_GATEWAYS: { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': attributes.NAME_MAX_LEN}, - 'is_visible': True, 'default': ''}, - 'default': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'devices': {'allow_post': True, 'allow_put': False, - 'validate': {'type:device_list': None}, - 'is_visible': True}, - 'ports': {'allow_post': False, 'allow_put': False, - 'default': [], - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': - attributes.TENANT_ID_MAX_LEN}, - 'required_by_policy': True, - 'is_visible': True} - }, - GATEWAY_DEVICES: { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': attributes.NAME_MAX_LEN}, - 'is_visible': True, 'default': ''}, - 'client_certificate': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': None}, - 'is_visible': True}, - 'connector_type': {'allow_post': True, 'allow_put': True, - 'validate': {'type:connector_type': None}, - 'is_visible': True}, - 'connector_ip': {'allow_post': True, 'allow_put': True, - 'validate': {'type:ip_address': None}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': - attributes.TENANT_ID_MAX_LEN}, - 'required_by_policy': True, - 'is_visible': True}, - 'status': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - } -} - - -def _validate_device_list(data, 
valid_values=None): - """Validate the list of service definitions.""" - if not data: - # Devices must be provided - msg = _("Cannot create a gateway with an empty device list") - return msg - try: - for device in data: - key_specs = {DEVICE_ID_ATTR: - {'type:regex': attributes.UUID_PATTERN, - 'required': True}, - IFACE_NAME_ATTR: - {'type:string': None, - 'required': False}} - err_msg = attributes._validate_dict( - device, key_specs=key_specs) - if err_msg: - return err_msg - unexpected_keys = [key for key in device if key not in key_specs] - if unexpected_keys: - err_msg = (_("Unexpected keys found in device description:%s") - % ",".join(unexpected_keys)) - return err_msg - except TypeError: - return (_("%s: provided data are not iterable") % - _validate_device_list.__name__) - - -def _validate_connector_type(data, valid_values=None): - if not data: - # A connector type is compulsory - msg = _("A connector type is required to create a gateway device") - return msg - connector_types = (valid_values if valid_values else - [NetworkTypes.GRE, - NetworkTypes.STT, - NetworkTypes.BRIDGE, - 'ipsec%s' % NetworkTypes.GRE, - 'ipsec%s' % NetworkTypes.STT]) - if data not in connector_types: - msg = _("Unknown connector type: %s") % data - return msg - - -nw_gw_quota_opts = [ - cfg.IntOpt('quota_network_gateway', - default=5, - help=_('Number of network gateways allowed per tenant, ' - '-1 for unlimited')) -] - -cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS') - -attributes.validators['type:device_list'] = _validate_device_list -attributes.validators['type:connector_type'] = _validate_connector_type - - -class Networkgw(extensions.ExtensionDescriptor): - """API extension for Layer-2 Gateway support. - - The Layer-2 gateway feature allows for connecting neutron networks - with external networks at the layer-2 level. No assumption is made on - the location of the external network, which might not even be directly - reachable from the hosts where the VMs are deployed. 
- - This is achieved by instantiating 'network gateways', and then connecting - Neutron network to them. - """ - - @classmethod - def get_name(cls): - return "Network Gateway" - - @classmethod - def get_alias(cls): - return EXT_ALIAS - - @classmethod - def get_description(cls): - return "Connects Neutron networks with external networks at layer 2." - - @classmethod - def get_updated(cls): - return "2014-01-01T00:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - - member_actions = { - GATEWAY_RESOURCE_NAME.replace('_', '-'): { - 'connect_network': 'PUT', - 'disconnect_network': 'PUT'}} - - plural_mappings = resource_helper.build_plural_mappings( - {}, RESOURCE_ATTRIBUTE_MAP) - - return resource_helper.build_resource_info(plural_mappings, - RESOURCE_ATTRIBUTE_MAP, - None, - action_map=member_actions, - register_quota=True, - translate_name=True) - - def get_extended_resources(self, version): - if version == "2.0": - return RESOURCE_ATTRIBUTE_MAP - else: - return {} - - -class NetworkGatewayPluginBase(object): - - @abc.abstractmethod - def create_network_gateway(self, context, network_gateway): - pass - - @abc.abstractmethod - def update_network_gateway(self, context, id, network_gateway): - pass - - @abc.abstractmethod - def get_network_gateway(self, context, id, fields=None): - pass - - @abc.abstractmethod - def delete_network_gateway(self, context, id): - pass - - @abc.abstractmethod - def get_network_gateways(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - pass - - @abc.abstractmethod - def connect_network(self, context, network_gateway_id, - network_mapping_info): - pass - - @abc.abstractmethod - def disconnect_network(self, context, network_gateway_id, - network_mapping_info): - pass - - @abc.abstractmethod - def create_gateway_device(self, context, gateway_device): - pass - - @abc.abstractmethod - def update_gateway_device(self, context, id, gateway_device): - pass 
- - @abc.abstractmethod - def delete_gateway_device(self, context, id): - pass - - @abc.abstractmethod - def get_gateway_device(self, context, id, fields=None): - pass - - @abc.abstractmethod - def get_gateway_devices(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - pass diff --git a/neutron/plugins/vmware/extensions/nvp_qos.py b/neutron/plugins/vmware/extensions/nvp_qos.py deleted file mode 100644 index 14d30ce9eae..00000000000 --- a/neutron/plugins/vmware/extensions/nvp_qos.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# TODO(arosen): This is deprecated in Juno, and -# to be removed in Kxxxx. - -from neutron.plugins.vmware.extensions import qos - - -class Nvp_qos(qos.Qos): - """(Deprecated) Port Queue extension.""" - - @classmethod - def get_name(cls): - return "nvp-qos" - - @classmethod - def get_alias(cls): - return "nvp-qos" - - @classmethod - def get_description(cls): - return "NVP QoS extension (deprecated)." diff --git a/neutron/plugins/vmware/extensions/qos.py b/neutron/plugins/vmware/extensions/qos.py deleted file mode 100644 index fe1ac6ee3be..00000000000 --- a/neutron/plugins/vmware/extensions/qos.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2013 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import abc - -from neutron.api import extensions -from neutron.api.v2 import attributes as attr -from neutron.api.v2 import base -from neutron.common import exceptions as nexception -from neutron import manager - - -# For policy.json/Auth -qos_queue_create = "create_qos_queue" -qos_queue_delete = "delete_qos_queue" -qos_queue_get = "get_qos_queue" -qos_queue_list = "get_qos_queues" - - -class DefaultQueueCreateNotAdmin(nexception.InUse): - message = _("Need to be admin in order to create queue called default") - - -class DefaultQueueAlreadyExists(nexception.InUse): - message = _("Default queue already exists.") - - -class QueueInvalidDscp(nexception.InvalidInput): - message = _("Invalid value for dscp %(data)s must be integer value" - " between 0 and 63.") - - -class QueueInvalidMarking(nexception.InvalidInput): - message = _("The qos marking cannot be set to 'trusted' " - "when the DSCP field is set") - - -class QueueMinGreaterMax(nexception.InvalidInput): - message = _("Invalid bandwidth rate, min greater than max.") - - -class QueueInvalidBandwidth(nexception.InvalidInput): - message = _("Invalid bandwidth rate, %(data)s must be a non negative" - " integer.") - - -class QueueNotFound(nexception.NotFound): - message = _("Queue %(id)s does not exist") - - -class QueueInUseByPort(nexception.InUse): - message = _("Unable to delete queue attached to port.") - - -class 
QueuePortBindingNotFound(nexception.NotFound): - message = _("Port is not associated with lqueue") - - -def convert_to_unsigned_int_or_none(val): - if val is None: - return - try: - val = int(val) - if val < 0: - raise ValueError() - except (ValueError, TypeError): - msg = _("'%s' must be a non negative integer.") % val - raise nexception.InvalidInput(error_message=msg) - return val - - -def convert_to_unsigned_int_or_none_max_63(val): - val = convert_to_unsigned_int_or_none(val) - if val > 63: - raise QueueInvalidDscp(data=val) - return val - -# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is -# untrusted, DSCP must be specified. Whichever default values we choose for -# the tuple (qos_marking, dscp), there will be at least one combination of a -# request with conflicting values: for instance given the default values below, -# requests with qos_marking = 'trusted' and the default dscp value will fail. -# In order to avoid API users to explicitly specify a setting for clearing -# the DSCP field when a trusted queue is created, the code serving this API -# will adopt the following behaviour when qos_marking is set to 'trusted': -# - if the DSCP attribute is set to the default value (0), silently drop -# its value -# - if the DSCP attribute is set to anything than 0 (but still a valid DSCP -# value) return a 400 error as qos_marking and DSCP setting conflict. 
-# TODO(salv-orlando): Evaluate whether it will be possible from a backward -# compatibility perspective to change the default value for DSCP in order to -# avoid this peculiar behaviour - -RESOURCE_ATTRIBUTE_MAP = { - 'qos_queues': { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'default': {'allow_post': True, 'allow_put': False, - 'convert_to': attr.convert_to_boolean, - 'is_visible': True, 'default': False}, - 'name': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': attr.NAME_MAX_LEN}, - 'is_visible': True, 'default': ''}, - 'min': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '0', - 'convert_to': convert_to_unsigned_int_or_none}, - 'max': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': None, - 'convert_to': convert_to_unsigned_int_or_none}, - 'qos_marking': {'allow_post': True, 'allow_put': False, - 'validate': {'type:values': ['untrusted', 'trusted']}, - 'default': 'untrusted', 'is_visible': True}, - 'dscp': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '0', - 'convert_to': convert_to_unsigned_int_or_none_max_63}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'required_by_policy': True, - 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, - 'is_visible': True}, - }, -} - - -QUEUE = 'queue_id' -RXTX_FACTOR = 'rxtx_factor' -EXTENDED_ATTRIBUTES_2_0 = { - 'ports': { - RXTX_FACTOR: {'allow_post': True, - # FIXME(arosen): the plugin currently does not - # implement updating rxtx factor on port. 
- 'allow_put': True, - 'is_visible': False, - 'default': 1, - 'enforce_policy': True, - 'convert_to': attr.convert_to_positive_float_or_none}, - - QUEUE: {'allow_post': False, - 'allow_put': False, - 'is_visible': True, - 'default': False, - 'enforce_policy': True}}, - 'networks': {QUEUE: {'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': False, - 'enforce_policy': True}} - -} - - -class Qos(extensions.ExtensionDescriptor): - """Port Queue extension.""" - - @classmethod - def get_name(cls): - return "QoS Queue" - - @classmethod - def get_alias(cls): - return "qos-queue" - - @classmethod - def get_description(cls): - return "NSX QoS extension." - - @classmethod - def get_updated(cls): - return "2014-01-01T00:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - resource_name = 'qos_queue' - collection_name = resource_name.replace('_', '-') + "s" - params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) - controller = base.create_resource(collection_name, - resource_name, - plugin, params, allow_bulk=False) - - ex = extensions.ResourceExtension(collection_name, - controller) - exts.append(ex) - - return exts - - def get_extended_resources(self, version): - if version == "2.0": - return dict(EXTENDED_ATTRIBUTES_2_0.items() + - RESOURCE_ATTRIBUTE_MAP.items()) - else: - return {} - - -class QueuePluginBase(object): - @abc.abstractmethod - def create_qos_queue(self, context, queue): - pass - - @abc.abstractmethod - def delete_qos_queue(self, context, id): - pass - - @abc.abstractmethod - def get_qos_queue(self, context, id, fields=None): - pass - - @abc.abstractmethod - def get_qos_queues(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - pass diff --git a/neutron/plugins/vmware/extensions/routertype.py b/neutron/plugins/vmware/extensions/routertype.py deleted file mode 100644 index 
f42c6b902ba..00000000000 --- a/neutron/plugins/vmware/extensions/routertype.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.api import extensions -from neutron.api.v2 import attributes - - -ROUTER_TYPE = 'router_type' -EXTENDED_ATTRIBUTES_2_0 = { - 'routers': { - ROUTER_TYPE: {'allow_post': True, 'allow_put': False, - 'validate': {'type:values': ['shared', 'exclusive']}, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - } -} - - -class Routertype(extensions.ExtensionDescriptor): - """Extension class supporting router type.""" - - @classmethod - def get_name(cls): - return "Router Type" - - @classmethod - def get_alias(cls): - return "nsxv-router-type" - - @classmethod - def get_description(cls): - return "Enables configuration of NSXv router type." 
- - @classmethod - def get_updated(cls): - return "2015-1-12T10:00:00-00:00" - - def get_required_extensions(self): - return ["router"] - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - return [] - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/extensions/vnicindex.py b/neutron/plugins/vmware/extensions/vnicindex.py deleted file mode 100644 index 290f0a3b2c2..00000000000 --- a/neutron/plugins/vmware/extensions/vnicindex.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2015 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.api import extensions -from neutron.api.v2 import attributes - -# Attribute Map -VNIC_INDEX = 'vnic_index' - - -EXTENDED_ATTRIBUTES_2_0 = { - 'ports': { - VNIC_INDEX: - {'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': None, - 'convert_to': attributes.convert_to_int_if_not_none}}} - - -class Vnicindex(extensions.ExtensionDescriptor): - @classmethod - def get_name(cls): - return "VNIC Index" - - @classmethod - def get_alias(cls): - return "vnic-index" - - @classmethod - def get_description(cls): - return ("Enable a port to be associated with a VNIC index") - - @classmethod - def get_updated(cls): - return "2014-09-15T12:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/plugin.py b/neutron/plugins/vmware/plugin.py deleted file mode 100644 index c841d15594a..00000000000 --- a/neutron/plugins/vmware/plugin.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from vmware_nsx.neutron.plugins.vmware.plugins import base as nsx_mh -from vmware_nsx.neutron.plugins.vmware.plugins import dvs -from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v - -NsxMhPlugin = nsx_mh.NsxPluginV2 -# The 'NsxPlugin' name will be deprecated in Liberty -# and replaced by the 'NsxMhPlugin' name -NsxPlugin = NsxMhPlugin -NsxVPlugin = nsx_v.NsxVPluginV2 -NsxDvsPlugin = dvs.NsxDvsV2 diff --git a/neutron/policy.py b/neutron/policy.py index c3f6746bb46..e1d955a6022 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -18,7 +18,6 @@ Policy engine for neutron. Largely copied from nova. """ import collections -import logging as std_logging import re from oslo_config import cfg @@ -314,7 +313,7 @@ def _prepare_check(context, action, target, pluralized): def log_rule_list(match_rule): - if LOG.isEnabledFor(std_logging.DEBUG): + if LOG.isEnabledFor(logging.DEBUG): rules = _process_rules_list([], match_rule) LOG.debug("Enforcing rules: %s", rules) diff --git a/neutron/quota/__init__.py b/neutron/quota/__init__.py index 97b466e872a..df54d9f9128 100644 --- a/neutron/quota/__init__.py +++ b/neutron/quota/__init__.py @@ -24,6 +24,7 @@ import six import webob from neutron.common import exceptions +from neutron.db.quota import api as quota_api from neutron.i18n import _LI, _LW from neutron.quota import resource_registry @@ -152,6 +153,33 @@ class ConfDriver(object): msg = _('Access to this resource was denied.') raise webob.exc.HTTPForbidden(msg) + def make_reservation(self, context, tenant_id, resources, deltas, plugin): + """This driver does not support reservations. + + This routine is provided for backward compatibility purposes with + the API controllers which have now been adapted to make reservations + rather than counting resources and checking limits - as this + routine ultimately does. 
+ """ + for resource in deltas.keys(): + count = QUOTAS.count(context, resource, plugin, tenant_id) + total_use = deltas.get(resource, 0) + count + deltas[resource] = total_use + + self.limit_check( + context, + tenant_id, + resource_registry.get_all_resources(), + deltas) + # return a fake reservation - the REST controller expects it + return quota_api.ReservationInfo('fake', None, None, None) + + def commit_reservation(self, context, reservation_id): + """Tnis is a noop as this driver does not support reservations.""" + + def cancel_reservation(self, context, reservation_id): + """Tnis is a noop as this driver does not support reservations.""" + class QuotaEngine(object): """Represent the set of recognized quotas.""" @@ -210,6 +238,39 @@ class QuotaEngine(object): return res.count(context, *args, **kwargs) + def make_reservation(self, context, tenant_id, deltas, plugin): + # Verify that resources are managed by the quota engine + # Ensure no value is less than zero + unders = [key for key, val in deltas.items() if val < 0] + if unders: + raise exceptions.InvalidQuotaValue(unders=sorted(unders)) + + requested_resources = set(deltas.keys()) + all_resources = resource_registry.get_all_resources() + managed_resources = set([res for res in all_resources.keys() + if res in requested_resources]) + # Make sure we accounted for all of them... + unknown_resources = requested_resources - managed_resources + + if unknown_resources: + raise exceptions.QuotaResourceUnknown( + unknown=sorted(unknown_resources)) + # FIXME(salv-orlando): There should be no reason for sending all the + # resource in the registry to the quota driver, but as other driver + # APIs request them, this will be sorted out with a different patch. 
+ return self.get_driver().make_reservation( + context, + tenant_id, + all_resources, + deltas, + plugin) + + def commit_reservation(self, context, reservation_id): + self.get_driver().commit_reservation(context, reservation_id) + + def cancel_reservation(self, context, reservation_id): + self.get_driver().cancel_reservation(context, reservation_id) + def limit_check(self, context, tenant_id, **values): """Check simple quota limits. @@ -232,6 +293,7 @@ class QuotaEngine(object): :param tenant_id: Tenant for which the quota limit is being checked :param values: Dict specifying requested deltas for each resource """ + # TODO(salv-orlando): Deprecate calls to this API # Verify that resources are managed by the quota engine requested_resources = set(values.keys()) managed_resources = set([res for res in diff --git a/neutron/quota/resource.py b/neutron/quota/resource.py index eb0036859fa..7068254c7dc 100644 --- a/neutron/quota/resource.py +++ b/neutron/quota/resource.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as oslo_db_exception @@ -177,20 +176,26 @@ class TrackedResource(BaseResource): def dirty(self): return self._dirty_tenants - @lockutils.synchronized('dirty_tenants') def mark_dirty(self, context, nested=False): if not self._dirty_tenants: return with context.session.begin(nested=nested, subtransactions=True): - for tenant_id in self._dirty_tenants: + # It is not necessary to protect this operation with a lock. + # Indeed when this method is called the request has been processed + # and therefore all resources created or deleted. + # dirty_tenants will contain all the tenants for which the + # resource count is changed. The list might contain also tenants + # for which resource count was altered in other requests, but this + # won't be harmful. 
+ dirty_tenants_snap = self._dirty_tenants.copy() + for tenant_id in dirty_tenants_snap: quota_api.set_quota_usage_dirty(context, self.name, tenant_id) LOG.debug(("Persisted dirty status for tenant:%(tenant_id)s " "on resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) - self._out_of_sync_tenants |= self._dirty_tenants - self._dirty_tenants.clear() + self._out_of_sync_tenants |= dirty_tenants_snap + self._dirty_tenants = self._dirty_tenants - dirty_tenants_snap - @lockutils.synchronized('dirty_tenants') def _db_event_handler(self, mapper, _conn, target): try: tenant_id = target['tenant_id'] @@ -208,14 +213,15 @@ class TrackedResource(BaseResource): max_retries=db_api.MAX_RETRIES, exception_checker=lambda exc: isinstance(exc, oslo_db_exception.DBDuplicateEntry)) - def _set_quota_usage(self, context, tenant_id, in_use): - return quota_api.set_quota_usage(context, self.name, - tenant_id, in_use=in_use) + def _set_quota_usage(self, context, tenant_id, in_use, reserved): + return quota_api.set_quota_usage(context, self.name, tenant_id, + in_use=in_use, reserved=reserved) - def _resync(self, context, tenant_id, in_use): + def _resync(self, context, tenant_id, in_use, reserved): # Update quota usage usage_info = self._set_quota_usage( - context, tenant_id, in_use=in_use) + context, tenant_id, in_use, reserved) + self._dirty_tenants.discard(tenant_id) self._out_of_sync_tenants.discard(tenant_id) LOG.debug(("Unset dirty status for tenant:%(tenant_id)s on " @@ -232,39 +238,57 @@ class TrackedResource(BaseResource): in_use = context.session.query(self._model_class).filter_by( tenant_id=tenant_id).count() # Update quota usage - return self._resync(context, tenant_id, in_use) + return self._resync(context, tenant_id, in_use, reserved=0) def count(self, context, _plugin, tenant_id, resync_usage=False): - """Return the current usage count for the resource.""" - # Load current usage data + """Return the current usage count for the resource. 
+ + This method will fetch the information from resource usage data, + unless usage data are marked as "dirty", in which case both used and + reserved resource are explicitly counted. + + The _plugin and _resource parameters are unused but kept for + compatibility with the signature of the count method for + CountableResource instances. + """ + # Load current usage data, setting a row-level lock on the DB usage_info = quota_api.get_quota_usage_by_resource_and_tenant( - context, self.name, tenant_id) + context, self.name, tenant_id, lock_for_update=True) # If dirty or missing, calculate actual resource usage querying # the database and set/create usage info data # NOTE: this routine "trusts" usage counters at service startup. This # assumption is generally valid, but if the database is tampered with, # or if data migrations do not take care of usage counters, the # assumption will not hold anymore - if (tenant_id in self._dirty_tenants or not usage_info - or usage_info.dirty): + if (tenant_id in self._dirty_tenants or + not usage_info or usage_info.dirty): LOG.debug(("Usage tracker for resource:%(resource)s and tenant:" "%(tenant_id)s is out of sync, need to count used " "quota"), {'resource': self.name, 'tenant_id': tenant_id}) in_use = context.session.query(self._model_class).filter_by( tenant_id=tenant_id).count() + # Update quota usage, if requested (by default do not do that, as # typically one counts before adding a record, and that would mark # the usage counter as dirty again) if resync_usage or not usage_info: - usage_info = self._resync(context, tenant_id, in_use) + usage_info = self._resync(context, tenant_id, + in_use, reserved=0) else: + # NOTE(salv-orlando): Passing 0 for reserved amount as + # reservations are currently not supported usage_info = quota_api.QuotaUsageInfo(usage_info.resource, usage_info.tenant_id, in_use, - usage_info.reserved, + 0, usage_info.dirty) + LOG.debug(("Quota usage for %(resource)s was recalculated. 
" + "Used quota:%(used)d; Reserved quota:%(reserved)d"), + {'resource': self.name, + 'used': usage_info.used, + 'reserved': usage_info.reserved}) return usage_info.total def register_events(self): diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py index d0263e87614..8d462e5e843 100644 --- a/neutron/quota/resource_registry.py +++ b/neutron/quota/resource_registry.py @@ -65,7 +65,7 @@ def set_resources_dirty(context): return for res in get_all_resources().values(): - with context.session.begin(): + with context.session.begin(subtransactions=True): if is_tracked(res.name) and res.dirty: res.mark_dirty(context, nested=True) @@ -133,7 +133,7 @@ class ResourceRegistry(object): there are usage counters which are kept in sync with the actual number of rows in the database, this class allows the plugin to register their names either explicitly or through the @tracked_resources decorator, - which should preferrably be applied to the __init__ method of the class. + which should preferably be applied to the __init__ method of the class. 
""" _instance = None diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py index ae7adbf38ea..e41a34968d7 100644 --- a/neutron/scheduler/l3_agent_scheduler.py +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -234,11 +234,8 @@ class L3Scheduler(object): sync_router = plugin.get_router(context, router_id) candidates = candidates or self._get_candidates( plugin, context, sync_router) - if not candidates: - return - - router_distributed = sync_router.get('distributed', False) - if router_distributed: + chosen_agent = None + if sync_router.get('distributed', False): for chosen_agent in candidates: self.bind_router(context, router_id, chosen_agent) @@ -249,13 +246,15 @@ class L3Scheduler(object): if not snat_bindings and router_gw_exists: # If GW exists for DVR routers and no SNAT binding # call the schedule_snat_router - plugin.schedule_snat_router( + chosen_agent = plugin.schedule_snat_router( context, router_id, sync_router) elif not router_gw_exists and snat_bindings: # If DVR router and no Gateway but SNAT Binding exists then # call the unbind_snat_servicenode to unbind the snat service # from agent plugin.unbind_snat_servicenode(context, router_id) + elif not candidates: + return elif sync_router.get('ha', False): chosen_agents = self._bind_ha_router(plugin, context, router_id, candidates) diff --git a/neutron/server/__init__.py b/neutron/server/__init__.py index 108e9f4d4ed..3386fcfa6fe 100644 --- a/neutron/server/__init__.py +++ b/neutron/server/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # Copyright 2011 VMware, Inc. # All Rights Reserved. 
# diff --git a/neutron/service.py b/neutron/service.py index 4cec3357078..6b1eee248b1 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -118,13 +118,27 @@ class RpcWorker(common_service.ServiceBase): self._servers = self._plugin.start_rpc_listeners() def wait(self): + try: + self._wait() + except Exception: + LOG.exception(_LE('done with wait')) + raise + + def _wait(self): + LOG.debug('calling RpcWorker wait()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): + LOG.debug('calling wait on %s', server) server.wait() + else: + LOG.debug('NOT calling wait on %s', server) + LOG.debug('returning from RpcWorker wait()') def stop(self): + LOG.debug('calling RpcWorker stop()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): + LOG.debug('calling stop on %s', server) server.stop() @staticmethod @@ -151,12 +165,16 @@ def serve_rpc(): rpc = RpcWorker(plugin) if cfg.CONF.rpc_workers < 1: + LOG.debug('starting rpc directly, workers=%s', + cfg.CONF.rpc_workers) rpc.start() return rpc else: # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. 
+ LOG.debug('using launcher for rpc, workers=%s', + cfg.CONF.rpc_workers) session.dispose() launcher = common_service.ProcessLauncher(cfg.CONF, wait_interval=1.0) diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py index cc406e74193..938644f8c47 100644 --- a/neutron/services/provider_configuration.py +++ b/neutron/services/provider_configuration.py @@ -51,6 +51,8 @@ def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS): try: driver_manager = stevedore.driver.DriverManager( namespace, driver).driver + except ImportError: + return driver except RuntimeError: return driver new_driver = "%s.%s" % (driver_manager.__module__, diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py b/neutron/services/qos/__init__.py similarity index 100% rename from neutron/plugins/plumgrid/plumgrid_plugin/__init__.py rename to neutron/services/qos/__init__.py diff --git a/neutron/plugins/vmware/common/__init__.py b/neutron/services/qos/notification_drivers/__init__.py similarity index 100% rename from neutron/plugins/vmware/common/__init__.py rename to neutron/services/qos/notification_drivers/__init__.py diff --git a/neutron/services/qos/notification_drivers/manager.py b/neutron/services/qos/notification_drivers/manager.py new file mode 100644 index 00000000000..d027c1945c7 --- /dev/null +++ b/neutron/services/qos/notification_drivers/manager.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from oslo_config import cfg +from oslo_log import log as logging + +from neutron.i18n import _LI +from neutron import manager + +QOS_DRIVER_NAMESPACE = 'neutron.qos.notification_drivers' +QOS_PLUGIN_OPTS = [ + cfg.ListOpt('notification_drivers', + default='message_queue', + help=_('Drivers list to use to send the update notification')), +] + +cfg.CONF.register_opts(QOS_PLUGIN_OPTS, "qos") + +LOG = logging.getLogger(__name__) + + +class QosServiceNotificationDriverManager(object): + + def __init__(self): + self.notification_drivers = [] + self._load_drivers(cfg.CONF.qos.notification_drivers) + + def update_policy(self, context, qos_policy): + for driver in self.notification_drivers: + driver.update_policy(context, qos_policy) + + def delete_policy(self, context, qos_policy): + for driver in self.notification_drivers: + driver.delete_policy(context, qos_policy) + + def create_policy(self, context, qos_policy): + for driver in self.notification_drivers: + driver.create_policy(context, qos_policy) + + def _load_drivers(self, notification_drivers): + """Load all the instances of the configured QoS notification drivers + + :param notification_drivers: comma separated string + """ + if not notification_drivers: + raise SystemExit(_('A QoS driver must be specified')) + LOG.debug("Loading QoS notification drivers: %s", notification_drivers) + for notification_driver in notification_drivers: + driver_ins = self._load_driver_instance(notification_driver) + self.notification_drivers.append(driver_ins) + + def _load_driver_instance(self, notification_driver): + """Returns an instance of the configured QoS notification driver + + :returns: An instance of Driver for the QoS notification + """ + mgr = manager.NeutronManager + driver = mgr.load_class_for_provider(QOS_DRIVER_NAMESPACE, + notification_driver) + driver_instance = driver() + LOG.info( + _LI("Loading %(name)s (%(description)s) notification driver " + "for QoS plugin"), + {"name": notification_driver, + "description": 
driver_instance.get_description()}) + return driver_instance diff --git a/neutron/services/qos/notification_drivers/message_queue.py b/neutron/services/qos/notification_drivers/message_queue.py new file mode 100644 index 00000000000..1af63f9ac3c --- /dev/null +++ b/neutron/services/qos/notification_drivers/message_queue.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks.producer import registry +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.i18n import _LW +from neutron.objects.qos import policy as policy_object +from neutron.services.qos.notification_drivers import qos_base + + +LOG = logging.getLogger(__name__) + + +def _get_qos_policy_cb(resource, policy_id, **kwargs): + context = kwargs.get('context') + if context is None: + LOG.warning(_LW( + 'Received %(resource)s %(policy_id)s without context'), + {'resource': resource, 'policy_id': policy_id} + ) + return + + policy = policy_object.QosPolicy.get_by_id(context, policy_id) + return policy + + +class RpcQosServiceNotificationDriver( + qos_base.QosServiceNotificationDriverBase): + """RPC message queue service notification driver for QoS.""" + + def __init__(self): + self.notification_api = resources_rpc.ResourcesPushRpcApi() + registry.provide(_get_qos_policy_cb, resources.QOS_POLICY) + + def get_description(self): + 
return "Message queue updates" + + def create_policy(self, context, policy): + #No need to update agents on create + pass + + def update_policy(self, context, policy): + self.notification_api.push(context, policy, events.UPDATED) + + def delete_policy(self, context, policy): + self.notification_api.push(context, policy, events.DELETED) diff --git a/neutron/services/qos/notification_drivers/qos_base.py b/neutron/services/qos/notification_drivers/qos_base.py new file mode 100644 index 00000000000..50f98f0c4b4 --- /dev/null +++ b/neutron/services/qos/notification_drivers/qos_base.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class QosServiceNotificationDriverBase(object): + """QoS service notification driver base class.""" + + @abc.abstractmethod + def get_description(self): + """Get the notification driver description. + """ + + @abc.abstractmethod + def create_policy(self, context, policy): + """Create the QoS policy.""" + + @abc.abstractmethod + def update_policy(self, context, policy): + """Update the QoS policy. + + Apply changes to the QoS policy. + """ + + @abc.abstractmethod + def delete_policy(self, context, policy): + """Delete the QoS policy. + + Remove all rules for this policy and free up all the resources. 
+ """ diff --git a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py b/neutron/services/qos/qos_consts.py similarity index 71% rename from neutron/plugins/ml2/drivers/cisco/ncs/driver.py rename to neutron/services/qos/qos_consts.py index 6f8b8a6c7c0..3eb78d517d5 100644 --- a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py +++ b/neutron/services/qos/qos_consts.py @@ -1,5 +1,5 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. +# Copyright (c) 2015 Red Hat Inc. +# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,10 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -ML2 Mechanism Driver for Cisco NCS. -""" +RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth_limit' +VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT] -from networking_cisco.plugins.ml2.drivers.cisco.ncs import driver as cisco - -NCSMechanismDriver = cisco.NCSMechanismDriver +QOS_POLICY_ID = 'qos_policy_id' diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py new file mode 100644 index 00000000000..154c1b87206 --- /dev/null +++ b/neutron/services/qos/qos_plugin.py @@ -0,0 +1,166 @@ +# Copyright (c) 2015 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from oslo_log import log as logging + + +from neutron.common import exceptions as n_exc +from neutron.db import api as db_api +from neutron.db import db_base_plugin_common +from neutron.extensions import qos +from neutron.objects.qos import policy as policy_object +from neutron.objects.qos import rule as rule_object +from neutron.objects.qos import rule_type as rule_type_object +from neutron.services.qos.notification_drivers import manager as driver_mgr +from neutron.services.qos import qos_consts + + +LOG = logging.getLogger(__name__) + + +class QoSPlugin(qos.QoSPluginBase): + """Implementation of the Neutron QoS Service Plugin. + + This class implements a Quality of Service plugin that + provides quality of service parameters over ports and + networks. + + """ + supported_extension_aliases = ['qos'] + + def __init__(self): + super(QoSPlugin, self).__init__() + self.notification_driver_manager = ( + driver_mgr.QosServiceNotificationDriverManager()) + + @db_base_plugin_common.convert_result_to_dict + def create_policy(self, context, policy): + policy = policy_object.QosPolicy(context, **policy['policy']) + policy.create() + self.notification_driver_manager.create_policy(context, policy) + return policy + + @db_base_plugin_common.convert_result_to_dict + def update_policy(self, context, policy_id, policy): + policy = policy_object.QosPolicy(context, **policy['policy']) + policy.id = policy_id + policy.update() + self.notification_driver_manager.update_policy(context, policy) + return policy + + def delete_policy(self, context, policy_id): + policy = policy_object.QosPolicy(context) + policy.id = policy_id + self.notification_driver_manager.delete_policy(context, policy) + policy.delete() + + def _get_policy_obj(self, context, policy_id): + obj = policy_object.QosPolicy.get_by_id(context, policy_id) + if obj is None: + raise n_exc.QosPolicyNotFound(policy_id=policy_id) + return obj + + @db_base_plugin_common.filter_fields + 
@db_base_plugin_common.convert_result_to_dict + def get_policy(self, context, policy_id, fields=None): + return self._get_policy_obj(context, policy_id) + + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_policies(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + return policy_object.QosPolicy.get_objects(context, **filters) + + #TODO(QoS): Consider adding a proxy catch-all for rules, so + # we capture the API function call, and just pass + # the rule type as a parameter removing lots of + # future code duplication when we have more rules. + @db_base_plugin_common.convert_result_to_dict + def create_policy_bandwidth_limit_rule(self, context, policy_id, + bandwidth_limit_rule): + # make sure we will have a policy object to push resource update + with db_api.autonested_transaction(context.session): + # first, validate that we have access to the policy + policy = self._get_policy_obj(context, policy_id) + rule = rule_object.QosBandwidthLimitRule( + context, qos_policy_id=policy_id, + **bandwidth_limit_rule['bandwidth_limit_rule']) + rule.create() + policy.reload_rules() + self.notification_driver_manager.update_policy(context, policy) + return rule + + @db_base_plugin_common.convert_result_to_dict + def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, + bandwidth_limit_rule): + # make sure we will have a policy object to push resource update + with db_api.autonested_transaction(context.session): + # first, validate that we have access to the policy + policy = self._get_policy_obj(context, policy_id) + rule = rule_object.QosBandwidthLimitRule( + context, **bandwidth_limit_rule['bandwidth_limit_rule']) + rule.id = rule_id + rule.update() + policy.reload_rules() + self.notification_driver_manager.update_policy(context, policy) + return rule + + def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id): + # make sure we will have a 
policy object to push resource update + with db_api.autonested_transaction(context.session): + # first, validate that we have access to the policy + policy = self._get_policy_obj(context, policy_id) + rule = rule_object.QosBandwidthLimitRule(context) + rule.id = rule_id + rule.delete() + policy.reload_rules() + self.notification_driver_manager.update_policy(context, policy) + + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_policy_bandwidth_limit_rule(self, context, rule_id, + policy_id, fields=None): + # make sure we have access to the policy when fetching the rule + with db_api.autonested_transaction(context.session): + # first, validate that we have access to the policy + self._get_policy_obj(context, policy_id) + rule = rule_object.QosBandwidthLimitRule.get_by_id( + context, rule_id) + if not rule: + raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id) + return rule + + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_policy_bandwidth_limit_rules(self, context, policy_id, + filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False): + # make sure we have access to the policy when fetching rules + with db_api.autonested_transaction(context.session): + # first, validate that we have access to the policy + self._get_policy_obj(context, policy_id) + filters = filters or dict() + filters[qos_consts.QOS_POLICY_ID] = policy_id + return rule_object.QosBandwidthLimitRule.get_objects(context, + **filters) + + # TODO(QoS): enforce rule types when accessing rule objects + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_rule_types(self, context, filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False): + return rule_type_object.QosRuleType.get_objects(**filters) diff --git a/neutron/plugins/vmware/dbexts/__init__.py b/neutron/services/rbac/__init__.py similarity 
index 100% rename from neutron/plugins/vmware/dbexts/__init__.py rename to neutron/services/rbac/__init__.py diff --git a/neutron/tests/api/admin/test_routers_dvr.py b/neutron/tests/api/admin/test_routers_dvr.py index 34301ce7448..592fded05f2 100644 --- a/neutron/tests/api/admin/test_routers_dvr.py +++ b/neutron/tests/api/admin/test_routers_dvr.py @@ -93,7 +93,9 @@ class RoutersTestDVR(base.BaseRouterTest): attribute will be set to True """ name = data_utils.rand_name('router') - router = self.admin_client.create_router(name, distributed=False) + # router needs to be in admin state down in order to be upgraded to DVR + router = self.admin_client.create_router(name, distributed=False, + admin_state_up=False) self.addCleanup(self.admin_client.delete_router, router['router']['id']) self.assertFalse(router['router']['distributed']) diff --git a/neutron/tests/api/admin/test_shared_network_extension.py b/neutron/tests/api/admin/test_shared_network_extension.py index 569e07f1a72..78215e41704 100644 --- a/neutron/tests/api/admin/test_shared_network_extension.py +++ b/neutron/tests/api/admin/test_shared_network_extension.py @@ -18,6 +18,7 @@ from tempest_lib import exceptions as lib_exc import testtools from neutron.tests.api import base +from neutron.tests.api import clients from neutron.tests.tempest import config from neutron.tests.tempest import test from tempest_lib.common.utils import data_utils @@ -172,3 +173,180 @@ class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest): with testtools.ExpectedException(lib_exc.Forbidden): self.update_port( port, allowed_address_pairs=self.allowed_address_pairs) + + +class RBACSharedNetworksTest(base.BaseAdminNetworkTest): + + force_tenant_isolation = True + + @classmethod + def resource_setup(cls): + super(RBACSharedNetworksTest, cls).resource_setup() + extensions = cls.admin_client.list_extensions() + if not test.is_extension_enabled('rbac_policies', 'network'): + msg = "rbac extension not enabled." 
+ raise cls.skipException(msg) + # NOTE(kevinbenton): the following test seems to be necessary + # since the default is 'all' for the above check and these tests + # need to get into the gate and be disabled until the service plugin + # is enabled in devstack. Is there a better way to do this? + if 'rbac-policies' not in [x['alias'] + for x in extensions['extensions']]: + msg = "rbac extension is not in extension listing." + raise cls.skipException(msg) + creds = cls.isolated_creds.get_alt_creds() + cls.client2 = clients.Manager(credentials=creds).network_client + + def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id): + net = self.admin_client.create_network( + name=data_utils.rand_name('test-network-'))['network'] + self.addCleanup(self.admin_client.delete_network, net['id']) + subnet = self.create_subnet(net, client=self.admin_client) + # network is shared to first unprivileged client by default + pol = self.admin_client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant=tenant_id + )['rbac_policy'] + return {'network': net, 'subnet': subnet, 'policy': pol} + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff') + def test_network_only_visible_to_policy_target(self): + net = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id)['network'] + self.client.show_network(net['id']) + with testtools.ExpectedException(lib_exc.NotFound): + # client2 has not been granted access + self.client2.show_network(net['id']) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff') + def test_subnet_on_network_only_visible_to_policy_target(self): + sub = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id)['subnet'] + self.client.show_subnet(sub['id']) + with testtools.ExpectedException(lib_exc.NotFound): + # client2 has not been granted access + self.client2.show_subnet(sub['id']) + + 
@test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee') + def test_policy_target_update(self): + res = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id) + # change to client2 + update_res = self.admin_client.update_rbac_policy( + res['policy']['id'], target_tenant=self.client2.tenant_id) + self.assertEqual(self.client2.tenant_id, + update_res['rbac_policy']['target_tenant']) + # make sure everything else stayed the same + res['policy'].pop('target_tenant') + update_res['rbac_policy'].pop('target_tenant') + self.assertEqual(res['policy'], update_res['rbac_policy']) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff') + def test_port_presence_prevents_network_rbac_policy_deletion(self): + res = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id) + port = self.client.create_port(network_id=res['network']['id'])['port'] + # a port on the network should prevent the deletion of a policy + # required for it to exist + with testtools.ExpectedException(lib_exc.Conflict): + self.admin_client.delete_rbac_policy(res['policy']['id']) + + # a wildcard policy should allow the specific policy to be deleted + # since it allows the remaining port + wild = self.admin_client.create_rbac_policy( + object_type='network', object_id=res['network']['id'], + action='access_as_shared', target_tenant='*')['rbac_policy'] + self.admin_client.delete_rbac_policy(res['policy']['id']) + + # now that the wildcard is the only remaining one, it should be subject + to the same restriction + with testtools.ExpectedException(lib_exc.Conflict): + self.admin_client.delete_rbac_policy(wild['id']) + # similarly, we can't update the policy to a different tenant + with testtools.ExpectedException(lib_exc.Conflict): + self.admin_client.update_rbac_policy( + wild['id'], target_tenant=self.client2.tenant_id) + + self.client.delete_port(port['id']) + # anchor is gone, delete should pass + 
self.admin_client.delete_rbac_policy(wild['id']) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef') + def test_tenant_can_delete_port_on_own_network(self): + # TODO(kevinbenton): make adjustments to the db lookup to + # make this work. + msg = "Non-admin cannot currently delete other's ports." + raise self.skipException(msg) + # pylint: disable=unreachable + net = self.create_network() # owned by self.client + self.client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant=self.client2.tenant_id) + port = self.client2.create_port(network_id=net['id'])['port'] + self.client.delete_port(port['id']) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff') + def test_regular_client_shares_to_another_regular_client(self): + net = self.create_network() # owned by self.client + with testtools.ExpectedException(lib_exc.NotFound): + self.client2.show_network(net['id']) + pol = self.client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant=self.client2.tenant_id) + self.client2.show_network(net['id']) + + self.assertIn(pol['rbac_policy'], + self.client.list_rbac_policies()['rbac_policies']) + # ensure that 'client2' can't see the policy sharing the network to it + # because the policy belongs to 'client' + self.assertNotIn(pol['rbac_policy']['id'], + [p['id'] + for p in self.client2.list_rbac_policies()['rbac_policies']]) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff') + def test_policy_show(self): + res = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id) + p1 = res['policy'] + p2 = self.admin_client.create_rbac_policy( + object_type='network', object_id=res['network']['id'], + action='access_as_shared', + target_tenant='*')['rbac_policy'] + + self.assertEqual( + p1, 
self.admin_client.show_rbac_policy(p1['id'])['rbac_policy']) + self.assertEqual( + p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy']) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff') + def test_regular_client_blocked_from_sharing_anothers_network(self): + net = self._make_admin_net_and_subnet_shared_to_tenant_id( + self.client.tenant_id)['network'] + with testtools.ExpectedException(lib_exc.BadRequest): + self.client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant=self.client.tenant_id) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff') + def test_regular_client_blocked_from_sharing_with_wildcard(self): + net = self.create_network() + with testtools.ExpectedException(lib_exc.Forbidden): + self.client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant='*') + # ensure it works on update as well + pol = self.client.create_rbac_policy( + object_type='network', object_id=net['id'], + action='access_as_shared', target_tenant=self.client2.tenant_id) + with testtools.ExpectedException(lib_exc.Forbidden): + self.client.update_rbac_policy(pol['rbac_policy']['id'], + target_tenant='*') diff --git a/neutron/tests/api/base.py b/neutron/tests/api/base.py index 2790240eb5f..0f31a9a2a84 100644 --- a/neutron/tests/api/base.py +++ b/neutron/tests/api/base.py @@ -88,6 +88,8 @@ class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase): cls.fw_rules = [] cls.fw_policies = [] cls.ipsecpolicies = [] + cls.qos_rules = [] + cls.qos_policies = [] cls.ethertype = "IPv" + str(cls._ip_version) cls.address_scopes = [] cls.admin_address_scopes = [] @@ -115,6 +117,14 @@ class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase): for vpnservice in cls.vpnservices: cls._try_delete_resource(cls.client.delete_vpnservice, vpnservice['id']) + # Clean up QoS rules + for qos_rule in 
cls.qos_rules: + cls._try_delete_resource(cls.admin_client.delete_qos_rule, + qos_rule['id']) + # Clean up QoS policies + for qos_policy in cls.qos_policies: + cls._try_delete_resource(cls.admin_client.delete_qos_policy, + qos_policy['id']) # Clean up floating IPs for floating_ip in cls.floating_ips: cls._try_delete_resource(cls.client.delete_floatingip, @@ -221,9 +231,9 @@ class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase): return network @classmethod - def create_shared_network(cls, network_name=None): + def create_shared_network(cls, network_name=None, **post_body): network_name = network_name or data_utils.rand_name('sharednetwork-') - post_body = {'name': network_name, 'shared': True} + post_body.update({'name': network_name, 'shared': True}) body = cls.admin_client.create_network(**post_body) network = body['network'] cls.shared_networks.append(network) @@ -431,6 +441,25 @@ class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase): cls.fw_policies.append(fw_policy) return fw_policy + @classmethod + def create_qos_policy(cls, name, description, shared, tenant_id=None): + """Wrapper utility that returns a test QoS policy.""" + body = cls.admin_client.create_qos_policy( + name, description, shared, tenant_id) + qos_policy = body['policy'] + cls.qos_policies.append(qos_policy) + return qos_policy + + @classmethod + def create_qos_bandwidth_limit_rule(cls, policy_id, + max_kbps, max_burst_kbps): + """Wrapper utility that returns a test QoS bandwidth limit rule.""" + body = cls.admin_client.create_bandwidth_limit_rule( + policy_id, max_kbps, max_burst_kbps) + qos_rule = body['bandwidth_limit_rule'] + cls.qos_rules.append(qos_rule) + return qos_rule + @classmethod def delete_router(cls, router): body = cls.client.list_router_interfaces(router['id']) diff --git a/neutron/tests/api/test_qos.py b/neutron/tests/api/test_qos.py new file mode 100644 index 00000000000..5f26afd3cf2 --- /dev/null +++ b/neutron/tests/api/test_qos.py @@ -0,0 +1,453 @@ +# 
Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest_lib import exceptions +import testtools + +from neutron.services.qos import qos_consts +from neutron.tests.api import base +from neutron.tests.tempest import config +from neutron.tests.tempest import test + +CONF = config.CONF + + +class QosTestJSON(base.BaseAdminNetworkTest): + @classmethod + def resource_setup(cls): + super(QosTestJSON, cls).resource_setup() + if not test.is_extension_enabled('qos', 'network'): + msg = "qos extension not enabled." 
+ raise cls.skipException(msg) + + @test.attr(type='smoke') + @test.idempotent_id('108fbdf7-3463-4e47-9871-d07f3dcf5bbb') + def test_create_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy desc1', + shared=False) + + # Test 'show policy' + retrieved_policy = self.admin_client.show_qos_policy(policy['id']) + retrieved_policy = retrieved_policy['policy'] + self.assertEqual('test-policy', retrieved_policy['name']) + self.assertEqual('test policy desc1', retrieved_policy['description']) + self.assertFalse(retrieved_policy['shared']) + + # Test 'list policies' + policies = self.admin_client.list_qos_policies()['policies'] + policies_ids = [p['id'] for p in policies] + self.assertIn(policy['id'], policies_ids) + + @test.attr(type='smoke') + @test.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815') + def test_list_policy_filter_by_name(self): + self.create_qos_policy(name='test', description='test policy', + shared=False) + self.create_qos_policy(name='test2', description='test policy', + shared=False) + + policies = (self.admin_client. 
+ list_qos_policies(name='test')['policies']) + self.assertEqual(1, len(policies)) + + retrieved_policy = policies[0] + self.assertEqual('test', retrieved_policy['name']) + + @test.attr(type='smoke') + @test.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6') + def test_policy_update(self): + policy = self.create_qos_policy(name='test-policy', + description='', + shared=False) + self.admin_client.update_qos_policy(policy['id'], + description='test policy desc2', + shared=True) + + retrieved_policy = self.admin_client.show_qos_policy(policy['id']) + retrieved_policy = retrieved_policy['policy'] + self.assertEqual('test policy desc2', retrieved_policy['description']) + self.assertTrue(retrieved_policy['shared']) + self.assertEqual([], retrieved_policy['rules']) + + @test.attr(type='smoke') + @test.idempotent_id('1cb42653-54bd-4a9a-b888-c55e18199201') + def test_delete_policy(self): + policy = self.admin_client.create_qos_policy( + 'test-policy', 'desc', True)['policy'] + + retrieved_policy = self.admin_client.show_qos_policy(policy['id']) + retrieved_policy = retrieved_policy['policy'] + self.assertEqual('test-policy', retrieved_policy['name']) + + self.admin_client.delete_qos_policy(policy['id']) + self.assertRaises(exceptions.NotFound, + self.admin_client.show_qos_policy, policy['id']) + + @test.attr(type='smoke') + @test.idempotent_id('cf776f77-8d3d-49f2-8572-12d6a1557224') + def test_list_admin_rule_types(self): + self._test_list_rule_types(self.admin_client) + + @test.attr(type='smoke') + @test.idempotent_id('49c8ea35-83a9-453a-bd23-239cf3b13929') + def test_list_regular_rule_types(self): + self._test_list_rule_types(self.client) + + def _test_list_rule_types(self, client): + # List supported rule types + # TODO(QoS): since in gate we run both ovs and linuxbridge ml2 drivers, + # and since Linux Bridge ml2 driver does not have QoS support yet, ml2 + # plugin reports no rule types are supported. 
Once linuxbridge will + # receive support for QoS, the list of expected rule types will change. + # + # In theory, we could make the test conditional on which ml2 drivers + # are enabled in gate (or more specifically, on which supported qos + # rules are claimed by core plugin), but that option doesn't seem to be + # available thru tempest_lib framework + expected_rule_types = [] + expected_rule_details = ['type'] + + rule_types = client.list_qos_rule_types() + actual_list_rule_types = rule_types['rule_types'] + actual_rule_types = [rule['type'] for rule in actual_list_rule_types] + + # Verify that only required fields present in rule details + for rule in actual_list_rule_types: + self.assertEqual(tuple(rule.keys()), tuple(expected_rule_details)) + + # Verify if expected rules are present in the actual rules list + for rule in expected_rule_types: + self.assertIn(rule, actual_rule_types) + + def _disassociate_network(self, client, network_id): + client.update_network(network_id, qos_policy_id=None) + updated_network = self.admin_client.show_network(network_id) + self.assertIsNone(updated_network['network']['qos_policy_id']) + + @test.attr(type='smoke') + @test.idempotent_id('65b9ef75-1911-406a-bbdb-ca1d68d528b0') + def test_policy_association_with_admin_network(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + network = self.create_shared_network('test network', + qos_policy_id=policy['id']) + + retrieved_network = self.admin_client.show_network(network['id']) + self.assertEqual( + policy['id'], retrieved_network['network']['qos_policy_id']) + + self._disassociate_network(self.admin_client, network['id']) + + @test.attr(type='smoke') + @test.idempotent_id('1738de5d-0476-4163-9022-5e1b548c208e') + def test_policy_association_with_tenant_network(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=True) + network = self.create_network('test network', + 
qos_policy_id=policy['id']) + + retrieved_network = self.admin_client.show_network(network['id']) + self.assertEqual( + policy['id'], retrieved_network['network']['qos_policy_id']) + + self._disassociate_network(self.client, network['id']) + + @test.attr(type='smoke') + @test.idempotent_id('9efe63d0-836f-4cc2-b00c-468e63aa614e') + def test_policy_association_with_network_nonexistent_policy(self): + self.assertRaises( + exceptions.NotFound, + self.create_network, + 'test network', + qos_policy_id='9efe63d0-836f-4cc2-b00c-468e63aa614e') + + @test.attr(type='smoke') + @test.idempotent_id('1aa55a79-324f-47d9-a076-894a8fc2448b') + def test_policy_association_with_network_non_shared_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + self.assertRaises( + exceptions.NotFound, + self.create_network, + 'test network', qos_policy_id=policy['id']) + + @test.attr(type='smoke') + @test.idempotent_id('09a9392c-1359-4cbb-989f-fb768e5834a8') + def test_policy_update_association_with_admin_network(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + network = self.create_shared_network('test network') + retrieved_network = self.admin_client.show_network(network['id']) + self.assertIsNone(retrieved_network['network']['qos_policy_id']) + + self.admin_client.update_network(network['id'], + qos_policy_id=policy['id']) + retrieved_network = self.admin_client.show_network(network['id']) + self.assertEqual( + policy['id'], retrieved_network['network']['qos_policy_id']) + + self._disassociate_network(self.admin_client, network['id']) + + def _disassociate_port(self, port_id): + self.client.update_port(port_id, qos_policy_id=None) + updated_port = self.admin_client.show_port(port_id) + self.assertIsNone(updated_port['port']['qos_policy_id']) + + @test.attr(type='smoke') + @test.idempotent_id('98fcd95e-84cf-4746-860e-44692e674f2e') + def 
test_policy_association_with_port_shared_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=True) + network = self.create_shared_network('test network') + port = self.create_port(network, qos_policy_id=policy['id']) + + retrieved_port = self.admin_client.show_port(port['id']) + self.assertEqual( + policy['id'], retrieved_port['port']['qos_policy_id']) + + self._disassociate_port(port['id']) + + @test.attr(type='smoke') + @test.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e') + def test_policy_association_with_port_nonexistent_policy(self): + network = self.create_shared_network('test network') + self.assertRaises( + exceptions.NotFound, + self.create_port, + network, + qos_policy_id='49e02f5a-e1dd-41d5-9855-cfa37f2d195e') + + @test.attr(type='smoke') + @test.idempotent_id('f53d961c-9fe5-4422-8b66-7add972c6031') + def test_policy_association_with_port_non_shared_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + network = self.create_shared_network('test network') + self.assertRaises( + exceptions.NotFound, + self.create_port, + network, qos_policy_id=policy['id']) + + @test.attr(type='smoke') + @test.idempotent_id('f8163237-fba9-4db5-9526-bad6d2343c76') + def test_policy_update_association_with_port_shared_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=True) + network = self.create_shared_network('test network') + port = self.create_port(network) + retrieved_port = self.admin_client.show_port(port['id']) + self.assertIsNone(retrieved_port['port']['qos_policy_id']) + + self.client.update_port(port['id'], qos_policy_id=policy['id']) + retrieved_port = self.admin_client.show_port(port['id']) + self.assertEqual( + policy['id'], retrieved_port['port']['qos_policy_id']) + + self._disassociate_port(port['id']) + + @test.attr(type='smoke') + 
@test.idempotent_id('18163237-8ba9-4db5-9525-bad6d2343c75') + def test_delete_not_allowed_if_policy_in_use_by_network(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=True) + network = self.create_shared_network( + 'test network', qos_policy_id=policy['id']) + self.assertRaises( + exceptions.Conflict, + self.admin_client.delete_qos_policy, policy['id']) + + self._disassociate_network(self.admin_client, network['id']) + self.admin_client.delete_qos_policy(policy['id']) + + @test.attr(type='smoke') + @test.idempotent_id('24153230-84a9-4dd5-9525-bad6d2343c75') + def test_delete_not_allowed_if_policy_in_use_by_port(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=True) + network = self.create_shared_network('test network') + port = self.create_port(network, qos_policy_id=policy['id']) + self.assertRaises( + exceptions.Conflict, + self.admin_client.delete_qos_policy, policy['id']) + + self._disassociate_port(port['id']) + self.admin_client.delete_qos_policy(policy['id']) + + @test.attr(type='smoke') + @test.idempotent_id('a2a5849b-dd06-4b18-9664-0b6828a1fc27') + def test_qos_policy_delete_with_rules(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + self.admin_client.create_bandwidth_limit_rule( + policy['id'], 200, 1337)['bandwidth_limit_rule'] + + self.admin_client.delete_qos_policy(policy['id']) + + with testtools.ExpectedException(exceptions.NotFound): + self.admin_client.show_qos_policy(policy['id']) + + +class QosBandwidthLimitRuleTestJSON(base.BaseAdminNetworkTest): + @classmethod + def resource_setup(cls): + super(QosBandwidthLimitRuleTestJSON, cls).resource_setup() + if not test.is_extension_enabled('qos', 'network'): + msg = "qos extension not enabled." 
+ raise cls.skipException(msg) + + @test.attr(type='smoke') + @test.idempotent_id('8a59b00b-3e9c-4787-92f8-93a5cdf5e378') + def test_rule_create(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'], + max_kbps=200, + max_burst_kbps=1337) + + # Test 'show rule' + retrieved_rule = self.admin_client.show_bandwidth_limit_rule( + policy['id'], rule['id']) + retrieved_rule = retrieved_rule['bandwidth_limit_rule'] + self.assertEqual(rule['id'], retrieved_rule['id']) + self.assertEqual(200, retrieved_rule['max_kbps']) + self.assertEqual(1337, retrieved_rule['max_burst_kbps']) + + # Test 'list rules' + rules = self.admin_client.list_bandwidth_limit_rules(policy['id']) + rules = rules['bandwidth_limit_rules'] + rules_ids = [r['id'] for r in rules] + self.assertIn(rule['id'], rules_ids) + + # Test 'show policy' + retrieved_policy = self.admin_client.show_qos_policy(policy['id']) + policy_rules = retrieved_policy['policy']['rules'] + self.assertEqual(1, len(policy_rules)) + self.assertEqual(rule['id'], policy_rules[0]['id']) + self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, + policy_rules[0]['type']) + + @test.attr(type='smoke') + @test.idempotent_id('8a59b00b-ab01-4787-92f8-93a5cdf5e378') + def test_rule_create_fail_for_the_same_type(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + self.create_qos_bandwidth_limit_rule(policy_id=policy['id'], + max_kbps=200, + max_burst_kbps=1337) + + self.assertRaises(exceptions.Conflict, + self.create_qos_bandwidth_limit_rule, + policy_id=policy['id'], + max_kbps=201, max_burst_kbps=1338) + + @test.attr(type='smoke') + @test.idempotent_id('149a6988-2568-47d2-931e-2dbc858943b3') + def test_rule_update(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + rule = 
self.create_qos_bandwidth_limit_rule(policy_id=policy['id'], + max_kbps=1, + max_burst_kbps=1) + + self.admin_client.update_bandwidth_limit_rule(policy['id'], + rule['id'], + max_kbps=200, + max_burst_kbps=1337) + + retrieved_policy = self.admin_client.show_bandwidth_limit_rule( + policy['id'], rule['id']) + retrieved_policy = retrieved_policy['bandwidth_limit_rule'] + self.assertEqual(200, retrieved_policy['max_kbps']) + self.assertEqual(1337, retrieved_policy['max_burst_kbps']) + + @test.attr(type='smoke') + @test.idempotent_id('67ee6efd-7b33-4a68-927d-275b4f8ba958') + def test_rule_delete(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False) + rule = self.admin_client.create_bandwidth_limit_rule( + policy['id'], 200, 1337)['bandwidth_limit_rule'] + + retrieved_policy = self.admin_client.show_bandwidth_limit_rule( + policy['id'], rule['id']) + retrieved_policy = retrieved_policy['bandwidth_limit_rule'] + self.assertEqual(rule['id'], retrieved_policy['id']) + + self.admin_client.delete_bandwidth_limit_rule(policy['id'], rule['id']) + self.assertRaises(exceptions.NotFound, + self.admin_client.show_bandwidth_limit_rule, + policy['id'], rule['id']) + + @test.attr(type='smoke') + @test.idempotent_id('f211222c-5808-46cb-a961-983bbab6b852') + def test_rule_create_rule_nonexistent_policy(self): + self.assertRaises( + exceptions.NotFound, + self.create_qos_bandwidth_limit_rule, + 'policy', 200, 1337) + + @test.attr(type='smoke') + @test.idempotent_id('eed8e2a6-22da-421b-89b9-935a2c1a1b50') + def test_policy_create_forbidden_for_regular_tenants(self): + self.assertRaises( + exceptions.Forbidden, + self.client.create_qos_policy, + 'test-policy', 'test policy', False) + + @test.attr(type='smoke') + @test.idempotent_id('a4a2e7ad-786f-4927-a85a-e545a93bd274') + def test_rule_create_forbidden_for_regular_tenants(self): + self.assertRaises( + exceptions.Forbidden, + self.client.create_bandwidth_limit_rule, + 'policy', 1, 2) + + 
@test.attr(type='smoke') + @test.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2') + def test_get_rules_by_policy(self): + policy1 = self.create_qos_policy(name='test-policy1', + description='test policy1', + shared=False) + rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy1['id'], + max_kbps=200, + max_burst_kbps=1337) + + policy2 = self.create_qos_policy(name='test-policy2', + description='test policy2', + shared=False) + rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy2['id'], + max_kbps=5000, + max_burst_kbps=2523) + + # Test 'list rules' + rules = self.admin_client.list_bandwidth_limit_rules(policy1['id']) + rules = rules['bandwidth_limit_rules'] + rules_ids = [r['id'] for r in rules] + self.assertIn(rule1['id'], rules_ids) + self.assertNotIn(rule2['id'], rules_ids) diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 476f6464ab5..cb5fb3ee66f 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -35,6 +35,7 @@ import six import testtools from neutron.agent.linux import external_process +from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg from neutron.callbacks import manager as registry_manager from neutron.callbacks import registry from neutron.common import config @@ -126,6 +127,11 @@ class DietTestCase(testtools.TestCase): def setUp(self): super(DietTestCase, self).setUp() + # FIXME(amuller): this must be called in the Neutron unit tests base + # class to initialize the DB connection string. Moving this may cause + # non-deterministic failures. Bug #1489098 for more info. + config.set_db_defaults() + # Configure this first to ensure pm debugging support for setUp() debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER') if debugger: @@ -290,6 +296,7 @@ class BaseTestCase(DietTestCase): policy.init() self.addCleanup(policy.reset) + self.addCleanup(rpc_consumer_reg.clear) def get_new_temp_dir(self): """Create a new temporary directory. 
diff --git a/neutron/plugins/ibm/common/constants.py b/neutron/tests/common/agents/l2_extensions.py similarity index 56% rename from neutron/plugins/ibm/common/constants.py rename to neutron/tests/common/agents/l2_extensions.py index f296c49e21b..11b354eeb3b 100644 --- a/neutron/plugins/ibm/common/constants.py +++ b/neutron/tests/common/agents/l2_extensions.py @@ -1,5 +1,4 @@ -# Copyright 2014 IBM Corp. -# +# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,17 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron.agent.linux import utils as agent_utils -from six.moves import http_client as httplib -# Topic for info notifications between the plugin and agent -INFO = 'info' +def wait_until_bandwidth_limit_rule_applied(bridge, port_vif, rule): + def _bandwidth_limit_rule_applied(): + bw_rule = bridge.get_egress_bw_limit_for_port(port_vif) + expected = None, None + if rule: + expected = rule.max_kbps, rule.max_burst_kbps + return bw_rule == expected -TENANT_TYPE_OF = 'OF' -TENANT_TYPE_OVERLAY = 'OVERLAY' - -HTTP_ACCEPTABLE = [httplib.OK, - httplib.CREATED, - httplib.ACCEPTED, - httplib.NO_CONTENT - ] + agent_utils.wait_until_true(_bandwidth_limit_rule_applied) diff --git a/neutron/tests/common/l3_test_common.py b/neutron/tests/common/l3_test_common.py index 6045f56bb44..1c3a9f36db5 100644 --- a/neutron/tests/common/l3_test_common.py +++ b/neutron/tests/common/l3_test_common.py @@ -244,6 +244,34 @@ def router_append_subnet(router, count=1, ip_version=4, router[l3_constants.INTERFACE_KEY] = interfaces +def router_append_pd_enabled_subnet(router, count=1): + interfaces = router[l3_constants.INTERFACE_KEY] + current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6 + for p in interfaces for subnet in p['subnets']) + + mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') + mac_address.dialect = netaddr.mac_unix + pd_intfs = 
[] + for i in range(current, current + count): + subnet_id = _uuid() + intf = {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '::1', + 'prefixlen': 64, + 'subnet_id': subnet_id}], + 'mac_address': str(mac_address), + 'subnets': [{'id': subnet_id, + 'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX, + 'gateway_ip': '::1', + 'ipv6_ra_mode': l3_constants.IPV6_SLAAC, + 'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]} + interfaces.append(intf) + pd_intfs.append(intf) + mac_address.value += 1 + return pd_intfs + + def prepare_ext_gw_test(context, ri, dual_stack=False): subnet_id = _uuid() fixed_ips = [{'subnet_id': subnet_id, diff --git a/neutron/tests/common/machine_fixtures.py b/neutron/tests/common/machine_fixtures.py index 65a1a433cd1..e61ece189b2 100644 --- a/neutron/tests/common/machine_fixtures.py +++ b/neutron/tests/common/machine_fixtures.py @@ -19,7 +19,7 @@ from neutron.agent.linux import ip_lib from neutron.tests.common import net_helpers -class FakeMachine(fixtures.Fixture): +class FakeMachineBase(fixtures.Fixture): """Create a fake machine. 
:ivar bridge: bridge on which the fake machine is bound @@ -36,6 +36,39 @@ class FakeMachine(fixtures.Fixture): :type port: IPDevice """ + def __init__(self): + self.port = None + + def _setUp(self): + ns_fixture = self.useFixture( + net_helpers.NamespaceFixture()) + self.namespace = ns_fixture.name + + def execute(self, *args, **kwargs): + ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) + return ns_ip_wrapper.netns.execute(*args, **kwargs) + + def assert_ping(self, dst_ip): + net_helpers.assert_ping(self.namespace, dst_ip) + + def assert_no_ping(self, dst_ip): + net_helpers.assert_no_ping(self.namespace, dst_ip) + + @property + def ip(self): + raise NotImplementedError() + + @property + def ip_cidr(self): + raise NotImplementedError() + + @property + def mac_address(self): + return self.port.link.address + + +class FakeMachine(FakeMachineBase): + def __init__(self, bridge, ip_cidr, gateway_ip=None): super(FakeMachine, self).__init__() self.bridge = bridge @@ -43,9 +76,7 @@ class FakeMachine(fixtures.Fixture): self.gateway_ip = gateway_ip def _setUp(self): - ns_fixture = self.useFixture( - net_helpers.NamespaceFixture()) - self.namespace = ns_fixture.name + super(FakeMachine, self)._setUp() self.port = self.useFixture( net_helpers.PortFixture.get(self.bridge, self.namespace)).port @@ -68,26 +99,12 @@ class FakeMachine(fixtures.Fixture): self.port.addr.delete(self._ip_cidr) self._ip_cidr = ip_cidr - @property - def mac_address(self): - return self.port.link.address - - @mac_address.setter + @FakeMachineBase.mac_address.setter def mac_address(self, mac_address): self.port.link.set_down() self.port.link.set_address(mac_address) self.port.link.set_up() - def execute(self, *args, **kwargs): - ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) - return ns_ip_wrapper.netns.execute(*args, **kwargs) - - def assert_ping(self, dst_ip): - net_helpers.assert_ping(self.namespace, dst_ip) - - def assert_no_ping(self, dst_ip): - net_helpers.assert_no_ping(self.namespace, dst_ip) - 
class PeerMachines(fixtures.Fixture): """Create 'amount' peered machines on an ip_cidr. diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index d4bfe3736b4..577318146c3 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -14,6 +14,8 @@ # import abc +from concurrent import futures +import contextlib import functools import os import random @@ -25,15 +27,18 @@ import subprocess import fixtures import netaddr +from oslo_config import cfg from oslo_utils import uuidutils import six from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.linux import bridge_lib +from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants as n_const +from neutron.db import db_base_plugin_common from neutron.tests import base as tests_base from neutron.tests.common import base as common_base from neutron.tests import tools @@ -86,6 +91,17 @@ def assert_ping(src_namespace, dst_ip, timeout=1, count=1): dst_ip]) +@contextlib.contextmanager +def async_ping(namespace, ips): + with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor: + fs = [executor.submit(assert_ping, namespace, ip, count=10) + for ip in ips] + yield lambda: all(f.done() for f in fs) + futures.wait(fs) + for f in fs: + f.result() + + def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1): try: assert_ping(src_namespace, dst_ip, timeout, count) @@ -420,10 +436,13 @@ class PortFixture(fixtures.Fixture): :ivar bridge: port bridge """ - def __init__(self, bridge=None, namespace=None): + def __init__(self, bridge=None, namespace=None, mac=None, port_id=None): super(PortFixture, self).__init__() self.bridge = bridge self.namespace = namespace + self.mac = ( + mac or db_base_plugin_common.DbBasePluginCommon._generate_mac()) + self.port_id = port_id or uuidutils.generate_uuid() @abc.abstractmethod def 
_create_bridge_fixture(self): @@ -436,10 +455,10 @@ class PortFixture(fixtures.Fixture): self.bridge = self.useFixture(self._create_bridge_fixture()).bridge @classmethod - def get(cls, bridge, namespace=None): + def get(cls, bridge, namespace=None, mac=None, port_id=None): """Deduce PortFixture class from bridge type and instantiate it.""" if isinstance(bridge, ovs_lib.OVSBridge): - return OVSPortFixture(bridge, namespace) + return OVSPortFixture(bridge, namespace, mac, port_id) if isinstance(bridge, bridge_lib.BridgeDevice): return LinuxBridgePortFixture(bridge, namespace) if isinstance(bridge, VethBridge): @@ -468,30 +487,26 @@ class OVSBridgeFixture(fixtures.Fixture): class OVSPortFixture(PortFixture): - def __init__(self, bridge=None, namespace=None, attrs=None): - super(OVSPortFixture, self).__init__(bridge, namespace) - if attrs is None: - attrs = [] - self.attrs = attrs - def _create_bridge_fixture(self): return OVSBridgeFixture() def _setUp(self): super(OVSPortFixture, self)._setUp() - port_name = common_base.create_resource(PORT_PREFIX, self.create_port) + interface_config = cfg.ConfigOpts() + interface_config.register_opts(interface.OPTS) + ovs_interface = interface.OVSInterfaceDriver(interface_config) + + port_name = tests_base.get_rand_device_name(PORT_PREFIX) + ovs_interface.plug_new( + None, + self.port_id, + port_name, + self.mac, + bridge=self.bridge.br_name, + namespace=self.namespace) self.addCleanup(self.bridge.delete_port, port_name) - self.port = ip_lib.IPDevice(port_name) - - ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) - ns_ip_wrapper.add_device_to_namespace(self.port) - self.port.link.set_up() - - def create_port(self, name): - self.attrs.insert(0, ('type', 'internal')) - self.bridge.add_port(name, *self.attrs) - return name + self.port = ip_lib.IPDevice(port_name, self.namespace) class LinuxBridgeFixture(fixtures.Fixture): diff --git a/neutron/tests/contrib/gate_hook.sh b/neutron/tests/contrib/gate_hook.sh index 3da5775c032..99d0ccf2f9d 
100644 --- a/neutron/tests/contrib/gate_hook.sh +++ b/neutron/tests/contrib/gate_hook.sh @@ -1,21 +1,19 @@ #!/usr/bin/env bash - set -ex - VENV=${1:-"dsvm-functional"} +GATE_DEST=$BASE/new +DEVSTACK_PATH=$GATE_DEST/devstack if [ "$VENV" == "dsvm-functional" ] || [ "$VENV" == "dsvm-fullstack" ] then # The following need to be set before sourcing # configure_for_func_testing. - GATE_DEST=$BASE/new GATE_STACK_USER=stack NEUTRON_PATH=$GATE_DEST/neutron PROJECT_NAME=neutron - DEVSTACK_PATH=$GATE_DEST/devstack IS_GATE=True source $NEUTRON_PATH/tools/configure_for_func_testing.sh @@ -26,10 +24,23 @@ then configure_host_for_func_testing elif [ "$VENV" == "api" ] then - if [[ -z "$DEVSTACK_LOCAL_CONFIG" ]]; then - export DEVSTACK_LOCAL_CONFIG="enable_plugin neutron-vpnaas git://git.openstack.org/openstack/neutron-vpnaas" - else - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin neutron-vpnaas git://git.openstack.org/openstack/neutron-vpnaas" - fi + cat > $DEVSTACK_PATH/local.conf < ./testrepository.subunit - .tox/$venv/bin/python $SCRIPTS_DIR/subunit2html.py ./testrepository.subunit testr_results.html + $SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html gzip -9 ./testrepository.subunit gzip -9 ./testr_results.html sudo mv ./*.gz /opt/stack/logs/ diff --git a/neutron/tests/etc/policy.json b/neutron/tests/etc/policy.json index 72756bdb630..9207142582e 100644 --- a/neutron/tests/etc/policy.json +++ b/neutron/tests/etc/policy.json @@ -1,8 +1,10 @@ { "context_is_admin": "role:admin", - "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "owner": "tenant_id:%(tenant_id)s", + "admin_or_owner": "rule:context_is_admin or rule:owner", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", @@ -62,7 +64,7 
@@ "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:allowed_address_pairs": "rule:admin_or_network_owner", - "get_port": "rule:admin_or_owner or rule:context_is_advsvc", + "get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", @@ -76,7 +78,7 @@ "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:allowed_address_pairs": "rule:admin_or_network_owner", - "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", + "delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", @@ -174,5 +176,23 @@ "update_service_profile": "rule:admin_only", "delete_service_profile": "rule:admin_only", "get_service_profiles": "rule:admin_only", - "get_service_profile": "rule:admin_only" + "get_service_profile": "rule:admin_only", + + "get_policy": "rule:regular_user", + "create_policy": "rule:admin_only", + "update_policy": "rule:admin_only", + "delete_policy": "rule:admin_only", + "get_policy_bandwidth_limit_rule": "rule:regular_user", + "create_policy_bandwidth_limit_rule": "rule:admin_only", + "delete_policy_bandwidth_limit_rule": "rule:admin_only", + "update_policy_bandwidth_limit_rule": "rule:admin_only", + "get_rule_type": "rule:regular_user", + + "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only", + "create_rbac_policy": "", + "create_rbac_policy:target_tenant": "rule:restrict_wildcard", + "update_rbac_policy": "rule:admin_or_owner", + "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", + "get_rbac_policy": "rule:admin_or_owner", + "delete_rbac_policy": 
"rule:admin_or_owner" } diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py index 579831524f0..2e95ba4bd24 100644 --- a/neutron/tests/fullstack/base.py +++ b/neutron/tests/fullstack/base.py @@ -15,8 +15,7 @@ from oslo_config import cfg from oslo_db.sqlalchemy import test_base -from neutron.db.migration.models import head # noqa -from neutron.db import model_base +from neutron.db.migration import cli as migration from neutron.tests.common import base from neutron.tests.fullstack.resources import client as client_resource @@ -24,17 +23,12 @@ from neutron.tests.fullstack.resources import client as client_resource class BaseFullStackTestCase(base.MySQLTestCase): """Base test class for full-stack tests.""" - def __init__(self, environment, *args, **kwargs): - super(BaseFullStackTestCase, self).__init__(*args, **kwargs) - self.environment = environment - - def setUp(self): + def setUp(self, environment): super(BaseFullStackTestCase, self).setUp() self.create_db_tables() - + self.environment = environment self.environment.test_name = self.get_name() self.useFixture(self.environment) - self.client = self.environment.neutron_server.client self.safe_client = self.useFixture( client_resource.ClientFixture(self.client)) @@ -62,11 +56,13 @@ class BaseFullStackTestCase(base.MySQLTestCase): 'password': test_base.DbFixture.PASSWORD, 'db_name': self.engine.url.database}) + alembic_config = migration.get_neutron_config() + alembic_config.neutron_config = cfg.CONF self.original_conn = cfg.CONF.database.connection self.addCleanup(self._revert_connection_address) cfg.CONF.set_override('connection', conn, group='database') - model_base.BASEV2.metadata.create_all(self.engine) + migration.do_alembic_command(alembic_config, 'upgrade', 'heads') def _revert_connection_address(self): cfg.CONF.set_override('connection', diff --git a/neutron/tests/fullstack/resources/client.py b/neutron/tests/fullstack/resources/client.py index 42350793c59..d51fc3df5e3 100644 --- 
a/neutron/tests/fullstack/resources/client.py +++ b/neutron/tests/fullstack/resources/client.py @@ -12,12 +12,24 @@ # License for the specific language governing permissions and limitations # under the License. # +import functools import fixtures +from neutronclient.common import exceptions from neutron.tests import base +def _safe_method(f): + @functools.wraps(f) + def delete(*args, **kwargs): + try: + return f(*args, **kwargs) + except exceptions.NotFound: + pass + return delete + + class ClientFixture(fixtures.Fixture): """Manage and cleanup neutron resources.""" @@ -32,7 +44,7 @@ class ClientFixture(fixtures.Fixture): body = {resource_type: spec} resp = create(body=body) data = resp[resource_type] - self.addCleanup(delete, data['id']) + self.addCleanup(_safe_method(delete), data['id']) return data def create_router(self, tenant_id, name=None, ha=False): @@ -65,8 +77,15 @@ class ClientFixture(fixtures.Fixture): return self._create_resource(resource_type, spec) + def create_port(self, tenant_id, network_id, hostname): + return self._create_resource( + 'port', + {'network_id': network_id, + 'tenant_id': tenant_id, + 'binding:host_id': hostname}) + def add_router_interface(self, router_id, subnet_id): body = {'subnet_id': subnet_id} self.client.add_interface_router(router=router_id, body=body) - self.addCleanup(self.client.remove_interface_router, + self.addCleanup(_safe_method(self.client.remove_interface_router), router=router_id, body=body) diff --git a/neutron/tests/fullstack/resources/config.py b/neutron/tests/fullstack/resources/config.py index 21df3e1aa46..c4efa8197f2 100644 --- a/neutron/tests/fullstack/resources/config.py +++ b/neutron/tests/fullstack/resources/config.py @@ -81,9 +81,11 @@ class ConfigFixture(fixtures.Fixture): then the dynamic configuration values won't change. The correct usage is initializing a new instance of the class. 
""" - def __init__(self, temp_dir, base_filename): + def __init__(self, env_desc, host_desc, temp_dir, base_filename): super(ConfigFixture, self).__init__() self.config = ConfigDict() + self.env_desc = env_desc + self.host_desc = host_desc self.temp_dir = temp_dir self.base_filename = base_filename @@ -96,14 +98,15 @@ class ConfigFixture(fixtures.Fixture): class NeutronConfigFixture(ConfigFixture): - def __init__(self, temp_dir, connection, rabbitmq_environment): + def __init__(self, env_desc, host_desc, temp_dir, + connection, rabbitmq_environment): super(NeutronConfigFixture, self).__init__( - temp_dir, base_filename='neutron.conf') + env_desc, host_desc, temp_dir, base_filename='neutron.conf') self.config.update({ 'DEFAULT': { 'host': self._generate_host(), - 'state_path': self._generate_state_path(temp_dir), + 'state_path': self._generate_state_path(self.temp_dir), 'lock_path': '$state_path/lock', 'bind_port': self._generate_port(), 'api_paste_config': self._generate_api_paste(), @@ -111,16 +114,18 @@ class NeutronConfigFixture(ConfigFixture): 'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin', 'service_plugins': ('neutron.services.l3_router.' 
'l3_router_plugin.L3RouterPlugin'), - 'rabbit_userid': rabbitmq_environment.user, - 'rabbit_password': rabbitmq_environment.password, - 'rabbit_hosts': '127.0.0.1', - 'rabbit_virtual_host': rabbitmq_environment.vhost, 'auth_strategy': 'noauth', 'verbose': 'True', 'debug': 'True', }, 'database': { 'connection': connection, + }, + 'oslo_messaging_rabbit': { + 'rabbit_userid': rabbitmq_environment.user, + 'rabbit_password': rabbitmq_environment.password, + 'rabbit_hosts': '127.0.0.1', + 'rabbit_virtual_host': rabbitmq_environment.vhost, } }) @@ -150,9 +155,9 @@ class NeutronConfigFixture(ConfigFixture): class ML2ConfigFixture(ConfigFixture): - def __init__(self, temp_dir, tenant_network_types): + def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types): super(ML2ConfigFixture, self).__init__( - temp_dir, base_filename='ml2_conf.ini') + env_desc, host_desc, temp_dir, base_filename='ml2_conf.ini') self.config.update({ 'ml2': { @@ -173,9 +178,10 @@ class ML2ConfigFixture(ConfigFixture): class OVSConfigFixture(ConfigFixture): - def __init__(self, temp_dir): + def __init__(self, env_desc, host_desc, temp_dir): super(OVSConfigFixture, self).__init__( - temp_dir, base_filename='openvswitch_agent.ini') + env_desc, host_desc, temp_dir, + base_filename='openvswitch_agent.ini') self.config.update({ 'ovs': { @@ -205,9 +211,9 @@ class OVSConfigFixture(ConfigFixture): class L3ConfigFixture(ConfigFixture): - def __init__(self, temp_dir, integration_bridge): + def __init__(self, env_desc, host_desc, temp_dir, integration_bridge): super(L3ConfigFixture, self).__init__( - temp_dir, base_filename='l3_agent.ini') + env_desc, host_desc, temp_dir, base_filename='l3_agent.ini') self.config.update({ 'DEFAULT': { diff --git a/neutron/tests/fullstack/resources/environment.py b/neutron/tests/fullstack/resources/environment.py index 77f868e7f3e..67660f813b7 100644 --- a/neutron/tests/fullstack/resources/environment.py +++ b/neutron/tests/fullstack/resources/environment.py @@ 
-25,13 +25,21 @@ from neutron.tests.fullstack.resources import process LOG = logging.getLogger(__name__) +class EnvironmentDescription(object): + """A set of characteristics of an environment setup. + + Does the setup, as a whole, support tunneling? How about l2pop? + """ + pass + + class HostDescription(object): """A set of characteristics of an environment Host. What agents should the host spawn? What mode should each agent operate under? """ - def __init__(self, l3_agent=True): + def __init__(self, l3_agent=False): self.l3_agent = l3_agent @@ -50,18 +58,20 @@ class Host(fixtures.Fixture): and disconnects the host from other hosts. """ - def __init__(self, test_name, neutron_config, host_description, + def __init__(self, env_desc, host_desc, + test_name, neutron_config, central_data_bridge, central_external_bridge): + self.env_desc = env_desc + self.host_desc = host_desc self.test_name = test_name self.neutron_config = neutron_config - self.host_description = host_description self.central_data_bridge = central_data_bridge self.central_external_bridge = central_external_bridge self.agents = {} def _setUp(self): agent_cfg_fixture = config.OVSConfigFixture( - self.neutron_config.temp_dir) + self.env_desc, self.host_desc, self.neutron_config.temp_dir) self.useFixture(agent_cfg_fixture) br_phys = self.useFixture( @@ -71,11 +81,13 @@ class Host(fixtures.Fixture): self.ovs_agent = self.useFixture( process.OVSAgentFixture( + self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture)) - if self.host_description.l3_agent: + if self.host_desc.l3_agent: l3_agent_cfg_fixture = self.useFixture( config.L3ConfigFixture( + self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.ovs_agent.agent_cfg_fixture.get_br_int_name())) br_ex = self.useFixture( @@ -84,6 +96,7 @@ class Host(fixtures.Fixture): self.connect_to_external_network(br_ex) self.l3_agent = self.useFixture( process.L3AgentFixture( + self.env_desc, self.host_desc, self.test_name, 
self.neutron_config, l3_agent_cfg_fixture)) @@ -98,6 +111,10 @@ class Host(fixtures.Fixture): net_helpers.create_patch_ports( self.central_external_bridge, host_external_bridge) + @property + def hostname(self): + return self.neutron_config.config.DEFAULT.host + @property def l3_agent(self): return self.agents['l3'] @@ -124,13 +141,15 @@ class Environment(fixtures.Fixture): the type of Host to create. """ - def __init__(self, hosts_descriptions): + def __init__(self, env_desc, hosts_desc): """ - :param hosts_descriptions: A list of HostDescription instances. + :param env_desc: An EnvironmentDescription instance. + :param hosts_desc: A list of HostDescription instances. """ super(Environment, self).__init__() - self.hosts_descriptions = hosts_descriptions + self.env_desc = env_desc + self.hosts_desc = hosts_desc self.hosts = [] def wait_until_env_is_up(self): @@ -144,33 +163,37 @@ class Environment(fixtures.Fixture): except nc_exc.NeutronClientException: return False - def _create_host(self, description): + def _create_host(self, host_desc): temp_dir = self.useFixture(fixtures.TempDir()).path neutron_config = config.NeutronConfigFixture( - temp_dir, cfg.CONF.database.connection, - self.rabbitmq_environment) + self.env_desc, host_desc, temp_dir, + cfg.CONF.database.connection, self.rabbitmq_environment) self.useFixture(neutron_config) return self.useFixture( - Host(self.test_name, + Host(self.env_desc, + host_desc, + self.test_name, neutron_config, - description, self.central_data_bridge, self.central_external_bridge)) def _setUp(self): self.temp_dir = self.useFixture(fixtures.TempDir()).path + self.rabbitmq_environment = self.useFixture( process.RabbitmqEnvironmentFixture()) + plugin_cfg_fixture = self.useFixture( - config.ML2ConfigFixture(self.temp_dir, 'vlan')) + config.ML2ConfigFixture( + self.env_desc, None, self.temp_dir, 'vlan')) neutron_cfg_fixture = self.useFixture( config.NeutronConfigFixture( - self.temp_dir, - cfg.CONF.database.connection, - 
self.rabbitmq_environment)) + self.env_desc, None, self.temp_dir, + cfg.CONF.database.connection, self.rabbitmq_environment)) self.neutron_server = self.useFixture( process.NeutronServerFixture( + self.env_desc, None, self.test_name, neutron_cfg_fixture, plugin_cfg_fixture)) self.central_data_bridge = self.useFixture( @@ -178,7 +201,6 @@ class Environment(fixtures.Fixture): self.central_external_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-ex')).bridge - self.hosts = [self._create_host(description) for description in - self.hosts_descriptions] + self.hosts = [self._create_host(desc) for desc in self.hosts_desc] self.wait_until_env_is_up() diff --git a/neutron/tests/fullstack/resources/machine.py b/neutron/tests/fullstack/resources/machine.py new file mode 100644 index 00000000000..3553322203d --- /dev/null +++ b/neutron/tests/fullstack/resources/machine.py @@ -0,0 +1,71 @@ +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr + +from neutron.agent.linux import utils +from neutron.tests.common import machine_fixtures +from neutron.tests.common import net_helpers + + +class FakeFullstackMachine(machine_fixtures.FakeMachineBase): + def __init__(self, host, network_id, tenant_id, safe_client): + super(FakeFullstackMachine, self).__init__() + self.bridge = host.ovs_agent.br_int + self.host_binding = host.hostname + self.tenant_id = tenant_id + self.network_id = network_id + self.safe_client = safe_client + + def _setUp(self): + super(FakeFullstackMachine, self)._setUp() + + self.neutron_port = self.safe_client.create_port( + network_id=self.network_id, + tenant_id=self.tenant_id, + hostname=self.host_binding) + self.neutron_port_id = self.neutron_port['id'] + mac_address = self.neutron_port['mac_address'] + + self.port = self.useFixture( + net_helpers.PortFixture.get( + self.bridge, self.namespace, mac_address, + self.neutron_port_id)).port + + self._ip = self.neutron_port['fixed_ips'][0]['ip_address'] + subnet_id = self.neutron_port['fixed_ips'][0]['subnet_id'] + subnet = self.safe_client.client.show_subnet(subnet_id) + prefixlen = netaddr.IPNetwork(subnet['subnet']['cidr']).prefixlen + self._ip_cidr = '%s/%s' % (self._ip, prefixlen) + + # TODO(amuller): Support DHCP + self.port.addr.add(self.ip_cidr) + + self.gateway_ip = subnet['subnet']['gateway_ip'] + if self.gateway_ip: + net_helpers.set_namespace_gateway(self.port, self.gateway_ip) + + @property + def ip(self): + return self._ip + + @property + def ip_cidr(self): + return self._ip_cidr + + def block_until_boot(self): + utils.wait_until_true( + lambda: (self.safe_client.client.show_port(self.neutron_port_id) + ['port']['status'] == 'ACTIVE'), + sleep=3) diff --git a/neutron/tests/fullstack/resources/process.py b/neutron/tests/fullstack/resources/process.py index 1a818426c47..4414102e212 100644 --- a/neutron/tests/fullstack/resources/process.py +++ b/neutron/tests/fullstack/resources/process.py @@ -90,7 +90,10 @@ class 
NeutronServerFixture(fixtures.Fixture): NEUTRON_SERVER = "neutron-server" - def __init__(self, test_name, neutron_cfg_fixture, plugin_cfg_fixture): + def __init__(self, env_desc, host_desc, + test_name, neutron_cfg_fixture, plugin_cfg_fixture): + self.env_desc = env_desc + self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.plugin_cfg_fixture = plugin_cfg_fixture @@ -125,7 +128,10 @@ class OVSAgentFixture(fixtures.Fixture): NEUTRON_OVS_AGENT = "neutron-openvswitch-agent" - def __init__(self, test_name, neutron_cfg_fixture, agent_cfg_fixture): + def __init__(self, env_desc, host_desc, + test_name, neutron_cfg_fixture, agent_cfg_fixture): + self.env_desc = env_desc + self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.neutron_config = self.neutron_cfg_fixture.config @@ -151,8 +157,11 @@ class L3AgentFixture(fixtures.Fixture): NEUTRON_L3_AGENT = "neutron-l3-agent" - def __init__(self, test_name, neutron_cfg_fixture, l3_agent_cfg_fixture): + def __init__(self, env_desc, host_desc, + test_name, neutron_cfg_fixture, l3_agent_cfg_fixture): super(L3AgentFixture, self).__init__() + self.env_desc = env_desc + self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.l3_agent_cfg_fixture = l3_agent_cfg_fixture diff --git a/neutron/tests/fullstack/test_connectivity.py b/neutron/tests/fullstack/test_connectivity.py new file mode 100644 index 00000000000..b0f546a3eb3 --- /dev/null +++ b/neutron/tests/fullstack/test_connectivity.py @@ -0,0 +1,50 @@ +# Copyright 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils + +from neutron.tests.fullstack import base +from neutron.tests.fullstack.resources import environment +from neutron.tests.fullstack.resources import machine + + +class TestConnectivitySameNetwork(base.BaseFullStackTestCase): + + def setUp(self): + host_descriptions = [ + environment.HostDescription() for _ in range(2)] + env = environment.Environment(environment.EnvironmentDescription(), + host_descriptions) + super(TestConnectivitySameNetwork, self).setUp(env) + + def test_connectivity(self): + tenant_uuid = uuidutils.generate_uuid() + + network = self.safe_client.create_network(tenant_uuid) + self.safe_client.create_subnet( + tenant_uuid, network['id'], '20.0.0.0/24') + + vms = [ + self.useFixture( + machine.FakeFullstackMachine( + self.environment.hosts[i], + network['id'], + tenant_uuid, + self.safe_client)) + for i in range(2)] + + for vm in vms: + vm.block_until_boot() + + vms[0].assert_ping(vms[1].ip) diff --git a/neutron/tests/fullstack/test_l3_agent.py b/neutron/tests/fullstack/test_l3_agent.py index 9f8036c3bfb..28f2419b878 100644 --- a/neutron/tests/fullstack/test_l3_agent.py +++ b/neutron/tests/fullstack/test_l3_agent.py @@ -25,11 +25,12 @@ from neutron.tests.fullstack.resources import environment class TestLegacyL3Agent(base.BaseFullStackTestCase): - def __init__(self, *args, **kwargs): - super(TestLegacyL3Agent, self).__init__( - environment.Environment( - [environment.HostDescription(l3_agent=True)]), - *args, **kwargs) + + def setUp(self): + host_descriptions = [environment.HostDescription(l3_agent=True)] 
+ env = environment.Environment(environment.EnvironmentDescription(), + host_descriptions) + super(TestLegacyL3Agent, self).setUp(env) def _get_namespace(self, router_id): return namespaces.build_ns_name(l3_agent.NS_PREFIX, router_id) @@ -54,12 +55,13 @@ class TestLegacyL3Agent(base.BaseFullStackTestCase): class TestHAL3Agent(base.BaseFullStackTestCase): - def __init__(self, *args, **kwargs): - super(TestHAL3Agent, self).__init__( - environment.Environment( - [environment.HostDescription(l3_agent=True), - environment.HostDescription(l3_agent=True)]), - *args, **kwargs) + + def setUp(self): + host_descriptions = [ + environment.HostDescription(l3_agent=True) for _ in range(2)] + env = environment.Environment(environment.EnvironmentDescription(), + host_descriptions) + super(TestHAL3Agent, self).setUp(env) def _is_ha_router_active_on_one_agent(self, router_id): agents = self.client.list_l3_agent_hosting_routers(router_id) diff --git a/neutron/plugins/vmware/extensions/__init__.py b/neutron/tests/functional/agent/l2/__init__.py similarity index 100% rename from neutron/plugins/vmware/extensions/__init__.py rename to neutron/tests/functional/agent/l2/__init__.py diff --git a/neutron/tests/functional/agent/l2/base.py b/neutron/tests/functional/agent/l2/base.py new file mode 100644 index 00000000000..ae6182e0586 --- /dev/null +++ b/neutron/tests/functional/agent/l2/base.py @@ -0,0 +1,292 @@ +# Copyright (c) 2015 Red Hat, Inc. +# Copyright (c) 2015 SUSE Linux Products GmbH +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +import eventlet +import mock +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import uuidutils + +from neutron.agent.common import config as agent_config +from neutron.agent.common import ovs_lib +from neutron.agent.l2.extensions import manager as ext_manager +from neutron.agent.linux import interface +from neutron.agent.linux import polling +from neutron.agent.linux import utils as agent_utils +from neutron.common import config as common_config +from neutron.common import constants as n_const +from neutron.common import utils +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \ + as ovs_config +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_int +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_phys +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_tun +from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ + as ovs_agent +from neutron.tests.common import net_helpers +from neutron.tests.functional.agent.linux import base + +LOG = logging.getLogger(__name__) + + +class OVSAgentTestFramework(base.BaseOVSLinuxTestCase): + + def setUp(self): + super(OVSAgentTestFramework, self).setUp() + agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 
+ 'ovs_neutron_agent.OVSPluginApi') + mock.patch(agent_rpc).start() + mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() + self.br_int = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, + prefix='br-int') + self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, + prefix='br-tun') + patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") + self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:] + self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:] + self.ovs = ovs_lib.BaseOVS() + self.config = self._configure_agent() + self.driver = interface.OVSInterfaceDriver(self.config) + self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name + + def _get_config_opts(self): + config = cfg.ConfigOpts() + config.register_opts(common_config.core_opts) + config.register_opts(interface.OPTS) + config.register_opts(ovs_config.ovs_opts, "OVS") + config.register_opts(ovs_config.agent_opts, "AGENT") + agent_config.register_interface_driver_opts_helper(config) + agent_config.register_agent_state_opts_helper(config) + ext_manager.register_opts(config) + return config + + def _configure_agent(self): + config = self._get_config_opts() + config.set_override( + 'interface_driver', + 'neutron.agent.linux.interface.OVSInterfaceDriver') + config.set_override('integration_bridge', self.br_int, "OVS") + config.set_override('ovs_integration_bridge', self.br_int) + config.set_override('tunnel_bridge', self.br_tun, "OVS") + config.set_override('int_peer_patch_port', self.patch_tun, "OVS") + config.set_override('tun_peer_patch_port', self.patch_int, "OVS") + config.set_override('host', 'ovs-agent') + return config + + def _bridge_classes(self): + return { + 'br_int': br_int.OVSIntegrationBridge, + 'br_phys': br_phys.OVSPhysicalBridge, + 'br_tun': br_tun.OVSTunnelBridge + } + + def create_agent(self, create_tunnels=True): + if create_tunnels: + tunnel_types = [p_const.TYPE_VXLAN] + else: + tunnel_types = None + local_ip = '192.168.10.1' + 
bridge_mappings = {'physnet': self.br_int} + agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(), + self.br_int, self.br_tun, + local_ip, bridge_mappings, + polling_interval=1, + tunnel_types=tunnel_types, + prevent_arp_spoofing=False, + conf=self.config) + self.addCleanup(self.ovs.delete_bridge, self.br_int) + if tunnel_types: + self.addCleanup(self.ovs.delete_bridge, self.br_tun) + agent.sg_agent = mock.Mock() + return agent + + def start_agent(self, agent): + self.setup_agent_rpc_mocks(agent) + polling_manager = polling.InterfacePollingMinimizer() + self.addCleanup(polling_manager.stop) + polling_manager.start() + agent_utils.wait_until_true( + polling_manager._monitor.is_active) + agent.check_ovs_status = mock.Mock( + return_value=constants.OVS_NORMAL) + t = eventlet.spawn(agent.rpc_loop, polling_manager) + + def stop_agent(agent, rpc_loop_thread): + agent.run_daemon_loop = False + rpc_loop_thread.wait() + + self.addCleanup(stop_agent, agent, t) + + def _bind_ports(self, ports, network, agent): + devices = [] + for port in ports: + dev = OVSAgentTestFramework._get_device_details(port, network) + vif_name = port.get('vif_name') + vif_id = uuidutils.generate_uuid(), + vif_port = ovs_lib.VifPort( + vif_name, "%s" % vif_id, 'id-%s' % vif_id, + port.get('mac_address'), agent.int_br) + dev['vif_port'] = vif_port + devices.append(dev) + agent._bind_devices(devices) + + def _create_test_port_dict(self): + return {'id': uuidutils.generate_uuid(), + 'mac_address': utils.get_random_mac( + 'fa:16:3e:00:00:00'.split(':')), + 'fixed_ips': [{ + 'ip_address': '10.%d.%d.%d' % ( + random.randint(3, 254), + random.randint(3, 254), + random.randint(3, 254))}], + 'vif_name': base.get_rand_name( + self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)} + + def _create_test_network_dict(self): + return {'id': uuidutils.generate_uuid(), + 'tenant_id': uuidutils.generate_uuid()} + + def _plug_ports(self, network, ports, agent, ip_len=24): + for port in ports: + self.driver.plug( + 
network.get('id'), port.get('id'), port.get('vif_name'), + port.get('mac_address'), + agent.int_br.br_name, namespace=self.namespace) + ip_cidrs = ["%s/%s" % (port.get('fixed_ips')[0][ + 'ip_address'], ip_len)] + self.driver.init_l3(port.get('vif_name'), ip_cidrs, + namespace=self.namespace) + + def _get_device_details(self, port, network): + dev = {'device': port['id'], + 'port_id': port['id'], + 'network_id': network['id'], + 'network_type': 'vlan', + 'physical_network': 'physnet', + 'segmentation_id': 1, + 'fixed_ips': port['fixed_ips'], + 'device_owner': 'compute', + 'port_security_enabled': True, + 'security_groups': ['default'], + 'admin_state_up': True} + return dev + + def assert_bridge(self, br, exists=True): + self.assertEqual(exists, self.ovs.bridge_exists(br)) + + def assert_patch_ports(self, agent): + + def get_peer(port): + return agent.int_br.db_get_val( + 'Interface', port, 'options', check_error=True) + + agent_utils.wait_until_true( + lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) + agent_utils.wait_until_true( + lambda: get_peer(self.patch_tun) == {'peer': self.patch_int}) + + def assert_bridge_ports(self): + for port in [self.patch_tun, self.patch_int]: + self.assertTrue(self.ovs.port_exists(port)) + + def assert_vlan_tags(self, ports, agent): + for port in ports: + res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') + self.assertTrue(res) + + def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True): + """Helper to check expected rpc call are received + :param call: The call to check + :param expected_devices The device for which call is expected + :param is_up True if expected_devices are devices that are set up, + False if expected_devices are devices that are set down + """ + if is_up: + rpc_devices = [ + dev for args in call.call_args_list for dev in args[0][1]] + else: + rpc_devices = [ + dev for args in call.call_args_list for dev in args[0][2]] + return not (set(expected_devices) - 
set(rpc_devices)) + + def create_test_ports(self, amount=3, **kwargs): + ports = [] + for x in range(amount): + ports.append(self._create_test_port_dict(**kwargs)) + return ports + + def _mock_update_device(self, context, devices_up, devices_down, agent_id, + host=None): + dev_up = [] + dev_down = [] + for port in self.ports: + if devices_up and port['id'] in devices_up: + dev_up.append(port['id']) + if devices_down and port['id'] in devices_down: + dev_down.append({'device': port['id'], 'exists': True}) + return {'devices_up': dev_up, + 'failed_devices_up': [], + 'devices_down': dev_down, + 'failed_devices_down': []} + + def setup_agent_rpc_mocks(self, agent): + def mock_device_details(context, devices, agent_id, host=None): + + details = [] + for port in self.ports: + if port['id'] in devices: + dev = self._get_device_details( + port, self.network) + details.append(dev) + return {'devices': details, 'failed_devices': []} + + (agent.plugin_rpc.get_devices_details_list_and_failed_devices. 
+ side_effect) = mock_device_details + agent.plugin_rpc.update_device_list.side_effect = ( + self._mock_update_device) + + def _prepare_resync_trigger(self, agent): + def mock_device_raise_exception(context, devices_up, devices_down, + agent_id, host=None): + agent.plugin_rpc.update_device_list.side_effect = ( + self._mock_update_device) + raise Exception('Exception to trigger resync') + + self.agent.plugin_rpc.update_device_list.side_effect = ( + mock_device_raise_exception) + + def wait_until_ports_state(self, ports, up): + port_ids = [p['id'] for p in ports] + agent_utils.wait_until_true( + lambda: self._expected_plugin_rpc_call( + self.agent.plugin_rpc.update_device_list, port_ids, up)) + + def setup_agent_and_ports(self, port_dicts, create_tunnels=True, + trigger_resync=False): + self.agent = self.create_agent(create_tunnels=create_tunnels) + self.start_agent(self.agent) + self.network = self._create_test_network_dict() + self.ports = port_dicts + if trigger_resync: + self._prepare_resync_trigger(self.agent) + self._plug_ports(self.network, self.ports, self.agent) diff --git a/neutron/tests/unit/plugins/ibm/__init__.py b/neutron/tests/functional/agent/l2/extensions/__init__.py similarity index 100% rename from neutron/tests/unit/plugins/ibm/__init__.py rename to neutron/tests/functional/agent/l2/extensions/__init__.py diff --git a/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py b/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py new file mode 100644 index 00000000000..ad6e38b214f --- /dev/null +++ b/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py @@ -0,0 +1,194 @@ +# Copyright (c) 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from oslo_utils import uuidutils + +from neutron.api.rpc.callbacks.consumer import registry as consumer_reg +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks import resources +from neutron.objects.qos import policy +from neutron.objects.qos import rule +from neutron.tests.common.agents import l2_extensions +from neutron.tests.functional.agent.l2 import base + + +TEST_POLICY_ID1 = "a2d72369-4246-4f19-bd3c-af51ec8d70cd" +TEST_POLICY_ID2 = "46ebaec0-0570-43ac-82f6-60d2b03168c5" +TEST_BW_LIMIT_RULE_1 = rule.QosBandwidthLimitRule( + context=None, + id="5f126d84-551a-4dcf-bb01-0e9c0df0c793", + max_kbps=1000, + max_burst_kbps=10) +TEST_BW_LIMIT_RULE_2 = rule.QosBandwidthLimitRule( + context=None, + id="fa9128d9-44af-49b2-99bb-96548378ad42", + max_kbps=900, + max_burst_kbps=9) + + +class OVSAgentQoSExtensionTestFramework(base.OVSAgentTestFramework): + def setUp(self): + super(OVSAgentQoSExtensionTestFramework, self).setUp() + self.config.set_override('extensions', ['qos'], 'agent') + self._set_pull_mock() + self.set_test_qos_rules(TEST_POLICY_ID1, [TEST_BW_LIMIT_RULE_1]) + self.set_test_qos_rules(TEST_POLICY_ID2, [TEST_BW_LIMIT_RULE_2]) + + def _set_pull_mock(self): + + self.qos_policies = {} + + def _pull_mock(context, resource_type, resource_id): + return self.qos_policies[resource_id] + + self.pull = mock.patch( + 'neutron.api.rpc.handlers.resources_rpc.' 
+ 'ResourcesPullRpcApi.pull').start() + self.pull.side_effect = _pull_mock + + def set_test_qos_rules(self, policy_id, policy_rules): + """This function sets the policy test rules to be exposed.""" + + qos_policy = policy.QosPolicy( + context=None, + tenant_id=uuidutils.generate_uuid(), + id=policy_id, + name="Test Policy Name", + description="This is a policy for testing purposes", + shared=False, + rules=policy_rules) + + qos_policy.obj_reset_changes() + self.qos_policies[policy_id] = qos_policy + + def _create_test_port_dict(self, policy_id=None): + port_dict = super(OVSAgentQoSExtensionTestFramework, + self)._create_test_port_dict() + port_dict['qos_policy_id'] = policy_id + return port_dict + + def _get_device_details(self, port, network): + dev = super(OVSAgentQoSExtensionTestFramework, + self)._get_device_details(port, network) + dev['qos_policy_id'] = port['qos_policy_id'] + return dev + + def _assert_bandwidth_limit_rule_is_set(self, port, rule): + max_rate, burst = ( + self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name'])) + self.assertEqual(max_rate, rule.max_kbps) + self.assertEqual(burst, rule.max_burst_kbps) + + def _assert_bandwidth_limit_rule_not_set(self, port): + max_rate, burst = ( + self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name'])) + self.assertIsNone(max_rate) + self.assertIsNone(burst) + + def wait_until_bandwidth_limit_rule_applied(self, port, rule): + l2_extensions.wait_until_bandwidth_limit_rule_applied( + self.agent.int_br, port['vif_name'], rule) + + def _create_port_with_qos(self): + port_dict = self._create_test_port_dict() + port_dict['qos_policy_id'] = TEST_POLICY_ID1 + self.setup_agent_and_ports([port_dict]) + self.wait_until_ports_state(self.ports, up=True) + self.wait_until_bandwidth_limit_rule_applied(port_dict, + TEST_BW_LIMIT_RULE_1) + return port_dict + + +class TestOVSAgentQosExtension(OVSAgentQoSExtensionTestFramework): + + def test_port_creation_with_bandwidth_limit(self): + """Make sure bandwidth 
limit rules are set in low level to ports.""" + + self.setup_agent_and_ports( + port_dicts=self.create_test_ports(amount=1, + policy_id=TEST_POLICY_ID1)) + self.wait_until_ports_state(self.ports, up=True) + + for port in self.ports: + self._assert_bandwidth_limit_rule_is_set( + port, TEST_BW_LIMIT_RULE_1) + + def test_port_creation_with_different_bandwidth_limits(self): + """Make sure different types of policies end on the right ports.""" + + port_dicts = self.create_test_ports(amount=3) + + port_dicts[0]['qos_policy_id'] = TEST_POLICY_ID1 + port_dicts[1]['qos_policy_id'] = TEST_POLICY_ID2 + + self.setup_agent_and_ports(port_dicts) + self.wait_until_ports_state(self.ports, up=True) + + self._assert_bandwidth_limit_rule_is_set(self.ports[0], + TEST_BW_LIMIT_RULE_1) + + self._assert_bandwidth_limit_rule_is_set(self.ports[1], + TEST_BW_LIMIT_RULE_2) + + self._assert_bandwidth_limit_rule_not_set(self.ports[2]) + + def test_simple_port_policy_update(self): + self.setup_agent_and_ports( + port_dicts=self.create_test_ports(amount=1, + policy_id=TEST_POLICY_ID1)) + self.wait_until_ports_state(self.ports, up=True) + policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) + policy_copy.rules[0].max_kbps = 500 + policy_copy.rules[0].max_burst_kbps = 5 + consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) + self.wait_until_bandwidth_limit_rule_applied(self.ports[0], + policy_copy.rules[0]) + self._assert_bandwidth_limit_rule_is_set(self.ports[0], + policy_copy.rules[0]) + + def test_port_qos_disassociation(self): + """Test that qos_policy_id set to None will remove all qos rules from + given port. + """ + port_dict = self._create_port_with_qos() + + port_dict['qos_policy_id'] = None + self.agent.port_update(None, port=port_dict) + + self.wait_until_bandwidth_limit_rule_applied(port_dict, None) + + def test_port_qos_update_policy_id(self): + """Test that change of qos policy id on given port refreshes all its + rules. 
+ """ + port_dict = self._create_port_with_qos() + + port_dict['qos_policy_id'] = TEST_POLICY_ID2 + self.agent.port_update(None, port=port_dict) + + self.wait_until_bandwidth_limit_rule_applied(port_dict, + TEST_BW_LIMIT_RULE_2) + + def test_policy_rule_delete(self): + port_dict = self._create_port_with_qos() + + policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) + policy_copy.rules = list() + consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) + + self.wait_until_bandwidth_limit_rule_applied(port_dict, None) diff --git a/neutron/tests/functional/agent/linux/test_ip_lib.py b/neutron/tests/functional/agent/linux/test_ip_lib.py index 4e8316f77ee..b166b0ec5cc 100644 --- a/neutron/tests/functional/agent/linux/test_ip_lib.py +++ b/neutron/tests/functional/agent/linux/test_ip_lib.py @@ -24,6 +24,7 @@ from neutron.agent.common import config from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import utils +from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base from neutron.tests.functional import base as functional_base @@ -164,3 +165,16 @@ class IpLibTestCase(IpLibTestFramework): routes = ip_lib.get_routing_table(4, namespace=attr.namespace) self.assertEqual(expected_routes, routes) + + def _check_for_device_name(self, ip, name, should_exist): + exist = any(d for d in ip.get_devices() if d.name == name) + self.assertEqual(should_exist, exist) + + def test_dummy_exists(self): + namespace = self.useFixture(net_helpers.NamespaceFixture()) + dev_name = base.get_rand_name() + device = namespace.ip_wrapper.add_dummy(dev_name) + self.addCleanup(self._safe_delete_device, device) + self._check_for_device_name(namespace.ip_wrapper, dev_name, True) + device.link.delete() + self._check_for_device_name(namespace.ip_wrapper, dev_name, False) diff --git a/neutron/tests/functional/agent/linux/test_iptables.py b/neutron/tests/functional/agent/linux/test_iptables.py 
index 2130ec8ccd4..93c03e672e3 100644 --- a/neutron/tests/functional/agent/linux/test_iptables.py +++ b/neutron/tests/functional/agent/linux/test_iptables.py @@ -83,14 +83,23 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): self.client.namespace, self.server.namespace, self.server.ip, self.port, protocol) self.addCleanup(netcat.stop_processes) - self.assertTrue(netcat.test_connectivity()) + filter_params = 'direction %s, port %s and protocol %s' % ( + direction, port, protocol) + self.assertTrue(netcat.test_connectivity(), + 'Failed connectivity check before applying a filter ' + 'with %s' % filter_params) self.filter_add_rule( fw_manager, self.server.ip, direction, protocol, port) - with testtools.ExpectedException(RuntimeError): + with testtools.ExpectedException( + RuntimeError, + msg='Wrongfully passed a connectivity check after applying ' + 'a filter with %s' % filter_params): netcat.test_connectivity() self.filter_remove_rule( fw_manager, self.server.ip, direction, protocol, port) - self.assertTrue(netcat.test_connectivity(True)) + self.assertTrue(netcat.test_connectivity(True), + 'Failed connectivity check after removing a filter ' + 'with %s' % filter_params) def test_icmp(self): self.client.assert_ping(self.server.ip) diff --git a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py index fc49b1ae4d1..e88329df43c 100644 --- a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py +++ b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py @@ -135,12 +135,9 @@ class TestSimpleInterfaceMonitor(BaseMonitorTest): devices = self.monitor.get_events() self.assertTrue(devices.get('added'), 'Initial call should always be true') - p_attrs = [('external_ids', {'iface-status': 'active'})] br = self.useFixture(net_helpers.OVSBridgeFixture()) - p1 = self.useFixture(net_helpers.OVSPortFixture( - br.bridge, None, p_attrs)) - p2 = self.useFixture(net_helpers.OVSPortFixture( - 
br.bridge, None, p_attrs)) + p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) + p2 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) added_devices = [p1.port.name, p2.port.name] utils.wait_until_true( lambda: self._expected_devices_events(added_devices, 'added')) diff --git a/neutron/tests/functional/agent/test_l2_ovs_agent.py b/neutron/tests/functional/agent/test_l2_ovs_agent.py index db57e18b18c..3987c9f5489 100644 --- a/neutron/tests/functional/agent/test_l2_ovs_agent.py +++ b/neutron/tests/functional/agent/test_l2_ovs_agent.py @@ -14,301 +14,66 @@ # License for the specific language governing permissions and limitations # under the License. -import eventlet -import mock -import random +import time -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from neutron.agent.common import config as agent_config -from neutron.agent.common import ovs_lib -from neutron.agent.linux import interface -from neutron.agent.linux import polling -from neutron.agent.linux import utils as agent_utils -from neutron.common import config as common_config -from neutron.common import constants as n_const -from neutron.common import utils -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \ - as ovs_config from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants -from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ - import br_int -from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ - import br_phys -from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ - import br_tun -from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ - as ovs_agent -from neutron.tests.functional.agent.linux import base - -LOG = logging.getLogger(__name__) +from neutron.tests.common import net_helpers +from neutron.tests.functional.agent.l2 import base -class 
OVSAgentTestFramework(base.BaseOVSLinuxTestCase): - - def setUp(self): - super(OVSAgentTestFramework, self).setUp() - agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.' - 'ovs_neutron_agent.OVSPluginApi') - mock.patch(agent_rpc).start() - mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() - self.br_int = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, - prefix='br-int') - self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, - prefix='br-tun') - patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") - self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:] - self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:] - self.ovs = ovs_lib.BaseOVS() - self.config = self._configure_agent() - self.driver = interface.OVSInterfaceDriver(self.config) - - def _get_config_opts(self): - config = cfg.ConfigOpts() - config.register_opts(common_config.core_opts) - config.register_opts(interface.OPTS) - config.register_opts(ovs_config.ovs_opts, "OVS") - config.register_opts(ovs_config.agent_opts, "AGENT") - agent_config.register_interface_driver_opts_helper(config) - agent_config.register_agent_state_opts_helper(config) - return config - - def _configure_agent(self): - config = self._get_config_opts() - config.set_override( - 'interface_driver', - 'neutron.agent.linux.interface.OVSInterfaceDriver') - config.set_override('integration_bridge', self.br_int, "OVS") - config.set_override('ovs_integration_bridge', self.br_int) - config.set_override('tunnel_bridge', self.br_tun, "OVS") - config.set_override('int_peer_patch_port', self.patch_tun, "OVS") - config.set_override('tun_peer_patch_port', self.patch_int, "OVS") - config.set_override('host', 'ovs-agent') - return config - - def _bridge_classes(self): - return { - 'br_int': br_int.OVSIntegrationBridge, - 'br_phys': br_phys.OVSPhysicalBridge, - 'br_tun': br_tun.OVSTunnelBridge - } - - def create_agent(self, create_tunnels=True): - if create_tunnels: - tunnel_types = 
[p_const.TYPE_VXLAN] - else: - tunnel_types = None - local_ip = '192.168.10.1' - bridge_mappings = {'physnet': self.br_int} - agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(), - self.br_int, self.br_tun, - local_ip, bridge_mappings, - polling_interval=1, - tunnel_types=tunnel_types, - prevent_arp_spoofing=False, - conf=self.config) - self.addCleanup(self.ovs.delete_bridge, self.br_int) - if tunnel_types: - self.addCleanup(self.ovs.delete_bridge, self.br_tun) - agent.sg_agent = mock.Mock() - return agent - - def start_agent(self, agent): - polling_manager = polling.InterfacePollingMinimizer() - self.addCleanup(polling_manager.stop) - polling_manager.start() - agent_utils.wait_until_true( - polling_manager._monitor.is_active) - agent.check_ovs_status = mock.Mock( - return_value=constants.OVS_NORMAL) - t = eventlet.spawn(agent.rpc_loop, polling_manager) - - def stop_agent(agent, rpc_loop_thread): - agent.run_daemon_loop = False - rpc_loop_thread.wait() - - self.addCleanup(stop_agent, agent, t) - - def _bind_ports(self, ports, network, agent): - devices = [] - for port in ports: - dev = OVSAgentTestFramework._get_device_details(port, network) - vif_name = port.get('vif_name') - vif_id = uuidutils.generate_uuid(), - vif_port = ovs_lib.VifPort( - vif_name, "%s" % vif_id, 'id-%s' % vif_id, - port.get('mac_address'), agent.int_br) - dev['vif_port'] = vif_port - devices.append(dev) - agent._bind_devices(devices) - - def _create_test_port_dict(self): - return {'id': uuidutils.generate_uuid(), - 'mac_address': utils.get_random_mac( - 'fa:16:3e:00:00:00'.split(':')), - 'fixed_ips': [{ - 'ip_address': '10.%d.%d.%d' % ( - random.randint(3, 254), - random.randint(3, 254), - random.randint(3, 254))}], - 'vif_name': base.get_rand_name( - self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)} - - def _create_test_network_dict(self): - return {'id': uuidutils.generate_uuid(), - 'tenant_id': uuidutils.generate_uuid()} - - def _plug_ports(self, network, ports, agent, 
ip_len=24): - for port in ports: - self.driver.plug( - network.get('id'), port.get('id'), port.get('vif_name'), - port.get('mac_address'), - agent.int_br.br_name, namespace=None) - ip_cidrs = ["%s/%s" % (port.get('fixed_ips')[0][ - 'ip_address'], ip_len)] - self.driver.init_l3(port.get('vif_name'), ip_cidrs, namespace=None) - - @staticmethod - def _get_device_details(port, network): - dev = {'device': port['id'], - 'port_id': port['id'], - 'network_id': network['id'], - 'network_type': 'vlan', - 'physical_network': 'physnet', - 'segmentation_id': 1, - 'fixed_ips': port['fixed_ips'], - 'device_owner': 'compute', - 'admin_state_up': True} - return dev - - def assert_bridge(self, br, exists=True): - self.assertEqual(exists, self.ovs.bridge_exists(br)) - - def assert_patch_ports(self, agent): - - def get_peer(port): - return agent.int_br.db_get_val( - 'Interface', port, 'options', check_error=True) - - agent_utils.wait_until_true( - lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) - agent_utils.wait_until_true( - lambda: get_peer(self.patch_tun) == {'peer': self.patch_int}) - - def assert_bridge_ports(self): - for port in [self.patch_tun, self.patch_int]: - self.assertTrue(self.ovs.port_exists(port)) - - def assert_vlan_tags(self, ports, agent): - for port in ports: - res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') - self.assertTrue(res) - - -class TestOVSAgent(OVSAgentTestFramework): - - def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True): - """Helper to check expected rpc call are received - :param call: The call to check - :param expected_devices The device for which call is expected - :param is_up True if expected_devices are devices that are set up, - False if expected_devices are devices that are set down - """ - if is_up: - rpc_devices = [ - dev for args in call.call_args_list for dev in args[0][1]] - else: - rpc_devices = [ - dev for args in call.call_args_list for dev in args[0][2]] - return not 
(set(expected_devices) - set(rpc_devices)) - - def _create_ports(self, network, agent, trigger_resync=False): - ports = [] - for x in range(3): - ports.append(self._create_test_port_dict()) - - def mock_device_raise_exception(context, devices_up, devices_down, - agent_id, host=None): - agent.plugin_rpc.update_device_list.side_effect = ( - mock_update_device) - raise Exception('Exception to trigger resync') - - def mock_device_details(context, devices, agent_id, host=None): - - details = [] - for port in ports: - if port['id'] in devices: - dev = OVSAgentTestFramework._get_device_details( - port, network) - details.append(dev) - return {'devices': details, 'failed_devices': []} - - def mock_update_device(context, devices_up, devices_down, agent_id, - host=None): - dev_up = [] - dev_down = [] - for port in ports: - if devices_up and port['id'] in devices_up: - dev_up.append(port['id']) - if devices_down and port['id'] in devices_down: - dev_down.append({'device': port['id'], 'exists': True}) - return {'devices_up': dev_up, - 'failed_devices_up': [], - 'devices_down': dev_down, - 'failed_devices_down': []} - - (agent.plugin_rpc.get_devices_details_list_and_failed_devices. 
- side_effect) = mock_device_details - if trigger_resync: - agent.plugin_rpc.update_device_list.side_effect = ( - mock_device_raise_exception) - else: - agent.plugin_rpc.update_device_list.side_effect = ( - mock_update_device) - return ports +class TestOVSAgent(base.OVSAgentTestFramework): def test_port_creation_and_deletion(self): + self.setup_agent_and_ports( + port_dicts=self.create_test_ports()) + self.wait_until_ports_state(self.ports, up=True) + + for port in self.ports: + self.agent.int_br.delete_port(port['vif_name']) + + self.wait_until_ports_state(self.ports, up=False) + + def test_datapath_type_system(self): + expected = constants.OVS_DATAPATH_SYSTEM agent = self.create_agent() self.start_agent(agent) - network = self._create_test_network_dict() - ports = self._create_ports(network, agent) - self._plug_ports(network, ports, agent) - up_ports_ids = [p['id'] for p in ports] - agent_utils.wait_until_true( - lambda: self._expected_plugin_rpc_call( - agent.plugin_rpc.update_device_list, up_ports_ids)) - down_ports_ids = [p['id'] for p in ports] - for port in ports: - agent.int_br.delete_port(port['vif_name']) - agent_utils.wait_until_true( - lambda: self._expected_plugin_rpc_call( - agent.plugin_rpc.update_device_list, down_ports_ids, False)) + actual = self.ovs.db_get_val('Bridge', + agent.int_br.br_name, + 'datapath_type') + self.assertEqual(expected, actual) + actual = self.ovs.db_get_val('Bridge', + agent.tun_br.br_name, + 'datapath_type') + self.assertEqual(expected, actual) + + def test_datapath_type_netdev(self): + expected = constants.OVS_DATAPATH_NETDEV + self.config.set_override('datapath_type', + expected, + "OVS") + agent = self.create_agent() + self.start_agent(agent) + actual = self.ovs.db_get_val('Bridge', + agent.int_br.br_name, + 'datapath_type') + self.assertEqual(expected, actual) + actual = self.ovs.db_get_val('Bridge', + agent.tun_br.br_name, + 'datapath_type') + self.assertEqual(expected, actual) def 
test_resync_devices_set_up_after_exception(self): - agent = self.create_agent() - self.start_agent(agent) - network = self._create_test_network_dict() - ports = self._create_ports(network, agent, True) - self._plug_ports(network, ports, agent) - ports_ids = [p['id'] for p in ports] - agent_utils.wait_until_true( - lambda: self._expected_plugin_rpc_call( - agent.plugin_rpc.update_device_list, ports_ids)) + self.setup_agent_and_ports( + port_dicts=self.create_test_ports(), + trigger_resync=True) + self.wait_until_ports_state(self.ports, up=True) def test_port_vlan_tags(self): - agent = self.create_agent() - self.start_agent(agent) - network = self._create_test_network_dict() - ports = self._create_ports(network, agent) - ports_ids = [p['id'] for p in ports] - self._plug_ports(network, ports, agent) - agent_utils.wait_until_true( - lambda: self._expected_plugin_rpc_call( - agent.plugin_rpc.update_device_list, ports_ids)) - self.assert_vlan_tags(ports, agent) + self.setup_agent_and_ports( + port_dicts=self.create_test_ports(), + trigger_resync=True) + self.wait_until_ports_state(self.ports, up=True) + self.assert_vlan_tags(self.ports, self.agent) def test_assert_bridges_ports_vxlan(self): agent = self.create_agent() @@ -321,3 +86,13 @@ class TestOVSAgent(OVSAgentTestFramework): self.create_agent(create_tunnels=False) self.assertTrue(self.ovs.bridge_exists(self.br_int)) self.assertFalse(self.ovs.bridge_exists(self.br_tun)) + + def test_assert_pings_during_br_int_setup_not_lost(self): + self.setup_agent_and_ports(port_dicts=self.create_test_ports(), + create_tunnels=False) + self.wait_until_ports_state(self.ports, up=True) + ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports] + with net_helpers.async_ping(self.namespace, ips) as running: + while running(): + self.agent.setup_integration_br() + time.sleep(0.25) diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index ef2bd498ed8..986da2c1a06 100644 
--- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -1276,8 +1276,8 @@ class TestDvrRouter(L3AgentTestFramework): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) - self._add_fip(router1, '192.168.111.12', self.agent.conf.host) fip_ns = router1.fip_ns.get_name() + self.assertTrue(self._namespace_exists(fip_ns)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router[l3_constants.FLOATINGIP_KEY] = [] @@ -1285,6 +1285,31 @@ class TestDvrRouter(L3AgentTestFramework): self._assert_dvr_snat_gateway(router1) self.assertFalse(self._namespace_exists(fip_ns)) + def test_dvr_router_add_fips_on_restarted_agent(self): + self.agent.conf.agent_mode = 'dvr' + router_info = self.generate_dvr_router_info() + router = self.manage_router(self.agent, router_info) + floating_ips = router.router[l3_constants.FLOATINGIP_KEY] + router_ns = router.ns_name + fip_rule_prio_1 = self._get_fixed_ip_rule_priority( + router_ns, floating_ips[0]['fixed_ip_address']) + restarted_agent = neutron_l3_agent.L3NATAgent( + self.agent.host, self.agent.conf) + floating_ips[0]['floating_ip_address'] = '21.4.4.2' + floating_ips[0]['fixed_ip_address'] = '10.0.0.2' + self.manage_router(restarted_agent, router_info) + fip_rule_prio_2 = self._get_fixed_ip_rule_priority( + router_ns, floating_ips[0]['fixed_ip_address']) + self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2) + + def _get_fixed_ip_rule_priority(self, namespace, fip): + iprule = ip_lib.IPRule(namespace) + lines = iprule.rule._as_root([4], ['show']).splitlines() + for line in lines: + if fip in line: + info = iprule.rule._parse_line(4, line) + return info['priority'] + def test_dvr_router_add_internal_network_set_arp_cache(self): # Check that, when the router is set up and there are # existing ports on the the uplinked subnet, the ARP diff --git 
a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index 5d73ea1a5f3..d9856a8f4bf 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -179,19 +179,15 @@ class _ARPSpoofTestCase(object): net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def _setup_arp_spoof_for_port(self, port, addrs, psec=True): - of_port_map = self.br.get_vif_port_to_ofport_map() - - class VifPort(object): - ofport = of_port_map[port] - port_name = port - + vif = next( + vif for vif in self.br.get_vif_ports() if vif.port_name == port) ip_addr = addrs.pop() details = {'port_security_enabled': psec, 'fixed_ips': [{'ip_address': ip_addr}], 'allowed_address_pairs': [ dict(ip_address=ip) for ip in addrs]} ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection( - self.br_int, VifPort(), details) + self.br_int, vif, details) class ARPSpoofOFCtlTestCase(_ARPSpoofTestCase, _OVSAgentOFCtlTestBase): diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py index 903ed8c72f8..768209424ae 100644 --- a/neutron/tests/functional/agent/test_ovs_lib.py +++ b/neutron/tests/functional/agent/test_ovs_lib.py @@ -311,6 +311,17 @@ class OVSBridgeTestCase(OVSBridgeTestBase): controller, 'connection_mode')) + def test_egress_bw_limit(self): + port_name, _ = self.create_ovs_port() + self.br.create_egress_bw_limit_for_port(port_name, 700, 70) + max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) + self.assertEqual(700, max_rate) + self.assertEqual(70, burst) + self.br.delete_egress_bw_limit_for_port(port_name) + max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) + self.assertIsNone(max_rate) + self.assertIsNone(burst) + class OVSLibTestCase(base.BaseOVSLinuxTestCase): diff --git a/neutron/tests/functional/db/test_migrations.py b/neutron/tests/functional/db/test_migrations.py index 200b601ac49..b8e476073fd 100644 
--- a/neutron/tests/functional/db/test_migrations.py +++ b/neutron/tests/functional/db/test_migrations.py @@ -13,8 +13,8 @@ # under the License. import functools -import logging import pprint +import six import alembic import alembic.autogenerate @@ -26,15 +26,13 @@ from oslo_config import fixture as config_fixture from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations import sqlalchemy +from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models from neutron.tests.common import base -LOG = logging.getLogger(__name__) - - cfg.CONF.import_opt('core_plugin', 'neutron.common.config') CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' @@ -116,13 +114,12 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): super(_TestModelsMigrations, self).setUp() self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(core_plugin=CORE_PLUGIN) - self.alembic_config = migration.get_alembic_config() + self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') - cfg.CONF.clear_override('connection', group='database') def get_engine(self): return self.engine @@ -210,7 +207,35 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): class TestModelsMigrationsMysql(_TestModelsMigrations, base.MySQLTestCase): - pass + + # There is no use to run this against both dialects, so add this test just + # for MySQL tests + def test_external_tables_not_changed(self): + + def block_external_tables(conn, clauseelement, multiparams, params): + if isinstance(clauseelement, sqlalchemy.sql.selectable.Select): + return + + if (isinstance(clauseelement, six.string_types) and + any(name in 
clauseelement for name in external.TABLES)): + self.fail("External table referenced by neutron core " + "migration.") + + if hasattr(clauseelement, 'element'): + if (clauseelement.element.name in external.TABLES or + (hasattr(clauseelement, 'table') and + clauseelement.element.table.name in external.TABLES)): + self.fail("External table referenced by neutron core " + "migration.") + + engine = self.get_engine() + cfg.CONF.set_override('connection', engine.url, group='database') + migration.do_alembic_command(self.alembic_config, 'upgrade', 'kilo') + + event.listen(engine, 'before_execute', block_external_tables) + migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') + + event.remove(engine, 'before_execute', block_external_tables) class TestModelsMigrationsPsql(_TestModelsMigrations, @@ -222,7 +247,7 @@ class TestSanityCheck(test_base.DbTestCase): def setUp(self): super(TestSanityCheck, self).setUp() - self.alembic_config = migration.get_alembic_config() + self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def test_check_sanity_14be42f3d0a5(self): @@ -250,7 +275,7 @@ class TestWalkMigrations(test_base.DbTestCase): def setUp(self): super(TestWalkMigrations, self).setUp() - self.alembic_config = migration.get_alembic_config() + self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def test_no_downgrade(self): diff --git a/neutron/tests/functional/sanity/test_sanity.py b/neutron/tests/functional/sanity/test_sanity.py index b65de687a5b..a47bb4e2759 100644 --- a/neutron/tests/functional/sanity/test_sanity.py +++ b/neutron/tests/functional/sanity/test_sanity.py @@ -35,6 +35,9 @@ class SanityTestCase(base.BaseTestCase): def test_dnsmasq_version(self): checks.dnsmasq_version_supported() + def test_dibbler_version(self): + checks.dibbler_version_supported() + class SanityTestCaseRoot(functional_base.BaseSudoTestCase): """Sanity checks that require root access. 
@@ -47,6 +50,9 @@ class SanityTestCaseRoot(functional_base.BaseSudoTestCase): def test_ovs_vxlan_support_runs(self): checks.ovs_vxlan_supported() + def test_ovs_geneve_support_runs(self): + checks.ovs_geneve_supported() + def test_iproute2_vxlan_support_runs(self): checks.iproute2_vxlan_supported() diff --git a/neutron/tests/functional/services/__init__.py b/neutron/tests/functional/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/functional/services/l3_router/__init__.py b/neutron/tests/functional/services/l3_router/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py new file mode 100644 index 00000000000..ca12c9fe7bc --- /dev/null +++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py @@ -0,0 +1,171 @@ +# Copyright (c) 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api.v2 import attributes +from neutron.common import constants as l3_const +from neutron.db import l3_dvr_db +from neutron.extensions import external_net +from neutron.tests.common import helpers +from neutron.tests.unit.plugins.ml2 import base as ml2_test_base + + +class L3DvrTestCase(ml2_test_base.ML2TestFramework): + def setUp(self): + super(L3DvrTestCase, self).setUp() + self.l3_agent = helpers.register_l3_agent( + agent_mode=l3_const.L3_AGENT_MODE_DVR_SNAT) + + def _create_router(self, distributed=True): + return (super(L3DvrTestCase, self). + _create_router(distributed=distributed)) + + def test_update_router_db_centralized_to_distributed(self): + router = self._create_router(distributed=False) + # router needs to be in admin state down in order to be upgraded to DVR + self.l3_plugin.update_router( + self.context, router['id'], {'router': {'admin_state_up': False}}) + self.assertFalse(router['distributed']) + self.l3_plugin.update_router( + self.context, router['id'], {'router': {'distributed': True}}) + router = self.l3_plugin.get_router(self.context, router['id']) + self.assertTrue(router['distributed']) + + def test_get_device_owner_distributed_router_object(self): + router = self._create_router() + self.assertEqual( + l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE, + self.l3_plugin._get_device_owner(self.context, router)) + + def test_get_device_owner_distributed_router_id(self): + router = self._create_router() + self.assertEqual( + l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE, + self.l3_plugin._get_device_owner(self.context, router['id'])) + + def test_get_device_owner_centralized(self): + router = self._create_router(distributed=False) + self.assertEqual( + l3_const.DEVICE_OWNER_ROUTER_INTF, + self.l3_plugin._get_device_owner(self.context, router['id'])) + + def test_get_agent_gw_ports_exist_for_network_no_port(self): + self.assertIsNone( + self.l3_plugin._get_agent_gw_ports_exist_for_network( + self.context, 'network_id', 'host', 'agent_id')) + + def 
_test_remove_router_interface_leaves_snat_intact(self, by_subnet): + with self.subnet() as subnet1, \ + self.subnet(cidr='20.0.0.0/24') as subnet2: + kwargs = {'arg_list': (external_net.EXTERNAL,), + external_net.EXTERNAL: True} + with self.network(**kwargs) as ext_net, \ + self.subnet(network=ext_net, + cidr='30.0.0.0/24'): + router = self._create_router() + self.l3_plugin.add_router_interface( + self.context, router['id'], + {'subnet_id': subnet1['subnet']['id']}) + self.l3_plugin.add_router_interface( + self.context, router['id'], + {'subnet_id': subnet2['subnet']['id']}) + self.l3_plugin._update_router_gw_info( + self.context, router['id'], + {'network_id': ext_net['network']['id']}) + + snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( + self.context, [router['id']]) + self.assertEqual( + 2, len(snat_router_intfs[router['id']])) + + if by_subnet: + self.l3_plugin.remove_router_interface( + self.context, router['id'], + {'subnet_id': subnet1['subnet']['id']}) + else: + port = self.core_plugin.get_ports( + self.context, filters={ + 'network_id': [subnet1['subnet']['network_id']], + 'device_owner': + [l3_const.DEVICE_OWNER_DVR_INTERFACE]})[0] + self.l3_plugin.remove_router_interface( + self.context, router['id'], + {'port_id': port['id']}) + + self.assertEqual( + 1, len(self.l3_plugin._get_snat_sync_interfaces( + self.context, [router['id']]))) + + def test_remove_router_interface_by_subnet_leaves_snat_intact(self): + self._test_remove_router_interface_leaves_snat_intact(by_subnet=True) + + def test_remove_router_interface_by_port_leaves_snat_intact(self): + self._test_remove_router_interface_leaves_snat_intact( + by_subnet=False) + + def setup_create_agent_gw_port_for_network(self): + network = self._make_network(self.fmt, '', True) + network_id = network['network']['id'] + port = self.core_plugin.create_port( + self.context, + {'port': {'tenant_id': '', + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': 
attributes.ATTR_NOT_SPECIFIED, + 'device_id': self.l3_agent['id'], + 'device_owner': l3_dvr_db.DEVICE_OWNER_AGENT_GW, + 'binding:host_id': '', + 'admin_state_up': True, + 'name': ''}}) + return network_id, port + + def test_get_agent_gw_port_for_network(self): + network_id, port = ( + self.setup_create_agent_gw_port_for_network()) + + self.assertEqual( + port['id'], + self.l3_plugin._get_agent_gw_ports_exist_for_network( + self.context, network_id, None, self.l3_agent['id'])['id']) + + def test_delete_agent_gw_port_for_network(self): + network_id, port = ( + self.setup_create_agent_gw_port_for_network()) + + self.l3_plugin._delete_floatingip_agent_gateway_port( + self.context, "", network_id) + self.assertIsNone( + self.l3_plugin._get_agent_gw_ports_exist_for_network( + self.context, network_id, "", self.l3_agent['id'])) + + def test_get_fip_sync_interfaces(self): + self.setup_create_agent_gw_port_for_network() + + self.assertEqual( + 1, len(self.l3_plugin._get_fip_sync_interfaces( + self.context, self.l3_agent['id']))) + + def test_process_routers(self): + router = self._create_router() + result = self.l3_plugin._process_routers(self.context, [router]) + self.assertEqual( + router['id'], result[router['id']]['id']) + + def test_get_router_ids(self): + router = self._create_router() + self.assertEqual( + router['id'], + self.l3_plugin._get_router_ids(self.context)[0]) + self._create_router() + self.assertEqual( + 2, len(self.l3_plugin._get_router_ids(self.context))) diff --git a/neutron/tests/tempest/common/glance_http.py b/neutron/tests/tempest/common/glance_http.py index 0a6f985e7c6..3d8c8aaf8c3 100644 --- a/neutron/tests/tempest/common/glance_http.py +++ b/neutron/tests/tempest/common/glance_http.py @@ -17,7 +17,6 @@ import copy import hashlib -import json import posixpath import re import socket @@ -28,6 +27,7 @@ import urlparse import OpenSSL from oslo_log import log as logging +from oslo_serialization import jsonutils as json from six import moves from 
six.moves import http_client as httplib from tempest_lib import exceptions as lib_exc diff --git a/neutron/tests/tempest/config.py b/neutron/tests/tempest/config.py index c459d76afd3..200b24736f0 100644 --- a/neutron/tests/tempest/config.py +++ b/neutron/tests/tempest/config.py @@ -15,7 +15,6 @@ from __future__ import print_function -import logging as std_logging import os from oslo_config import cfg @@ -1191,7 +1190,7 @@ class TempestConfigPrivate(object): register_opts() self._set_attrs() if parse_conf: - cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + cfg.CONF.log_opt_values(LOG, logging.DEBUG) class TempestConfigProxy(object): @@ -1199,15 +1198,15 @@ class TempestConfigProxy(object): _path = None _extra_log_defaults = [ - ('keystoneclient.session', std_logging.INFO), - ('paramiko.transport', std_logging.INFO), - ('requests.packages.urllib3.connectionpool', std_logging.WARN), + ('keystoneclient.session', logging.INFO), + ('paramiko.transport', logging.INFO), + ('requests.packages.urllib3.connectionpool', logging.WARN), ] def _fix_log_levels(self): """Tweak the oslo log defaults.""" for name, level in self._extra_log_defaults: - std_logging.getLogger(name).setLevel(level) + logging.getLogger(name).logger.setLevel(level) def __getattr__(self, attr): if not self._config: diff --git a/neutron/tests/tempest/services/identity/v2/json/identity_client.py b/neutron/tests/tempest/services/identity/v2/json/identity_client.py index 7efda1febdf..46e8f8781ab 100644 --- a/neutron/tests/tempest/services/identity/v2/json/identity_client.py +++ b/neutron/tests/tempest/services/identity/v2/json/identity_client.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json +from oslo_serialization import jsonutils as json from tempest_lib import exceptions as lib_exc from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v2/json/token_client.py b/neutron/tests/tempest/services/identity/v2/json/token_client.py index 51d9db02bd2..e8b33ea8007 100644 --- a/neutron/tests/tempest/services/identity/v2/json/token_client.py +++ b/neutron/tests/tempest/services/identity/v2/json/token_client.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json +from oslo_serialization import jsonutils as json from tempest_lib.common import rest_client from tempest_lib import exceptions as lib_exc diff --git a/neutron/tests/tempest/services/identity/v3/json/credentials_client.py b/neutron/tests/tempest/services/identity/v3/json/credentials_client.py index 4300c0fc7db..07e230ac49c 100644 --- a/neutron/tests/tempest/services/identity/v3/json/credentials_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/credentials_client.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json +from oslo_serialization import jsonutils as json from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py b/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py index b60dd260bfd..27ac3e54d03 100644 --- a/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/endpoints_client.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json +from oslo_serialization import jsonutils as json from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/identity_client.py b/neutron/tests/tempest/services/identity/v3/json/identity_client.py index a7db46a5785..a090acf9a03 100644 --- a/neutron/tests/tempest/services/identity/v3/json/identity_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/identity_client.py @@ -13,8 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json - +from oslo_serialization import jsonutils as json from six.moves.urllib import parse from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/policy_client.py b/neutron/tests/tempest/services/identity/v3/json/policy_client.py index 2e44185ddb7..2d247afec84 100644 --- a/neutron/tests/tempest/services/identity/v3/json/policy_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/policy_client.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json +from oslo_serialization import jsonutils as json from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/region_client.py b/neutron/tests/tempest/services/identity/v3/json/region_client.py index d2fa53b7561..0effae881d5 100644 --- a/neutron/tests/tempest/services/identity/v3/json/region_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/region_client.py @@ -13,8 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json - +from oslo_serialization import jsonutils as json from six.moves.urllib import parse from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/service_client.py b/neutron/tests/tempest/services/identity/v3/json/service_client.py index 529693e34b1..75a5cf8150e 100644 --- a/neutron/tests/tempest/services/identity/v3/json/service_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/service_client.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json +from oslo_serialization import jsonutils as json from neutron.tests.tempest.common import service_client diff --git a/neutron/tests/tempest/services/identity/v3/json/token_client.py b/neutron/tests/tempest/services/identity/v3/json/token_client.py index c60b24c56f9..77ecf8423e6 100644 --- a/neutron/tests/tempest/services/identity/v3/json/token_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/token_client.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. -import json +from oslo_serialization import jsonutils as json from tempest_lib.common import rest_client from tempest_lib import exceptions as lib_exc diff --git a/neutron/tests/tempest/services/network/json/network_client.py b/neutron/tests/tempest/services/network/json/network_client.py index 4958bc51c03..25400ca2a84 100644 --- a/neutron/tests/tempest/services/network/json/network_client.py +++ b/neutron/tests/tempest/services/network/json/network_client.py @@ -10,9 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json import time +import urllib + +from oslo_serialization import jsonutils as json from six.moves.urllib import parse from tempest_lib.common.utils import misc from tempest_lib import exceptions as lib_exc @@ -65,7 +67,11 @@ class NetworkClientJSON(service_client.ServiceClient): 'metering_label_rules': 'metering', 'firewall_rules': 'fw', 'firewall_policies': 'fw', - 'firewalls': 'fw' + 'firewalls': 'fw', + 'policies': 'qos', + 'bandwidth_limit_rules': 'qos', + 'rule_types': 'qos', + 'rbac-policies': '', } service_prefix = service_resource_prefix_map.get( plural_name) @@ -90,7 +96,9 @@ class NetworkClientJSON(service_client.ServiceClient): 'ikepolicy': 'ikepolicies', 'ipsec_site_connection': 'ipsec-site-connections', 'quotas': 'quotas', - 'firewall_policy': 'firewall_policies' + 'firewall_policy': 'firewall_policies', + 'qos_policy': 'policies', + 'rbac_policy': 'rbac_policies', } return resource_plural_map.get(resource_name, resource_name + 's') @@ -620,3 +628,88 @@ class NetworkClientJSON(service_client.ServiceClient): self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) + + def list_qos_policies(self, **filters): + if filters: + uri = '%s/qos/policies?%s' % (self.uri_prefix, + urllib.urlencode(filters)) + else: + uri = '%s/qos/policies' % self.uri_prefix + resp, body = self.get(uri) + self.expected_success(200, resp.status) + body = json.loads(body) + return service_client.ResponseBody(resp, body) + + def create_qos_policy(self, name, description, shared, tenant_id=None): + uri = '%s/qos/policies' % self.uri_prefix + post_data = {'policy': { + 'name': name, + 'description': description, + 'shared': shared + }} + if tenant_id is not None: + post_data['policy']['tenant_id'] = tenant_id + resp, body = self.post(uri, self.serialize(post_data)) + body = self.deserialize_single(body) + self.expected_success(201, resp.status) + return service_client.ResponseBody(resp, body) + + def 
update_qos_policy(self, policy_id, **kwargs): + uri = '%s/qos/policies/%s' % (self.uri_prefix, policy_id) + post_data = self.serialize({'policy': kwargs}) + resp, body = self.put(uri, post_data) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def create_bandwidth_limit_rule(self, policy_id, max_kbps, max_burst_kbps): + uri = '%s/qos/policies/%s/bandwidth_limit_rules' % ( + self.uri_prefix, policy_id) + post_data = self.serialize( + {'bandwidth_limit_rule': { + 'max_kbps': max_kbps, + 'max_burst_kbps': max_burst_kbps} + }) + resp, body = self.post(uri, post_data) + self.expected_success(201, resp.status) + body = json.loads(body) + return service_client.ResponseBody(resp, body) + + def list_bandwidth_limit_rules(self, policy_id): + uri = '%s/qos/policies/%s/bandwidth_limit_rules' % ( + self.uri_prefix, policy_id) + resp, body = self.get(uri) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def show_bandwidth_limit_rule(self, policy_id, rule_id): + uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( + self.uri_prefix, policy_id, rule_id) + resp, body = self.get(uri) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def update_bandwidth_limit_rule(self, policy_id, rule_id, **kwargs): + uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( + self.uri_prefix, policy_id, rule_id) + post_data = {'bandwidth_limit_rule': kwargs} + resp, body = self.put(uri, json.dumps(post_data)) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def delete_bandwidth_limit_rule(self, policy_id, rule_id): + uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( + self.uri_prefix, policy_id, rule_id) + resp, body = self.delete(uri) + 
self.expected_success(204, resp.status) + return service_client.ResponseBody(resp, body) + + def list_qos_rule_types(self): + uri = '%s/qos/rule-types' % self.uri_prefix + resp, body = self.get(uri) + self.expected_success(200, resp.status) + body = json.loads(body) + return service_client.ResponseBody(resp, body) diff --git a/neutron/tests/tempest/test.py b/neutron/tests/tempest/test.py index d95174bd886..3abf826b2c3 100644 --- a/neutron/tests/tempest/test.py +++ b/neutron/tests/tempest/test.py @@ -15,7 +15,6 @@ import atexit import functools -import json import os import re import sys @@ -24,6 +23,7 @@ import uuid import fixtures from oslo_log import log as logging +from oslo_serialization import jsonutils as json from oslo_utils import importutils import six from six.moves.urllib import parse diff --git a/neutron/tests/unit/agent/common/test_ovs_lib.py b/neutron/tests/unit/agent/common/test_ovs_lib.py index b0b8180c1b9..b6ab9dd2bbc 100644 --- a/neutron/tests/unit/agent/common/test_ovs_lib.py +++ b/neutron/tests/unit/agent/common/test_ovs_lib.py @@ -22,6 +22,8 @@ from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.common import exceptions from neutron.plugins.common import constants +from neutron.plugins.ml2.drivers.openvswitch.agent.common \ + import constants as p_const from neutron.tests import base from neutron.tests import tools @@ -108,73 +110,9 @@ class OVS_Lib_Test(base.BaseTestCase): # test __str__ str(port) - def test_set_controller(self): - controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'] - self.br.set_controller(controller_names) - self._verify_vsctl_mock('set-controller', self.BR_NAME, - 'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555') - - def test_del_controller(self): - self.br.del_controller() - self._verify_vsctl_mock('del-controller', self.BR_NAME) - - def test_get_controller(self): - self.execute.return_value = ( - 'tcp:127.0.0.1:6633\\ntcp:172.17.16.10:5555') - names = 
self.br.get_controller() - self.assertEqual(names, - ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']) - self._verify_vsctl_mock('get-controller', self.BR_NAME) - - def test_set_secure_mode(self): - self.br.set_secure_mode() - self._verify_vsctl_mock('set-fail-mode', self.BR_NAME, 'secure') - - def test_set_standalone_mode(self): - self.br.set_standalone_mode() - self._verify_vsctl_mock('set-fail-mode', self.BR_NAME, 'standalone') - - def test_set_protocols(self): - protocols = 'OpenFlow13' - self.br.set_protocols(protocols) - self._verify_vsctl_mock('set', 'Bridge', self.BR_NAME, - "protocols=%s" % protocols) - - def test_create(self): - self.br.add_bridge(self.BR_NAME) - - self.br.create() - - def test_destroy(self): - self.br.delete_bridge(self.BR_NAME) - - self.br.destroy() - - def test_reset_bridge(self): - self.br.destroy() - self.br.create() - - self.br.reset_bridge() - def _build_timeout_opt(self, exp_timeout): return "--timeout=%d" % exp_timeout if exp_timeout else self.TO - def _test_delete_port(self, exp_timeout=None): - pname = "tap5" - self.br.delete_port(pname) - self._verify_vsctl_mock("--if-exists", "del-port", self.BR_NAME, pname) - - def test_delete_port(self): - self._test_delete_port() - - def test_call_command_non_default_timeput(self): - # This test is only for verifying a non-default timeout - # is correctly applied. 
Does not need to be repeated for - # every ovs_lib method - new_timeout = 5 - self.br.vsctl_timeout = new_timeout - self._test_delete_port(new_timeout) - def test_add_flow(self): ofport = "99" vid = 4000 @@ -182,29 +120,36 @@ class OVS_Lib_Test(base.BaseTestCase): cidr = '192.168.1.0/24' flow_dict_1 = collections.OrderedDict([ + ('cookie', 1234), ('priority', 2), ('dl_src', 'ca:fe:de:ad:be:ef'), ('actions', 'strip_vlan,output:0')]) flow_dict_2 = collections.OrderedDict([ + ('cookie', 1254), ('priority', 1), ('actions', 'normal')]) flow_dict_3 = collections.OrderedDict([ + ('cookie', 1257), ('priority', 2), ('actions', 'drop')]) flow_dict_4 = collections.OrderedDict([ + ('cookie', 1274), ('priority', 2), ('in_port', ofport), ('actions', 'drop')]) flow_dict_5 = collections.OrderedDict([ + ('cookie', 1284), ('priority', 4), ('in_port', ofport), ('dl_vlan', vid), ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))]) flow_dict_6 = collections.OrderedDict([ + ('cookie', 1754), ('priority', 3), ('tun_id', lsw_id), ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))]) flow_dict_7 = collections.OrderedDict([ + ('cookie', 1256), ('priority', 4), ('nw_src', cidr), ('proto', 'arp'), @@ -220,36 +165,39 @@ class OVS_Lib_Test(base.BaseTestCase): expected_calls = [ self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0," + "hard_timeout=0,idle_timeout=0,cookie=1234," "priority=2,dl_src=ca:fe:de:ad:be:ef," "actions=strip_vlan,output:0")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0," + "hard_timeout=0,idle_timeout=0,cookie=1254," "priority=1,actions=normal")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0," + "hard_timeout=0,idle_timeout=0,cookie=1257," "priority=2,actions=drop")), self._ofctl_mock("add-flows", self.BR_NAME, '-', 
process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0,priority=2," - "in_port=%s,actions=drop" % ofport)), + "hard_timeout=0,idle_timeout=0,cookie=1274," + "priority=2,in_port=%s,actions=drop" % ofport + )), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0," + "hard_timeout=0,idle_timeout=0,cookie=1284," "priority=4,dl_vlan=%s,in_port=%s," "actions=strip_vlan,set_tunnel:%s,normal" % (vid, ofport, lsw_id))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0,priority=3," - "tun_id=%s,actions=mod_vlan_vid:%s," - "output:%s" % (lsw_id, vid, ofport))), + "hard_timeout=0,idle_timeout=0,cookie=1754," + "priority=3," + "tun_id=%s,actions=mod_vlan_vid:%s,output:%s" + % (lsw_id, vid, ofport))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( - "hard_timeout=0,idle_timeout=0,priority=4," - "nw_src=%s,arp,actions=drop" % cidr)), + "hard_timeout=0,idle_timeout=0,cookie=1256," + "priority=4,nw_src=%s,arp,actions=drop" + % cidr)), ] self.execute.assert_has_calls(expected_calls) @@ -269,6 +217,7 @@ class OVS_Lib_Test(base.BaseTestCase): def test_add_flow_timeout_set(self): flow_dict = collections.OrderedDict([ + ('cookie', 1234), ('priority', 1), ('hard_timeout', 1000), ('idle_timeout', 2000), @@ -277,17 +226,18 @@ class OVS_Lib_Test(base.BaseTestCase): self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', - process_input="hard_timeout=1000,idle_timeout=2000,priority=1," - "actions=normal") + process_input="hard_timeout=1000,idle_timeout=2000," + "priority=1,cookie=1234,actions=normal") def test_add_flow_default_priority(self): - flow_dict = collections.OrderedDict([('actions', 'normal')]) + flow_dict = collections.OrderedDict([('actions', 'normal'), + ('cookie', 1234)]) self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', 
process_input="hard_timeout=0,idle_timeout=0,priority=1," - "actions=normal") + "cookie=1234,actions=normal") def _test_get_port_ofport(self, ofport, expected_result): pname = "tap99" @@ -307,13 +257,15 @@ class OVS_Lib_Test(base.BaseTestCase): self._test_get_port_ofport(ovs_lib.INVALID_OFPORT, ovs_lib.INVALID_OFPORT) - def test_get_datapath_id(self): - datapath_id = '"0000b67f4fbcc149"' - self.execute.return_value = self._encode_ovs_json(['datapath_id'], - [[datapath_id]]) - self.assertEqual(self.br.get_datapath_id(), datapath_id) - self._verify_vsctl_mock("--columns=datapath_id", "list", "Bridge", - self.BR_NAME) + def test_default_datapath(self): + # verify kernel datapath is default + expected = p_const.OVS_DATAPATH_SYSTEM + self.assertEqual(expected, self.br.datapath_type) + + def test_non_default_datapath(self): + expected = p_const.OVS_DATAPATH_NETDEV + self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected) + self.assertEqual(expected, self.br.datapath_type) def test_count_flows(self): self.execute.return_value = 'ignore\nflow-1\n' @@ -448,25 +400,6 @@ class OVS_Lib_Test(base.BaseTestCase): tools.verify_mock_calls(self.execute, expected_calls_and_values) - def test_add_patch_port(self): - pname = "tap99" - peer = "bar10" - ofport = 6 - - # Each element is a tuple of (expected mock call, return_value) - command = ["--may-exist", "add-port", self.BR_NAME, pname] - command.extend(["--", "set", "Interface", pname]) - command.extend(["type=patch", "options:peer=" + peer]) - expected_calls_and_values = [ - (self._vsctl_mock(*command), None), - (self._vsctl_mock("--columns=ofport", "list", "Interface", pname), - self._encode_ovs_json(['ofport'], [[ofport]])) - ] - tools.setup_mock_calls(self.execute, expected_calls_and_values) - - self.assertEqual(self.br.add_patch_port(pname, peer), ofport) - tools.verify_mock_calls(self.execute, expected_calls_and_values) - def _test_get_vif_ports(self, is_xen=False): pname = "tap99" ofport = 6 @@ -683,20 +616,13 @@ 
class OVS_Lib_Test(base.BaseTestCase): self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False) tools.verify_mock_calls(self.execute, expected_calls_and_values) - def _test_get_bridges(self, exp_timeout=None): + def test_get_bridges_not_default_timeout(self): bridges = ['br-int', 'br-ex'] - if exp_timeout: - self.br.vsctl_timeout = exp_timeout + self.br.vsctl_timeout = 5 self.execute.return_value = 'br-int\\nbr-ex\n' self.assertEqual(self.br.get_bridges(), bridges) self._verify_vsctl_mock("list-br") - def test_get_bridges(self): - self._test_get_bridges() - - def test_get_bridges_not_default_timeout(self): - self._test_get_bridges(5) - def test_get_local_port_mac_succeeds(self): with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', return_value=mock.Mock(address='foo')): diff --git a/neutron/tests/unit/agent/common/test_utils.py b/neutron/tests/unit/agent/common/test_utils.py index 7c89b1e2b5e..a4cf6680204 100644 --- a/neutron/tests/unit/agent/common/test_utils.py +++ b/neutron/tests/unit/agent/common/test_utils.py @@ -27,6 +27,7 @@ class TestLoadInterfaceDriver(base.BaseTestCase): def setUp(self): super(TestLoadInterfaceDriver, self).setUp() self.conf = config.setup_conf() + self.conf.register_opts(interface.OPTS) config.register_interface_driver_opts_helper(self.conf) def test_load_interface_driver_not_set(self): diff --git a/neutron/tests/unit/agent/l2/__init__.py b/neutron/tests/unit/agent/l2/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/agent/l2/extensions/__init__.py b/neutron/tests/unit/agent/l2/extensions/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/agent/l2/extensions/test_manager.py b/neutron/tests/unit/agent/l2/extensions/test_manager.py new file mode 100644 index 00000000000..0f0e4294042 --- /dev/null +++ b/neutron/tests/unit/agent/l2/extensions/test_manager.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg + +from neutron.agent.l2.extensions import manager as ext_manager +from neutron.tests import base + + +class TestAgentExtensionsManager(base.BaseTestCase): + + def setUp(self): + super(TestAgentExtensionsManager, self).setUp() + mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension', + autospec=True).start() + conf = cfg.CONF + ext_manager.register_opts(conf) + cfg.CONF.set_override('extensions', ['qos'], 'agent') + self.manager = ext_manager.AgentExtensionsManager(conf) + + def _get_extension(self): + return self.manager.extensions[0].obj + + def test_initialize(self): + connection = object() + self.manager.initialize(connection, 'fake_driver_type') + ext = self._get_extension() + ext.initialize.assert_called_once_with(connection, 'fake_driver_type') + + def test_handle_port(self): + context = object() + data = object() + self.manager.handle_port(context, data) + ext = self._get_extension() + ext.handle_port.assert_called_once_with(context, data) + + def test_delete_port(self): + context = object() + data = object() + self.manager.delete_port(context, data) + ext = self._get_extension() + ext.delete_port.assert_called_once_with(context, data) diff --git a/neutron/tests/unit/agent/l2/extensions/test_qos.py b/neutron/tests/unit/agent/l2/extensions/test_qos.py new file mode 100755 index 00000000000..0ff6175c560 --- /dev/null +++ b/neutron/tests/unit/agent/l2/extensions/test_qos.py @@ -0,0 +1,187 @@ +# Copyright (c) 2015 Mellanox 
Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_utils import uuidutils + +from neutron.agent.l2.extensions import qos +from neutron.api.rpc.callbacks.consumer import registry +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron import context +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants +from neutron.tests import base + + +TEST_POLICY = object() + + +class QosExtensionBaseTestCase(base.BaseTestCase): + + def setUp(self): + super(QosExtensionBaseTestCase, self).setUp() + self.qos_ext = qos.QosAgentExtension() + self.context = context.get_admin_context() + self.connection = mock.Mock() + + # Don't rely on used driver + mock.patch( + 'neutron.manager.NeutronManager.load_class_for_provider', + return_value=lambda: mock.Mock(spec=qos.QosAgentDriver) + ).start() + + +class QosExtensionRpcTestCase(QosExtensionBaseTestCase): + + def setUp(self): + super(QosExtensionRpcTestCase, self).setUp() + self.qos_ext.initialize( + self.connection, constants.EXTENSION_DRIVER_TYPE) + + self.pull_mock = mock.patch.object( + self.qos_ext.resource_rpc, 'pull', + return_value=TEST_POLICY).start() + + def _create_test_port_dict(self): + return {'port_id': uuidutils.generate_uuid(), + 'qos_policy_id': uuidutils.generate_uuid()} + + def test_handle_port_with_no_policy(self): + port = 
self._create_test_port_dict() + del port['qos_policy_id'] + self.qos_ext._process_reset_port = mock.Mock() + self.qos_ext.handle_port(self.context, port) + self.qos_ext._process_reset_port.assert_called_with(port) + + def test_handle_unknown_port(self): + port = self._create_test_port_dict() + qos_policy_id = port['qos_policy_id'] + port_id = port['port_id'] + self.qos_ext.handle_port(self.context, port) + # we make sure the underlaying qos driver is called with the + # right parameters + self.qos_ext.qos_driver.create.assert_called_once_with( + port, TEST_POLICY) + self.assertEqual(port, + self.qos_ext.qos_policy_ports[qos_policy_id][port_id]) + self.assertTrue(port_id in self.qos_ext.known_ports) + + def test_handle_known_port(self): + port_obj1 = self._create_test_port_dict() + port_obj2 = dict(port_obj1) + self.qos_ext.handle_port(self.context, port_obj1) + self.qos_ext.qos_driver.reset_mock() + self.qos_ext.handle_port(self.context, port_obj2) + self.assertFalse(self.qos_ext.qos_driver.create.called) + + def test_handle_known_port_change_policy_id(self): + port = self._create_test_port_dict() + self.qos_ext.handle_port(self.context, port) + self.qos_ext.resource_rpc.pull.reset_mock() + port['qos_policy_id'] = uuidutils.generate_uuid() + self.qos_ext.handle_port(self.context, port) + self.pull_mock.assert_called_once_with( + self.context, resources.QOS_POLICY, + port['qos_policy_id']) + #TODO(QoS): handle qos_driver.update call check when + # we do that + + def test_delete_known_port(self): + port = self._create_test_port_dict() + port_id = port['port_id'] + self.qos_ext.handle_port(self.context, port) + self.qos_ext.qos_driver.reset_mock() + self.qos_ext.delete_port(self.context, port) + self.qos_ext.qos_driver.delete.assert_called_with(port, None) + self.assertNotIn(port_id, self.qos_ext.known_ports) + + def test_delete_unknown_port(self): + port = self._create_test_port_dict() + port_id = port['port_id'] + self.qos_ext.delete_port(self.context, port) + 
self.assertFalse(self.qos_ext.qos_driver.delete.called) + self.assertNotIn(port_id, self.qos_ext.known_ports) + + def test__handle_notification_ignores_all_event_types_except_updated(self): + with mock.patch.object( + self.qos_ext, '_process_update_policy') as update_mock: + + for event_type in set(events.VALID) - {events.UPDATED}: + self.qos_ext._handle_notification(object(), event_type) + self.assertFalse(update_mock.called) + + def test__handle_notification_passes_update_events(self): + with mock.patch.object( + self.qos_ext, '_process_update_policy') as update_mock: + + policy = mock.Mock() + self.qos_ext._handle_notification(policy, events.UPDATED) + update_mock.assert_called_with(policy) + + def test__process_update_policy(self): + port1 = self._create_test_port_dict() + port2 = self._create_test_port_dict() + self.qos_ext.qos_policy_ports = { + port1['qos_policy_id']: {port1['port_id']: port1}, + port2['qos_policy_id']: {port2['port_id']: port2}, + } + policy = mock.Mock() + policy.id = port1['qos_policy_id'] + self.qos_ext._process_update_policy(policy) + self.qos_ext.qos_driver.update.assert_called_with(port1, policy) + + self.qos_ext.qos_driver.update.reset_mock() + policy.id = port2['qos_policy_id'] + self.qos_ext._process_update_policy(policy) + self.qos_ext.qos_driver.update.assert_called_with(port2, policy) + + def test__process_reset_port(self): + port1 = self._create_test_port_dict() + port2 = self._create_test_port_dict() + port1_id = port1['port_id'] + port2_id = port2['port_id'] + self.qos_ext.qos_policy_ports = { + port1['qos_policy_id']: {port1_id: port1}, + port2['qos_policy_id']: {port2_id: port2}, + } + self.qos_ext.known_ports = {port1_id, port2_id} + + self.qos_ext._process_reset_port(port1) + self.qos_ext.qos_driver.delete.assert_called_with(port1, None) + self.assertNotIn(port1_id, self.qos_ext.known_ports) + self.assertIn(port2_id, self.qos_ext.known_ports) + + self.qos_ext.qos_driver.delete.reset_mock() + 
self.qos_ext._process_reset_port(port2) + self.qos_ext.qos_driver.delete.assert_called_with(port2, None) + self.assertNotIn(port2_id, self.qos_ext.known_ports) + + +class QosExtensionInitializeTestCase(QosExtensionBaseTestCase): + + @mock.patch.object(registry, 'subscribe') + @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') + def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): + self.qos_ext.initialize( + self.connection, constants.EXTENSION_DRIVER_TYPE) + self.connection.create_consumer.assert_has_calls( + [mock.call( + resources_rpc.resource_type_versioned_topic(resource_type), + [rpc_mock()], + fanout=True) + for resource_type in self.qos_ext.SUPPORTED_RESOURCES] + ) + subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index b59c9cc632d..54721ae76b9 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -23,6 +23,7 @@ import netaddr from oslo_log import log import oslo_messaging from oslo_utils import uuidutils +import six from testtools import matchers from neutron.agent.common import config as agent_config @@ -35,8 +36,10 @@ from neutron.agent.l3 import legacy_router from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as l3router +from neutron.agent.linux import dibbler from neutron.agent.linux import external_process from neutron.agent.linux import interface +from neutron.agent.linux import pd from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc @@ -352,7 +355,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): sn_port['id'], sn_port['fixed_ips'], sn_port['mac_address'], - ri.get_snat_int_device_name(sn_port['id']), + ri._get_snat_int_device_name(sn_port['id']), 
dvr_snat_ns.SNAT_INT_DEV_PREFIX) elif action == 'remove': self.device_exists.return_value = False @@ -450,7 +453,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): **self.ri_kwargs) ri._create_dvr_gateway = mock.Mock() ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports) - ri.create_snat_namespace() + ri._create_snat_namespace() ri.fip_ns = agent.get_fip_ns(ex_net_id) ri.internal_ports = self.snat_ports else: @@ -578,41 +581,60 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): def test_external_gateway_updated_dual_stack(self): self._test_external_gateway_updated(dual_stack=True) - def _test_ext_gw_updated_dvr_agent_mode(self, host, - agent_mode, expected_call_count): + def _test_ext_gw_updated_dvr_edge_router(self, host_match, + snat_hosted_before=True): + """ + Helper to test external gw update for edge router on dvr_snat agent + + :param host_match: True if new gw host should be the same as agent host + :param snat_hosted_before: True if agent has already been hosting + snat for the router + """ router = l3_test_common.prepare_router_data(num_internal_ports=2) - agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) - ri = dvr_router.DvrEdgeRouter(agent, + ri = dvr_router.DvrEdgeRouter(mock.Mock(), HOSTNAME, router['id'], router, **self.ri_kwargs) - ri.create_snat_namespace() + if snat_hosted_before: + ri._create_snat_namespace() + snat_ns_name = ri.snat_namespace.name + else: + self.assertIsNone(ri.snat_namespace) + interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_added = mock.Mock() - # test agent mode = dvr (compute node) - router['gw_port_host'] = host - agent.conf.agent_mode = agent_mode + router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo') ri.external_gateway_updated(ex_gw_port, interface_name) - # no gateway should be added on dvr node - self.assertEqual(expected_call_count, - ri._external_gateway_added.call_count) + if not host_match: + 
self.assertFalse(ri._external_gateway_added.called) + if snat_hosted_before: + # host mismatch means that snat was rescheduled to another + # agent, hence need to verify that gw port was unplugged and + # snat namespace was deleted + self.mock_driver.unplug.assert_called_with( + interface_name, + bridge=self.conf.external_network_bridge, + namespace=snat_ns_name, + prefix=l3_agent.EXTERNAL_DEV_PREFIX) + self.assertIsNone(ri.snat_namespace) + else: + if not snat_hosted_before: + self.assertIsNotNone(ri.snat_namespace) + self.assertTrue(ri._external_gateway_added.called) - def test_ext_gw_updated_dvr_agent_mode(self): - # no gateway should be added on dvr node - self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0) + def test_ext_gw_updated_dvr_edge_router(self): + self._test_ext_gw_updated_dvr_edge_router(host_match=True) - def test_ext_gw_updated_dvr_snat_agent_mode_no_host(self): - # no gateway should be added on dvr_snat node without host match - self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr_snat', 0) + def test_ext_gw_updated_dvr_edge_router_host_mismatch(self): + self._test_ext_gw_updated_dvr_edge_router(host_match=False) - def test_ext_gw_updated_dvr_snat_agent_mode_host(self): - # gateway should be added on dvr_snat node - self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME, - 'dvr_snat', 1) + def test_ext_gw_updated_dvr_dvr_edge_router_snat_rescheduled(self): + self._test_ext_gw_updated_dvr_edge_router(host_match=True, + snat_hosted_before=False) def test_agent_add_external_gateway(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) @@ -1058,6 +1080,28 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): router) self.assertEqual(self.send_adv_notif.call_count, 1) + def test_update_routing_table(self): + # Just verify the correct namespace was used in the call + router = l3_test_common.prepare_router_data() + uuid = router['id'] + netns = 'snat-' + uuid + fake_route1 = {'destination': '135.207.0.0/16', + 
'nexthop': '1.2.3.4'} + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + ri = dvr_router.DvrEdgeRouter( + agent, + HOSTNAME, + uuid, + router, + **self.ri_kwargs) + ri._update_routing_table = mock.Mock() + + ri.update_routing_table('replace', fake_route1) + ri._update_routing_table.assert_called_once_with('replace', + fake_route1, + netns) + def test_process_router_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() @@ -1130,14 +1174,18 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.assertFalse(nat_rules_delta) return ri - def _expected_call_lookup_ri_process(self, ri, process): - """Expected call if a process is looked up in a router instance.""" - return [mock.call(uuid=ri.router['id'], - service=process, + def _radvd_expected_call_external_process(self, ri, enable=True): + expected_calls = [mock.call(uuid=ri.router['id'], + service='radvd', default_cmd_callback=mock.ANY, namespace=ri.ns_name, conf=mock.ANY, run_as_root=True)] + if enable: + expected_calls.append(mock.call().enable(reload_cfg=True)) + else: + expected_calls.append(mock.call().disable()) + return expected_calls def _process_router_ipv6_subnet_added( self, router, ipv6_subnet_modes=None): @@ -1156,24 +1204,20 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self._process_router_instance_for_agent(agent, ri, router) return ri - def _assert_ri_process_enabled(self, ri, process): + def _assert_ri_process_enabled(self, ri): """Verify that process was enabled for a router instance.""" - expected_calls = self._expected_call_lookup_ri_process( - ri, process) - expected_calls.append(mock.call().enable(reload_cfg=True)) + expected_calls = self._radvd_expected_call_external_process(ri) self.assertEqual(expected_calls, self.external_process.mock_calls) - def _assert_ri_process_disabled(self, ri, process): + def _assert_ri_process_disabled(self, ri): """Verify that process was disabled for a 
router instance.""" - expected_calls = self._expected_call_lookup_ri_process( - ri, process) - expected_calls.append(mock.call().disable()) + expected_calls = self._radvd_expected_call_external_process(ri, False) self.assertEqual(expected_calls, self.external_process.mock_calls) def test_process_router_ipv6_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added(router) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) # Expect radvd configured without prefix self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1].split()) @@ -1182,7 +1226,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added( router, ra_mode=l3_constants.IPV6_SLAAC) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) # Expect radvd configured with prefix self.assertIn('prefix', self.utils_replace_file.call_args[0][1].split()) @@ -1196,7 +1240,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'address_mode': l3_constants.DHCPV6_STATELESS}, {'ra_mode': l3_constants.DHCPV6_STATEFUL, 'address_mode': l3_constants.DHCPV6_STATEFUL}]) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() # Assert we have a prefix from IPV6_SLAAC and a prefix from # DHCPV6_STATELESS on one interface @@ -1216,7 +1260,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): {'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}]) self._process_router_instance_for_agent(agent, ri, router) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(1, len(ri.internal_ports[1]['subnets'])) self.assertEqual(1, 
len(ri.internal_ports[1]['fixed_ips'])) @@ -1238,7 +1282,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self._process_router_instance_for_agent(agent, ri, router) # radvd should have been enabled again and the interface # should have two prefixes - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(2, len(ri.internal_ports[1]['subnets'])) self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips'])) @@ -1257,7 +1301,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): l3_test_common.router_append_interface(router, count=1, ip_version=6) # Reassign the router object to RouterInfo self._process_router_instance_for_agent(agent, ri, router) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) def test_process_router_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1283,14 +1327,14 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): # Add an IPv6 interface and reprocess l3_test_common.router_append_interface(router, count=1, ip_version=6) self._process_router_instance_for_agent(agent, ri, router) - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) # Reset the calls so we can check for disable radvd self.external_process.reset_mock() self.process_monitor.reset_mock() # Remove the IPv6 interface and reprocess del router[l3_constants.INTERFACE_KEY][1] self._process_router_instance_for_agent(agent, ri, router) - self._assert_ri_process_disabled(ri, 'radvd') + self._assert_ri_process_disabled(ri) def test_process_router_ipv6_subnet_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1305,7 +1349,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'address_mode': l3_constants.IPV6_SLAAC}] * 2)) self._process_router_instance_for_agent(agent, ri, router) - 
self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) # Reset mocks to check for modified radvd config self.utils_replace_file.reset_mock() self.external_process.reset_mock() @@ -1317,7 +1361,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self._process_router_instance_for_agent(agent, ri, router) # Assert radvd was enabled again and that we only have one # prefix on the interface - self._assert_ri_process_enabled(ri, 'radvd') + self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(1, len(ri.internal_ports[1]['subnets'])) self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips'])) @@ -1958,7 +2002,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} - interface_name = ri.get_snat_int_device_name(port_id) + interface_name = ri._get_snat_int_device_name(port_id) self.device_exists.return_value = False with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces: @@ -2102,3 +2146,364 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.utils_replace_file.call_args[0][1]) assertFlag(managed_flag)('AdvManagedFlag on;', self.utils_replace_file.call_args[0][1]) + + def _pd_expected_call_external_process(self, requestor, ri, enable=True): + expected_calls = [] + if enable: + expected_calls.append(mock.call(uuid=requestor, + service='dibbler', + default_cmd_callback=mock.ANY, + namespace=ri.ns_name, + conf=mock.ANY, + pid_file=mock.ANY)) + expected_calls.append(mock.call().enable(reload_cfg=False)) + else: + expected_calls.append(mock.call(uuid=requestor, + service='dibbler', + namespace=ri.ns_name, + conf=mock.ANY, + pid_file=mock.ANY)) + expected_calls.append(mock.call().disable( + get_stop_command=mock.ANY)) + return expected_calls + + def _pd_setup_agent_router(self): + router = l3_test_common.prepare_router_data() + ri = l3router.RouterInfo(router['id'], 
router, **self.ri_kwargs) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.external_gateway_added = mock.Mock() + ri.process(agent) + agent._router_added(router['id'], router) + # Make sure radvd monitor is created + if not ri.radvd: + ri.radvd = ra.DaemonMonitor(router['id'], + ri.ns_name, + agent.process_monitor, + ri.get_internal_device_name) + return agent, router, ri + + def _pd_remove_gw_interface(self, intfs, agent, router, ri): + expected_pd_update = {} + expected_calls = [] + for intf in intfs: + requestor_id = self._pd_get_requestor_id(intf, router, ri) + expected_calls += (self._pd_expected_call_external_process( + requestor_id, ri, False)) + for subnet in intf['subnets']: + expected_pd_update[subnet['id']] = ( + l3_constants.PROVISIONAL_IPV6_PD_PREFIX) + + # Implement the prefix update notifier + # Keep track of the updated prefix + self.pd_update = {} + + def pd_notifier(context, prefix_update): + self.pd_update = prefix_update + for subnet_id, prefix in six.iteritems(prefix_update): + for intf in intfs: + for subnet in intf['subnets']: + if subnet['id'] == subnet_id: + # Update the prefix + subnet['cidr'] = prefix + break + + # Remove the gateway interface + agent.pd.notifier = pd_notifier + agent.pd.remove_gw_interface(router['id']) + + self._pd_assert_dibbler_calls(expected_calls, + self.external_process.mock_calls[-len(expected_calls):]) + self.assertEqual(expected_pd_update, self.pd_update) + + def _pd_remove_interfaces(self, intfs, agent, router, ri): + expected_pd_update = [] + expected_calls = [] + for intf in intfs: + # Remove the router interface + router[l3_constants.INTERFACE_KEY].remove(intf) + requestor_id = self._pd_get_requestor_id(intf, router, ri) + expected_calls += (self._pd_expected_call_external_process( + requestor_id, ri, False)) + for subnet in intf['subnets']: + expected_pd_update += [{subnet['id']: + l3_constants.PROVISIONAL_IPV6_PD_PREFIX}] + + # Implement the prefix update notifier + # Keep track of the updated 
prefix + self.pd_update = [] + + def pd_notifier(context, prefix_update): + self.pd_update.append(prefix_update) + for intf in intfs: + for subnet in intf['subnets']: + if subnet['id'] == prefix_update.keys()[0]: + # Update the prefix + subnet['cidr'] = prefix_update.values()[0] + + # Process the router for removed interfaces + agent.pd.notifier = pd_notifier + ri.process(agent) + + # The number of external process calls takes radvd into account. + # This is because there is no ipv6 interface any more after removing + # the interfaces, and radvd will be killed because of that + self._pd_assert_dibbler_calls(expected_calls, + self.external_process.mock_calls[-len(expected_calls) - 2:]) + self._pd_assert_radvd_calls(ri, False) + self.assertEqual(expected_pd_update, self.pd_update) + + def _pd_get_requestor_id(self, intf, router, ri): + ifname = ri.get_internal_device_name(intf['id']) + for subnet in intf['subnets']: + return dibbler.PDDibbler(router['id'], + subnet['id'], ifname).requestor_id + + def _pd_assert_dibbler_calls(self, expected, actual): + '''Check the external process calls for dibbler are expected + + in the case of multiple pd-enabled router ports, the exact sequence + of these calls are not deterministic. It's known, though, that each + external_process call is followed with either an enable() or disable() + ''' + + num_ext_calls = len(expected) / 2 + expected_ext_calls = [] + actual_ext_calls = [] + expected_action_calls = [] + actual_action_calls = [] + for c in range(num_ext_calls): + expected_ext_calls.append(expected[c * 2]) + actual_ext_calls.append(actual[c * 2]) + expected_action_calls.append(expected[c * 2 + 1]) + actual_action_calls.append(actual[c * 2 + 1]) + + self.assertEqual(expected_action_calls, actual_action_calls) + for exp in expected_ext_calls: + for act in actual_ext_calls: + if exp == act: + break + else: + msg = "Unexpected dibbler external process call." 
+ self.fail(msg) + + def _pd_assert_radvd_calls(self, ri, enable=True): + exp_calls = self._radvd_expected_call_external_process(ri, enable) + self.assertEqual(exp_calls, + self.external_process.mock_calls[-len(exp_calls):]) + + def _pd_get_prefixes(self, agent, router, ri, + existing_intfs, new_intfs, mock_get_prefix): + # First generate the prefixes that will be used for each interface + prefixes = {} + expected_pd_update = {} + expected_calls = [] + for ifno, intf in enumerate(existing_intfs + new_intfs): + requestor_id = self._pd_get_requestor_id(intf, router, ri) + prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno + if intf in new_intfs: + subnet_id = (intf['subnets'][0]['id'] if intf['subnets'] + else None) + expected_pd_update[subnet_id] = prefixes[requestor_id] + expected_calls += ( + self._pd_expected_call_external_process(requestor_id, ri)) + + # Implement the prefix update notifier + # Keep track of the updated prefix + self.pd_update = {} + + def pd_notifier(context, prefix_update): + self.pd_update = prefix_update + for subnet_id, prefix in six.iteritems(prefix_update): + for intf in new_intfs: + for subnet in intf['subnets']: + if subnet['id'] == subnet_id: + # Update the prefix + subnet['cidr'] = prefix + break + + # Start the dibbler client + agent.pd.notifier = pd_notifier + agent.pd.process_prefix_update() + + # Get the prefix and check that the neutron server is notified + def get_prefix(pdo): + key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname) + return prefixes[key] + mock_get_prefix.side_effect = get_prefix + agent.pd.process_prefix_update() + + # Make sure that the updated prefixes are expected + self._pd_assert_dibbler_calls(expected_calls, + self.external_process.mock_calls[-len(expected_calls):]) + self.assertEqual(expected_pd_update, self.pd_update) + + def _pd_add_gw_interface(self, agent, router, ri): + gw_ifname = ri.get_external_device_name(router['gw_port']['id']) + agent.pd.add_gw_interface(router['id'], 
gw_ifname) + + @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) + @mock.patch.object(dibbler.os, 'getpid', return_value=1234) + @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', + return_value=True) + @mock.patch.object(dibbler.os, 'chmod') + @mock.patch.object(dibbler.shutil, 'rmtree') + @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') + def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4, + mock_getpid, mock_get_prefix): + '''Add and remove one pd-enabled subnet + Remove the interface by deleting it from the router + ''' + # Initial setup + agent, router, ri = self._pd_setup_agent_router() + + # Create one pd-enabled subnet and add router interface + intfs = l3_test_common.router_append_pd_enabled_subnet(router) + ri.process(agent) + + # No client should be started since there is no gateway port + self.assertFalse(self.external_process.call_count) + self.assertFalse(mock_get_prefix.call_count) + + # Add the gateway interface + self._pd_add_gw_interface(agent, router, ri) + + # Get one prefix + self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) + + # Update the router with the new prefix + ri.process(agent) + + # Check that radvd is started and the router port is configured + # with the new prefix + self._pd_assert_radvd_calls(ri) + + # Now remove the interface + self._pd_remove_interfaces(intfs, agent, router, ri) + + @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) + @mock.patch.object(dibbler.os, 'getpid', return_value=1234) + @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', + return_value=True) + @mock.patch.object(dibbler.os, 'chmod') + @mock.patch.object(dibbler.shutil, 'rmtree') + @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') + def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4, + mock_getpid, mock_get_prefix): + '''Add one pd-enabled subnet and remove the gateway port + Remove the gateway port and check the prefix is removed + ''' + # Initial 
setup + agent, router, ri = self._pd_setup_agent_router() + + # Create one pd-enabled subnet and add router interface + intfs = l3_test_common.router_append_pd_enabled_subnet(router) + ri.process(agent) + + # Add the gateway interface + self._pd_add_gw_interface(agent, router, ri) + + # Get one prefix + self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) + + # Update the router with the new prefix + ri.process(agent) + + # Check that radvd is started + self._pd_assert_radvd_calls(ri) + + # Now remove the gw interface + self._pd_remove_gw_interface(intfs, agent, router, ri) + + # There will be a router update + ri.process(agent) + + @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) + @mock.patch.object(dibbler.os, 'getpid', return_value=1234) + @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', + return_value=True) + @mock.patch.object(dibbler.os, 'chmod') + @mock.patch.object(dibbler.shutil, 'rmtree') + @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') + def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4, + mock_getpid, mock_get_prefix): + '''Add and remove two pd-enabled subnets + Remove the interfaces by deleting them from the router + ''' + # Initial setup + agent, router, ri = self._pd_setup_agent_router() + + # Create 2 pd-enabled subnets and add router interfaces + intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2) + ri.process(agent) + + # No client should be started + self.assertFalse(self.external_process.call_count) + self.assertFalse(mock_get_prefix.call_count) + + # Add the gateway interface + self._pd_add_gw_interface(agent, router, ri) + + # Get prefixes + self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) + + # Update the router with the new prefix + ri.process(agent) + + # Check that radvd is started and the router port is configured + # with the new prefix + self._pd_assert_radvd_calls(ri) + + # Now remove the interface + 
self._pd_remove_interfaces(intfs, agent, router, ri) + + @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) + @mock.patch.object(dibbler.os, 'getpid', return_value=1234) + @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', + return_value=True) + @mock.patch.object(dibbler.os, 'chmod') + @mock.patch.object(dibbler.shutil, 'rmtree') + @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') + def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4, + mock_getpid, mock_get_prefix): + '''Add one pd-enabled subnet, followed by adding another one + Remove the gateway port and check the prefix is removed + ''' + # Initial setup + agent, router, ri = self._pd_setup_agent_router() + + # Add the gateway interface + self._pd_add_gw_interface(agent, router, ri) + + # Create 1 pd-enabled subnet and add router interface + intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1) + ri.process(agent) + + # Get prefixes + self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) + + # Update the router with the new prefix + ri.process(agent) + + # Check that radvd is started + self._pd_assert_radvd_calls(ri) + + # Now add another interface + # Create one pd-enabled subnet and add router interface + intfs1 = l3_test_common.router_append_pd_enabled_subnet(router, + count=1) + ri.process(agent) + + # Get prefixes + self._pd_get_prefixes(agent, router, ri, intfs, + intfs1, mock_get_prefix) + + # Update the router with the new prefix + ri.process(agent) + + # Check that radvd is notified for the new prefix + self._pd_assert_radvd_calls(ri) + + # Now remove the gw interface + self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri) + + ri.process(agent) diff --git a/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py b/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py index db3423f6c3e..951149f39fd 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py +++ b/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py @@ -57,13 +57,15 
@@ class TestDvrFipNs(base.BaseTestCase): self.assertFalse(is_last) def test_allocate_rule_priority(self): - pr = self.fip_ns.allocate_rule_priority() - self.assertNotIn(pr, self.fip_ns._rule_priorities) + pr = self.fip_ns.allocate_rule_priority('20.0.0.30') + self.assertIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) + self.assertNotIn(pr, self.fip_ns._rule_priorities.pool) def test_deallocate_rule_priority(self): - pr = self.fip_ns.allocate_rule_priority() - self.fip_ns.deallocate_rule_priority(pr) - self.assertIn(pr, self.fip_ns._rule_priorities) + pr = self.fip_ns.allocate_rule_priority('20.0.0.30') + self.fip_ns.deallocate_rule_priority('20.0.0.30') + self.assertNotIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) + self.assertIn(pr, self.fip_ns._rule_priorities.pool) @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') @@ -179,6 +181,7 @@ class TestDvrFipNs(base.BaseTestCase): device_exists.return_value = True ri = mock.Mock() ri.dist_fip_count = None + ri.floating_ips_dict = {} ip_list = [{'cidr': '111.2.3.4/32'}, {'cidr': '111.2.3.5/32'}] self._test_scan_fip_ports(ri, ip_list) self.assertEqual(2, ri.dist_fip_count) @@ -188,6 +191,7 @@ class TestDvrFipNs(base.BaseTestCase): device_exists.return_value = True ri = mock.Mock() ri.dist_fip_count = None + ri.floating_ips_dict = {} self._test_scan_fip_ports(ri, []) self.assertEqual(0, ri.dist_fip_count) diff --git a/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py b/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py new file mode 100644 index 00000000000..b7d606d5865 --- /dev/null +++ b/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py @@ -0,0 +1,61 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.agent.l3 import fip_rule_priority_allocator as frpa +from neutron.tests import base + + +class TestFipPriority(base.BaseTestCase): + def setUp(self): + super(TestFipPriority, self).setUp() + + def test__init__(self): + test_pr = frpa.FipPriority(10) + self.assertEqual(10, test_pr.index) + + def test__repr__(self): + test_pr = frpa.FipPriority(20) + self.assertEqual("20", str(test_pr)) + + def test__eq__(self): + left_pr = frpa.FipPriority(10) + right_pr = frpa.FipPriority(10) + other_pr = frpa.FipPriority(20) + self.assertEqual(left_pr, right_pr) + self.assertNotEqual(left_pr, other_pr) + self.assertNotEqual(right_pr, other_pr) + + def test__hash__(self): + left_pr = frpa.FipPriority(10) + right_pr = frpa.FipPriority(10) + other_pr = frpa.FipPriority(20) + self.assertEqual(hash(left_pr), hash(right_pr)) + self.assertNotEqual(hash(left_pr), hash(other_pr)) + self.assertNotEqual(hash(other_pr), hash(right_pr)) + + +class TestFipRulePriorityAllocator(base.BaseTestCase): + def setUp(self): + super(TestFipRulePriorityAllocator, self).setUp() + self.priority_rule_start = 100 + self.priority_rule_end = 200 + self.data_store_path = '/data_store_path_test' + + def test__init__(self): + _frpa = frpa.FipRulePriorityAllocator(self.data_store_path, + self.priority_rule_start, + self.priority_rule_end) + self.assertEqual(self.data_store_path, _frpa.state_file) + self.assertEqual(frpa.FipPriority, _frpa.ItemClass) + self.assertEqual(100, len(_frpa.pool)) diff --git a/neutron/tests/unit/agent/l3/test_item_allocator.py 
b/neutron/tests/unit/agent/l3/test_item_allocator.py index 767ad8d5c52..c1142bbc449 100644 --- a/neutron/tests/unit/agent/l3/test_item_allocator.py +++ b/neutron/tests/unit/agent/l3/test_item_allocator.py @@ -12,18 +12,93 @@ # License for the specific language governing permissions and limitations # under the License. +import mock + from neutron.agent.l3 import item_allocator as ia from neutron.tests import base +class TestObject(object): + def __init__(self, value): + super(TestObject, self).__init__() + self._value = value + + def __str__(self): + return str(self._value) + + class TestItemAllocator(base.BaseTestCase): def setUp(self): super(TestItemAllocator, self).setUp() def test__init__(self): - test_pool = set(s for s in range(32768, 40000)) - a = ia.ItemAllocator('/file', object, test_pool) - self.assertEqual('/file', a.state_file) + test_pool = set(TestObject(s) for s in range(32768, 40000)) + with mock.patch.object(ia.ItemAllocator, '_write') as write: + a = ia.ItemAllocator('/file', TestObject, test_pool) + test_object = a.allocate('test') + + self.assertTrue('test' in a.allocations) + self.assertTrue(test_object in a.allocations.values()) + self.assertTrue(test_object not in a.pool) + self.assertTrue(write.called) + + def test__init__readfile(self): + test_pool = set(TestObject(s) for s in range(32768, 40000)) + with mock.patch.object(ia.ItemAllocator, '_read') as read: + read.return_value = ["da873ca2,10\n"] + a = ia.ItemAllocator('/file', TestObject, test_pool) + + self.assertTrue('da873ca2' in a.remembered) self.assertEqual({}, a.allocations) - self.assertEqual(object, a.ItemClass) - self.assertEqual(test_pool, a.pool) + + def test_allocate(self): + test_pool = set([TestObject(33000), TestObject(33001)]) + a = ia.ItemAllocator('/file', TestObject, test_pool) + with mock.patch.object(ia.ItemAllocator, '_write') as write: + test_object = a.allocate('test') + + self.assertTrue('test' in a.allocations) + self.assertTrue(test_object in 
a.allocations.values()) + self.assertTrue(test_object not in a.pool) + self.assertTrue(write.called) + + def test_allocate_from_file(self): + test_pool = set([TestObject(33000), TestObject(33001)]) + with mock.patch.object(ia.ItemAllocator, '_read') as read: + read.return_value = ["deadbeef,33000\n"] + a = ia.ItemAllocator('/file', TestObject, test_pool) + + with mock.patch.object(ia.ItemAllocator, '_write') as write: + t_obj = a.allocate('deadbeef') + + self.assertEqual('33000', t_obj._value) + self.assertTrue('deadbeef' in a.allocations) + self.assertTrue(t_obj in a.allocations.values()) + self.assertTrue(33000 not in a.pool) + self.assertFalse(write.called) + + def test_allocate_exhausted_pool(self): + test_pool = set([TestObject(33000)]) + with mock.patch.object(ia.ItemAllocator, '_read') as read: + read.return_value = ["deadbeef,33000\n"] + a = ia.ItemAllocator('/file', TestObject, test_pool) + + with mock.patch.object(ia.ItemAllocator, '_write') as write: + allocation = a.allocate('abcdef12') + + self.assertFalse('deadbeef' in a.allocations) + self.assertTrue(allocation not in a.pool) + self.assertTrue(write.called) + + def test_release(self): + test_pool = set([TestObject(33000), TestObject(33001)]) + with mock.patch.object(ia.ItemAllocator, '_write') as write: + a = ia.ItemAllocator('/file', TestObject, test_pool) + allocation = a.allocate('deadbeef') + write.reset_mock() + a.release('deadbeef') + + self.assertTrue('deadbeef' not in a.allocations) + self.assertTrue(allocation in a.pool) + self.assertEqual({}, a.allocations) + write.assert_called_once_with([]) diff --git a/neutron/tests/unit/agent/l3/test_link_local_allocator.py b/neutron/tests/unit/agent/l3/test_link_local_allocator.py index 89ad856f1be..e33b6769d97 100644 --- a/neutron/tests/unit/agent/l3/test_link_local_allocator.py +++ b/neutron/tests/unit/agent/l3/test_link_local_allocator.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock import netaddr from neutron.agent.l3 import link_local_allocator as lla @@ -28,69 +27,3 @@ class TestLinkLocalAddrAllocator(base.BaseTestCase): a = lla.LinkLocalAllocator('/file', self.subnet.cidr) self.assertEqual('/file', a.state_file) self.assertEqual({}, a.allocations) - - def test__init__readfile(self): - with mock.patch.object(lla.LinkLocalAllocator, '_read') as read: - read.return_value = ["da873ca2,169.254.31.28/31\n"] - a = lla.LinkLocalAllocator('/file', self.subnet.cidr) - - self.assertTrue('da873ca2' in a.remembered) - self.assertEqual({}, a.allocations) - - def test_allocate(self): - a = lla.LinkLocalAllocator('/file', self.subnet.cidr) - with mock.patch.object(lla.LinkLocalAllocator, '_write') as write: - subnet = a.allocate('deadbeef') - - self.assertTrue('deadbeef' in a.allocations) - self.assertTrue(subnet not in a.pool) - self._check_allocations(a.allocations) - write.assert_called_once_with(['deadbeef,%s\n' % subnet.cidr]) - - def test_allocate_from_file(self): - with mock.patch.object(lla.LinkLocalAllocator, '_read') as read: - read.return_value = ["deadbeef,169.254.31.88/31\n"] - a = lla.LinkLocalAllocator('/file', self.subnet.cidr) - - with mock.patch.object(lla.LinkLocalAllocator, '_write') as write: - subnet = a.allocate('deadbeef') - - self.assertEqual(netaddr.IPNetwork('169.254.31.88/31'), subnet) - self.assertTrue(subnet not in a.pool) - self._check_allocations(a.allocations) - self.assertFalse(write.called) - - def test_allocate_exhausted_pool(self): - subnet = netaddr.IPNetwork('169.254.31.0/31') - with mock.patch.object(lla.LinkLocalAllocator, '_read') as read: - read.return_value = ["deadbeef,169.254.31.0/31\n"] - a = lla.LinkLocalAllocator('/file', subnet.cidr) - - with mock.patch.object(lla.LinkLocalAllocator, '_write') as write: - allocation = a.allocate('abcdef12') - - self.assertEqual(subnet, allocation) - self.assertFalse('deadbeef' in a.allocations) - self.assertTrue('abcdef12' in a.allocations) - 
self.assertTrue(allocation not in a.pool) - self._check_allocations(a.allocations) - write.assert_called_once_with(['abcdef12,%s\n' % allocation.cidr]) - - self.assertRaises(RuntimeError, a.allocate, 'deadbeef') - - def test_release(self): - with mock.patch.object(lla.LinkLocalAllocator, '_write') as write: - a = lla.LinkLocalAllocator('/file', self.subnet.cidr) - subnet = a.allocate('deadbeef') - write.reset_mock() - a.release('deadbeef') - - self.assertTrue('deadbeef' not in a.allocations) - self.assertTrue(subnet in a.pool) - self.assertEqual({}, a.allocations) - write.assert_called_once_with([]) - - def _check_allocations(self, allocations): - for key, subnet in allocations.items(): - self.assertTrue(subnet in self.subnet) - self.assertEqual(subnet.prefixlen, 31) diff --git a/neutron/tests/unit/agent/l3/test_router_info.py b/neutron/tests/unit/agent/l3/test_router_info.py index 557a639291a..66cafa41f4f 100644 --- a/neutron/tests/unit/agent/l3/test_router_info.py +++ b/neutron/tests/unit/agent/l3/test_router_info.py @@ -52,26 +52,41 @@ class TestRouterInfo(base.BaseTestCase): fake_route2 = {'destination': '135.207.111.111/32', 'nexthop': '1.2.3.4'} - ri._update_routing_table('replace', fake_route1) + ri.update_routing_table('replace', fake_route1) expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) - ri._update_routing_table('delete', fake_route1) + ri.update_routing_table('delete', fake_route1) expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) - ri._update_routing_table('replace', fake_route2) + ri.update_routing_table('replace', fake_route2) expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) - ri._update_routing_table('delete', fake_route2) + ri.update_routing_table('delete', fake_route2) expected = [['ip', 'route', 'delete', 'to', 
'135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) + def test_update_routing_table(self): + # Just verify the correct namespace was used in the call + uuid = _uuid() + netns = 'qrouter-' + uuid + fake_route1 = {'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'} + + ri = router_info.RouterInfo(uuid, {'id': uuid}, **self.ri_kwargs) + ri._update_routing_table = mock.Mock() + + ri.update_routing_table('replace', fake_route1) + ri._update_routing_table.assert_called_once_with('replace', + fake_route1, + netns) + def test_routes_updated(self): ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) ri.router = {} diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 1e2631dae4d..0045f56237f 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -39,6 +39,19 @@ class FakeIPAllocation(object): self.subnet_id = subnet_id +class FakeDNSAssignment(object): + def __init__(self, ip_address, dns_name='', domain='openstacklocal'): + if dns_name: + self.hostname = dns_name + else: + self.hostname = 'host-%s' % ip_address.replace( + '.', '-').replace(':', '-') + self.ip_address = ip_address + self.fqdn = self.hostname + if domain: + self.fqdn = '%s.%s.' % (self.hostname, domain) + + class DhcpOpt(object): def __init__(self, **kwargs): self.__dict__.update(ip_version=4) @@ -48,6 +61,13 @@ class DhcpOpt(object): return str(self.__dict__) +# A base class where class attributes can also be accessed by treating +# an instance as a dict. 
+class Dictable(object): + def __getitem__(self, k): + return self.__class__.__dict__.get(k) + + class FakeDhcpPort(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' admin_state_up = True @@ -61,6 +81,19 @@ class FakeDhcpPort(object): self.extra_dhcp_opts = [] +class FakeReservedPort(object): + admin_state_up = True + device_owner = 'network:dhcp' + fixed_ips = [FakeIPAllocation('192.168.0.6', + 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + mac_address = '00:00:80:aa:bb:ee' + device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT + + def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'): + self.extra_dhcp_opts = [] + self.id = id + + class FakePort1(object): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' admin_state_up = True @@ -70,8 +103,9 @@ class FakePort1(object): mac_address = '00:00:80:aa:bb:cc' device_id = 'fake_port1' - def __init__(self): + def __init__(self, domain='openstacklocal'): self.extra_dhcp_opts = [] + self.dns_assignment = [FakeDNSAssignment('192.168.0.2', domain=domain)] class FakePort2(object): @@ -82,6 +116,7 @@ class FakePort2(object): 'dddddddd-dddd-dddd-dddd-dddddddddddd')] mac_address = '00:00:f3:aa:bb:cc' device_id = 'fake_port2' + dns_assignment = [FakeDNSAssignment('192.168.0.3')] def __init__(self): self.extra_dhcp_opts = [] @@ -95,6 +130,8 @@ class FakePort3(object): 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('192.168.1.2', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] + dns_assignment = [FakeDNSAssignment('192.168.0.4'), + FakeDNSAssignment('192.168.1.2')] mac_address = '00:00:0f:aa:bb:cc' device_id = 'fake_port3' @@ -111,6 +148,7 @@ class FakePort4(object): 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] + dns_assignment = [FakeDNSAssignment('192.168.0.4')] mac_address = '00:16:3E:C2:77:1D' device_id = 'fake_port4' @@ -124,6 +162,7 @@ class FakePort5(object): device_owner = 'foo5' fixed_ips = [FakeIPAllocation('192.168.0.5', 
'dddddddd-dddd-dddd-dddd-dddddddddddd')] + dns_assignment = [FakeDNSAssignment('192.168.0.5')] mac_address = '00:00:0f:aa:bb:55' device_id = 'fake_port5' @@ -139,6 +178,7 @@ class FakePort6(object): device_owner = 'foo6' fixed_ips = [FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + dns_assignment = [FakeDNSAssignment('192.168.0.6')] mac_address = '00:00:0f:aa:bb:66' device_id = 'fake_port6' @@ -161,8 +201,10 @@ class FakeV6Port(object): mac_address = '00:00:f3:aa:bb:cc' device_id = 'fake_port6' - def __init__(self): + def __init__(self, domain='openstacklocal'): self.extra_dhcp_opts = [] + self.dns_assignment = [FakeDNSAssignment('fdca:3ba5:a17a:4ba3::2', + domain=domain)] class FakeV6PortExtraOpt(object): @@ -171,6 +213,7 @@ class FakeV6PortExtraOpt(object): device_owner = 'foo3' fixed_ips = [FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] + dns_assignment = [] mac_address = '00:16:3e:c2:77:1d' device_id = 'fake_port6' @@ -189,6 +232,7 @@ class FakeDualPortWithV6ExtraOpt(object): 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] + dns_assignment = [FakeDNSAssignment('192.168.0.3')] mac_address = '00:16:3e:c2:77:1d' device_id = 'fake_port6' @@ -210,8 +254,11 @@ class FakeDualPort(object): mac_address = '00:00:0f:aa:bb:cc' device_id = 'fake_dual_port' - def __init__(self): + def __init__(self, domain='openstacklocal'): self.extra_dhcp_opts = [] + self.dns_assignment = [FakeDNSAssignment('192.168.0.3', domain=domain), + FakeDNSAssignment('fdca:3ba5:a17a:4ba3::3', + domain=domain)] class FakeRouterPort(object): @@ -220,13 +267,16 @@ class FakeRouterPort(object): device_owner = constants.DEVICE_OWNER_ROUTER_INTF mac_address = '00:00:0f:rr:rr:rr' device_id = 'fake_router_port' + dns_assignment = [] def __init__(self, dev_owner=constants.DEVICE_OWNER_ROUTER_INTF, - ip_address='192.168.0.1'): + 
ip_address='192.168.0.1', domain='openstacklocal'): self.extra_dhcp_opts = [] self.device_owner = dev_owner self.fixed_ips = [FakeIPAllocation( ip_address, 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + self.dns_assignment = [FakeDNSAssignment(ip.ip_address, domain=domain) + for ip in self.fixed_ips] class FakeRouterPort2(object): @@ -235,6 +285,7 @@ class FakeRouterPort2(object): device_owner = constants.DEVICE_OWNER_ROUTER_INTF fixed_ips = [FakeIPAllocation('192.168.1.1', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + dns_assignment = [FakeDNSAssignment('192.168.1.1')] mac_address = '00:00:0f:rr:rr:r2' device_id = 'fake_router_port2' @@ -248,6 +299,7 @@ class FakePortMultipleAgents1(object): device_owner = constants.DEVICE_OWNER_DHCP fixed_ips = [FakeIPAllocation('192.168.0.5', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + dns_assignment = [FakeDNSAssignment('192.168.0.5')] mac_address = '00:00:0f:dd:dd:dd' device_id = 'fake_multiple_agents_port' @@ -261,6 +313,7 @@ class FakePortMultipleAgents2(object): device_owner = constants.DEVICE_OWNER_DHCP fixed_ips = [FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + dns_assignment = [FakeDNSAssignment('192.168.0.6')] mac_address = '00:00:0f:ee:ee:ee' device_id = 'fake_multiple_agents_port2' @@ -283,7 +336,7 @@ class FakeV6HostRoute(object): nexthop = '2001:0200:feed:7ac0::1' -class FakeV4Subnet(object): +class FakeV4Subnet(Dictable): id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' ip_version = 4 cidr = '192.168.0.0/24' @@ -293,52 +346,29 @@ class FakeV4Subnet(object): dns_nameservers = ['8.8.8.8'] -class FakeV4Subnet2(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 +class FakeV4Subnet2(FakeV4Subnet): cidr = '192.168.1.0/24' gateway_ip = '192.168.1.1' - enable_dhcp = True host_routes = [] - dns_nameservers = ['8.8.8.8'] -class FakeV4MetadataSubnet(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 +class FakeV4MetadataSubnet(FakeV4Subnet): cidr = '169.254.169.254/30' 
gateway_ip = '169.254.169.253' - enable_dhcp = True host_routes = [] dns_nameservers = [] -class FakeV4SubnetGatewayRoute(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 - cidr = '192.168.0.0/24' - gateway_ip = '192.168.0.1' - enable_dhcp = True +class FakeV4SubnetGatewayRoute(FakeV4Subnet): host_routes = [FakeV4HostRouteGateway] - dns_nameservers = ['8.8.8.8'] -class FakeV4SubnetMultipleAgentsWithoutDnsProvided(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 - cidr = '192.168.0.0/24' - gateway_ip = '192.168.0.1' - enable_dhcp = True +class FakeV4SubnetMultipleAgentsWithoutDnsProvided(FakeV4Subnet): dns_nameservers = [] host_routes = [] -class FakeV4SubnetAgentWithManyDnsProvided(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 - cidr = '192.168.0.0/24' - gateway_ip = '192.168.0.1' - enable_dhcp = True +class FakeV4SubnetAgentWithManyDnsProvided(FakeV4Subnet): dns_nameservers = ['2.2.2.2', '9.9.9.9', '1.1.1.1', '3.3.3.3'] host_routes = [] @@ -360,13 +390,7 @@ class FakeV4AgentWithManyDnsProvided(object): namespace = 'qdhcp-ns' -class FakeV4SubnetMultipleAgentsWithDnsProvided(object): - id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' - ip_version = 4 - cidr = '192.168.0.0/24' - gateway_ip = '192.168.0.1' - enable_dhcp = True - dns_nameservers = ['8.8.8.8'] +class FakeV4SubnetMultipleAgentsWithDnsProvided(FakeV4Subnet): host_routes = [] @@ -400,7 +424,7 @@ class FakeV4SubnetNoDHCP(object): dns_nameservers = [] -class FakeV6SubnetDHCPStateful(object): +class FakeV6SubnetDHCPStateful(Dictable): id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' ip_version = 6 cidr = 'fdca:3ba5:a17a:4ba3::/64' @@ -435,9 +459,8 @@ class FakeV6SubnetStateless(object): ipv6_ra_mode = None -class FakeV4SubnetNoGateway(object): +class FakeV4SubnetNoGateway(FakeV4Subnet): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' - ip_version = 4 cidr = '192.168.1.0/24' gateway_ip = None enable_dhcp = True @@ -445,12 +468,10 @@ class 
FakeV4SubnetNoGateway(object): dns_nameservers = [] -class FakeV4SubnetNoRouter(object): +class FakeV4SubnetNoRouter(FakeV4Subnet): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' - ip_version = 4 cidr = '192.168.1.0/24' gateway_ip = '192.168.1.1' - enable_dhcp = True host_routes = [] dns_nameservers = [] @@ -477,12 +498,40 @@ class FakeV6Network(object): class FakeDualNetwork(object): + id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' + subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] + # ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort()] + namespace = 'qdhcp-ns' + + def __init__(self, domain='openstacklocal'): + self.ports = [FakePort1(domain=domain), FakeV6Port(domain=domain), + FakeDualPort(domain=domain), + FakeRouterPort(domain=domain)] + + +class FakeDeviceManagerNetwork(object): id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort()] namespace = 'qdhcp-ns' +class FakeDualNetworkReserved(object): + id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' + subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] + ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), + FakeReservedPort()] + namespace = 'qdhcp-ns' + + +class FakeDualNetworkReserved2(object): + id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' + subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] + ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), + FakeReservedPort(), FakeReservedPort(id='reserved-2')] + namespace = 'qdhcp-ns' + + class FakeNetworkDhcpPort(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' subnets = [FakeV4Subnet()] @@ -714,9 +763,9 @@ class LocalChild(dhcp.DhcpLocalProcess): self.called.append('spawn') -class TestBase(base.BaseTestCase): +class TestConfBase(base.BaseTestCase): def setUp(self): - super(TestBase, self).setUp() + super(TestConfBase, self).setUp() self.conf = config.setup_conf() 
self.conf.register_opts(base_config.core_opts) self.conf.register_opts(dhcp_config.DHCP_OPTS) @@ -724,10 +773,17 @@ class TestBase(base.BaseTestCase): self.conf.register_opts(external_process.OPTS) config.register_interface_driver_opts_helper(self.conf) config.register_use_namespaces_opts_helper(self.conf) + + +class TestBase(TestConfBase): + def setUp(self): + super(TestBase, self).setUp() instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager") self.mock_mgr = instance.start() self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', default=True)) + self.conf.register_opt(cfg.BoolOpt("force_metadata", + default=False)) self.conf.register_opt(cfg.BoolOpt('enable_metadata_network', default=False)) self.config_parse(self.conf) @@ -1029,7 +1085,8 @@ class TestDnsmasq(TestBase): (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data) = self._test_no_dhcp_domain_alloc_data self.conf.set_override('dhcp_domain', '') - self._test_spawn(['--conf-file=']) + network = FakeDualNetwork(domain=self.conf.dhcp_domain) + self._test_spawn(['--conf-file='], network=network) self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_addn_name, exp_addn_data)]) @@ -1425,30 +1482,30 @@ class TestDnsmasq(TestBase): @property def _test_reload_allocation_data(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' - exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' + exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.' - 'openstacklocal,[fdca:3ba5:a17a:4ba3::2]\n' - '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal,' + 'openstacklocal.,[fdca:3ba5:a17a:4ba3::2]\n' + '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.' 
- 'openstacklocal,[fdca:3ba5:a17a:4ba3::3]\n' - '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,' + 'openstacklocal.,[fdca:3ba5:a17a:4ba3::3]\n' + '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' exp_addn_data = ( '192.168.0.2\t' - 'host-192-168-0-2.openstacklocal host-192-168-0-2\n' + 'host-192-168-0-2.openstacklocal. host-192-168-0-2\n' 'fdca:3ba5:a17a:4ba3::2\t' - 'host-fdca-3ba5-a17a-4ba3--2.openstacklocal ' + 'host-fdca-3ba5-a17a-4ba3--2.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--2\n' - '192.168.0.3\thost-192-168-0-3.openstacklocal ' + '192.168.0.3\thost-192-168-0-3.openstacklocal. ' 'host-192-168-0-3\n' 'fdca:3ba5:a17a:4ba3::3\t' - 'host-fdca-3ba5-a17a-4ba3--3.openstacklocal ' + 'host-fdca-3ba5-a17a-4ba3--3.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--3\n' '192.168.0.1\t' - 'host-192-168-0-1.openstacklocal ' + 'host-192-168-0-1.openstacklocal. ' 'host-192-168-0-1\n' ).lstrip() exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts' @@ -1724,11 +1781,11 @@ class TestDnsmasq(TestBase): def test_only_populates_dhcp_enabled_subnets(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' - exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' + exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' - '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal,' + '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' - '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,' + '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP()) dm._output_hosts_file() @@ -1737,13 +1794,13 @@ class TestDnsmasq(TestBase): def test_only_populates_dhcp_client_id(self): exp_host_name = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/host' - exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' + exp_host_data 
= ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:0f:aa:bb:55,id:test5,' - 'host-192-168-0-5.openstacklocal,' + 'host-192-168-0-5.openstacklocal.,' '192.168.0.5\n' '00:00:0f:aa:bb:66,id:test6,' - 'host-192-168-0-6.openstacklocal,192.168.0.6,' + 'host-192-168-0-6.openstacklocal.,192.168.0.6,' 'set:ccccccccc-cccc-cccc-cccc-ccccccccc\n').lstrip() dm = self._get_dnsmasq(FakeV4NetworkClientId) @@ -1753,13 +1810,13 @@ class TestDnsmasq(TestBase): def test_only_populates_dhcp_enabled_subnet_on_a_network(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' - exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' + exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' - '00:00:f3:aa:bb:cc,host-192-168-0-3.openstacklocal,' + '00:00:f3:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' - '00:00:0f:aa:bb:cc,host-192-168-0-4.openstacklocal,' + '00:00:0f:aa:bb:cc,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' - '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,' + '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualNetworkSingleDHCP()) dm._output_hosts_file() @@ -1785,10 +1842,10 @@ class TestDnsmasq(TestBase): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ( '00:16:3e:c2:77:1d,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' - '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal,' + '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal.,' '192.168.0.3,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' '00:00:0f:rr:rr:rr,' - 'host-192-168-0-1.openstacklocal,192.168.0.1\n').lstrip() + 'host-192-168-0-1.openstacklocal.,192.168.0.1\n').lstrip() exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ( 'tag:tag0,option6:domain-search,openstacklocal\n' @@ -1829,3 +1886,143 @@ class TestDnsmasq(TestBase): self.conf.set_override('enable_metadata_network', True) 
self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4MetadataNetwork())) + + def test_should_force_metadata_returns_true(self): + self.conf.set_override("force_metadata", True) + self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(self.conf, + mock.ANY)) + + +class TestDeviceManager(TestConfBase): + + @mock.patch('neutron.agent.linux.dhcp.ip_lib') + @mock.patch('neutron.agent.linux.dhcp.common_utils.load_interface_driver') + def test_setup(self, load_interface_driver, ip_lib): + """Test new and existing cases of DeviceManager's DHCP port setup + logic. + """ + + # Create DeviceManager. + self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', + default=False)) + plugin = mock.Mock() + mgr = dhcp.DeviceManager(self.conf, plugin) + load_interface_driver.assert_called_with(self.conf) + + # Setup with no existing DHCP port - expect a new DHCP port to + # be created. + network = FakeDeviceManagerNetwork() + network.tenant_id = 'Tenant A' + + def mock_create(dict): + port = dhcp.DictModel(dict['port']) + port.id = 'abcd-123456789' + port.mac_address = '00-12-34-56-78-90' + port.fixed_ips = [ + dhcp.DictModel({'subnet_id': ip['subnet_id'], + 'ip_address': 'unique-IP-address'}) + for ip in port.fixed_ips + ] + return port + + plugin.create_dhcp_port.side_effect = mock_create + mgr.driver.get_device_name.return_value = 'ns-XXX' + ip_lib.ensure_device_is_ready.return_value = True + mgr.setup(network) + plugin.create_dhcp_port.assert_called_with(mock.ANY) + + mgr.driver.init_l3.assert_called_with('ns-XXX', + mock.ANY, + namespace='qdhcp-ns') + cidrs = set(mgr.driver.init_l3.call_args[0][1]) + self.assertEqual(cidrs, set(['unique-IP-address/24', + 'unique-IP-address/64'])) + + # Now call setup again. This time we go through the existing + # port code path, and the driver's init_l3 method is called + # again. 
+ plugin.create_dhcp_port.reset_mock() + mgr.driver.init_l3.reset_mock() + mgr.setup(network) + mgr.driver.init_l3.assert_called_with('ns-XXX', + mock.ANY, + namespace='qdhcp-ns') + cidrs = set(mgr.driver.init_l3.call_args[0][1]) + self.assertEqual(cidrs, set(['unique-IP-address/24', + 'unique-IP-address/64'])) + self.assertFalse(plugin.create_dhcp_port.called) + + @mock.patch('neutron.agent.linux.dhcp.ip_lib') + @mock.patch('neutron.agent.linux.dhcp.common_utils.load_interface_driver') + def test_setup_reserved(self, load_interface_driver, ip_lib): + """Test reserved port case of DeviceManager's DHCP port setup + logic. + """ + + # Create DeviceManager. + self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', + default=False)) + plugin = mock.Mock() + mgr = dhcp.DeviceManager(self.conf, plugin) + load_interface_driver.assert_called_with(self.conf) + + # Setup with a reserved DHCP port. + network = FakeDualNetworkReserved() + network.tenant_id = 'Tenant A' + reserved_port = network.ports[-1] + + def mock_update(port_id, dict): + port = reserved_port + port.network_id = dict['port']['network_id'] + port.device_id = dict['port']['device_id'] + return port + + plugin.update_dhcp_port.side_effect = mock_update + mgr.driver.get_device_name.return_value = 'ns-XXX' + ip_lib.ensure_device_is_ready.return_value = True + mgr.setup(network) + plugin.update_dhcp_port.assert_called_with(reserved_port.id, mock.ANY) + + mgr.driver.init_l3.assert_called_with('ns-XXX', + ['192.168.0.6/24'], + namespace='qdhcp-ns') + + @mock.patch('neutron.agent.linux.dhcp.ip_lib') + @mock.patch('neutron.agent.linux.dhcp.common_utils.load_interface_driver') + def test_setup_reserved_2(self, load_interface_driver, ip_lib): + """Test scenario where a network has two reserved ports, and + update_dhcp_port fails for the first of those. + """ + + # Create DeviceManager. 
+ self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', + default=False)) + plugin = mock.Mock() + mgr = dhcp.DeviceManager(self.conf, plugin) + load_interface_driver.assert_called_with(self.conf) + + # Setup with a reserved DHCP port. + network = FakeDualNetworkReserved2() + network.tenant_id = 'Tenant A' + reserved_port_1 = network.ports[-2] + reserved_port_2 = network.ports[-1] + + def mock_update(port_id, dict): + if port_id == reserved_port_1.id: + return None + + port = reserved_port_2 + port.network_id = dict['port']['network_id'] + port.device_id = dict['port']['device_id'] + return port + + plugin.update_dhcp_port.side_effect = mock_update + mgr.driver.get_device_name.return_value = 'ns-XXX' + ip_lib.ensure_device_is_ready.return_value = True + mgr.setup(network) + plugin.update_dhcp_port.assert_called_with(reserved_port_2.id, + mock.ANY) + + mgr.driver.init_l3.assert_called_with('ns-XXX', + ['192.168.0.6/24'], + namespace='qdhcp-ns') diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py index a46354a1a5c..ffa0f6e0636 100644 --- a/neutron/tests/unit/agent/linux/test_interface.py +++ b/neutron/tests/unit/agent/linux/test_interface.py @@ -15,6 +15,7 @@ import mock from oslo_utils import uuidutils +import testtools from neutron.agent.common import config from neutron.agent.common import ovs_lib @@ -249,6 +250,85 @@ class TestABCDriver(TestBase): namespace=ns) self.assertFalse(self.ip_dev().addr.add.called) + def test_add_ipv6_addr(self): + device_name = 'tap0' + cidr = '2001:db8::/64' + ns = '12345678-1234-5678-90ab-ba0987654321' + bc = BaseChild(self.conf) + + bc.add_ipv6_addr(device_name, cidr, ns) + + self.ip_dev.assert_has_calls( + [mock.call(device_name, namespace=ns), + mock.call().addr.add(cidr, 'global')]) + + def test_delete_ipv6_addr(self): + device_name = 'tap0' + cidr = '2001:db8::/64' + ns = '12345678-1234-5678-90ab-ba0987654321' + bc = BaseChild(self.conf) + + 
bc.delete_ipv6_addr(device_name, cidr, ns) + + self.ip_dev.assert_has_calls( + [mock.call(device_name, namespace=ns), + mock.call().delete_addr_and_conntrack_state(cidr)]) + + def test_delete_ipv6_addr_with_prefix(self): + device_name = 'tap0' + prefix = '2001:db8::/48' + in_cidr = '2001:db8::/64' + out_cidr = '2001:db7::/64' + ns = '12345678-1234-5678-90ab-ba0987654321' + in_addresses = [dict(scope='global', + dynamic=False, + cidr=in_cidr)] + out_addresses = [dict(scope='global', + dynamic=False, + cidr=out_cidr)] + # Initially set the address list to be empty + self.ip_dev().addr.list = mock.Mock(return_value=[]) + + bc = BaseChild(self.conf) + + # Call delete_v6addr_with_prefix when the address list is empty + bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) + # Assert that delete isn't called + self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) + + # Set the address list to contain only an address outside of the range + # of the given prefix + self.ip_dev().addr.list = mock.Mock(return_value=out_addresses) + bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) + # Assert that delete isn't called + self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) + + # Set the address list to contain only an address inside of the range + # of the given prefix + self.ip_dev().addr.list = mock.Mock(return_value=in_addresses) + bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) + # Assert that delete is called + self.ip_dev.assert_has_calls( + [mock.call(device_name, namespace=ns), + mock.call().addr.list(scope='global', filters=['permanent']), + mock.call().delete_addr_and_conntrack_state(in_cidr)]) + + def test_get_ipv6_llas(self): + ns = '12345678-1234-5678-90ab-ba0987654321' + addresses = [dict(scope='link', + dynamic=False, + cidr='fe80:cafe::/64')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + device_name = self.ip_dev().name + bc = BaseChild(self.conf) + + llas = bc.get_ipv6_llas(device_name, ns) 
+ + self.assertEqual(addresses, llas) + self.ip_dev.assert_has_calls( + [mock.call(device_name, namespace=ns), + mock.call().addr.list(scope='link', ip_version=6)]) + class TestOVSInterfaceDriver(TestBase): @@ -335,6 +415,13 @@ class TestOVSInterfaceDriver(TestBase): self.conf.set_override('network_device_mtu', 9000) self.assertEqual(self.conf.network_device_mtu, 9000) + def test_validate_min_ipv6_mtu(self): + self.conf.set_override('network_device_mtu', 1200) + with mock.patch('neutron.common.ipv6_utils.is_enabled') as ipv6_status: + with testtools.ExpectedException(SystemExit): + ipv6_status.return_value = True + BaseChild(self.conf) + def test_plug_mtu(self): self.conf.set_override('network_device_mtu', 9000) self._test_plug([mock.call().device().link.set_mtu(9000)]) diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index 87a2a82274c..81e310d011a 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -313,6 +313,14 @@ class TestIpWrapper(base.BaseTestCase): run_as_root=True, namespace=None, log_fail_as_error=True) + def test_add_dummy(self): + ip_lib.IPWrapper().add_dummy('dummy0') + self.execute.assert_called_once_with([], 'link', + ('add', 'dummy0', + 'type', 'dummy'), + run_as_root=True, namespace=None, + log_fail_as_error=True) + def test_get_device(self): dev = ip_lib.IPWrapper(namespace='ns').device('eth0') self.assertEqual(dev.namespace, 'ns') diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py index 8c9b9e2a4bd..61494d851d7 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_firewall.py +++ b/neutron/tests/unit/agent/linux/test_iptables_firewall.py @@ -18,6 +18,7 @@ import copy import mock from oslo_config import cfg import six +import testtools from neutron.agent.common import config as a_cfg from neutron.agent.linux import ipset_manager @@ -41,6 +42,27 @@ 
OTHER_SGID = 'other_sgid' _IPv6 = constants.IPv6 _IPv4 = constants.IPv4 +RAW_TABLE_OUTPUT = """ +# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015 +*raw +:PREROUTING ACCEPT [11561:3470468] +:OUTPUT ACCEPT [11504:4064044] +:neutron-openvswi-OUTPUT - [0:0] +:neutron-openvswi-PREROUTING - [0:0] +-A PREROUTING -j neutron-openvswi-PREROUTING + -A OUTPUT -j neutron-openvswi-OUTPUT +-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9 +-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9 +COMMIT +# Completed on Fri Jul 31 16:13:28 2015 +""" # noqa + class BaseIptablesFirewallTestCase(base.BaseTestCase): def setUp(self): @@ -65,6 +87,8 @@ class BaseIptablesFirewallTestCase(base.BaseTestCase): } iptables_cls.return_value = self.iptables_inst + self.iptables_inst.get_rules_for_table.return_value = ( + RAW_TABLE_OUTPUT.splitlines()) self.firewall = iptables_firewall.IptablesFirewallDriver() self.firewall.iptables = self.iptables_inst @@ -1030,7 +1054,6 @@ class IptablesFirewallTestCase(BaseIptablesFirewallTestCase): def _test_remove_conntrack_entries(self, ethertype, protocol, direction): port = self._fake_port() - port['zone_id'] = 1 port['security_groups'] = 'fake_sg_id' self.firewall.filtered_ports[port['device']] = port self.firewall.updated_rule_sg_ids = set(['fake_sg_id']) @@ -1076,7 +1099,6 @@ class IptablesFirewallTestCase(BaseIptablesFirewallTestCase): def 
test_remove_conntrack_entries_for_port_sec_group_change(self): port = self._fake_port() - port['zone_id'] = 1 port['security_groups'] = ['fake_sg_id'] self.firewall.filtered_ports[port['device']] = port self.firewall.updated_sg_members = set(['tapfake_dev']) @@ -1620,20 +1642,12 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): self.assertEqual(sg_ids, self.firewall._get_sg_ids_set_for_ports(ports)) - def test_clear_sg_members(self): - self.firewall.sg_members = self._fake_sg_members( - sg_ids=[FAKE_SGID, OTHER_SGID]) - self.firewall._clear_sg_members(_IPv4, [OTHER_SGID]) - - self.assertEqual(0, len(self.firewall.sg_members[OTHER_SGID][_IPv4])) - - def test_remove_unused_sg_members(self): + def test_remove_sg_members(self): self.firewall.sg_members = self._fake_sg_members([FAKE_SGID, OTHER_SGID]) - self.firewall.sg_members[FAKE_SGID][_IPv4] = [] - self.firewall.sg_members[FAKE_SGID][_IPv6] = [] - self.firewall.sg_members[OTHER_SGID][_IPv6] = [] - self.firewall._remove_unused_sg_members() + remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]), + _IPv6: set([FAKE_SGID, OTHER_SGID])} + self.firewall._remove_sg_members(remote_sgs_to_remove) self.assertIn(OTHER_SGID, self.firewall.sg_members) self.assertNotIn(FAKE_SGID, self.firewall.sg_members) @@ -1652,13 +1666,26 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): self.assertNotIn(OTHER_SGID, self.firewall.sg_rules) def test_remove_unused_security_group_info(self): - self._setup_fake_firewall_members_and_rules(self.firewall) - # no filtered ports in 'fake_sgid', so all rules and members - # are not needed and we expect them to be cleaned up - self.firewall.prepare_port_filter(self._fake_port(OTHER_SGID)) + self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} + self.firewall.pre_sg_members = self.firewall.sg_members + self.firewall.sg_rules = self._fake_sg_rules( + remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]}) + self.firewall.pre_sg_rules = 
self.firewall.sg_rules + port = self._fake_port() + self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._remove_unused_security_group_info() + self.assertNotIn(OTHER_SGID, self.firewall.sg_members) - self.assertNotIn(FAKE_SGID, self.firewall.sg_members) + def test_not_remove_used_security_group_info(self): + self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} + self.firewall.pre_sg_members = self.firewall.sg_members + self.firewall.sg_rules = self._fake_sg_rules( + remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]}) + self.firewall.pre_sg_rules = self.firewall.sg_rules + port = self._fake_port() + self.firewall.filtered_ports['tapfake_dev'] = port + self.firewall._remove_unused_security_group_info() + self.assertIn(OTHER_SGID, self.firewall.sg_members) def test_remove_all_unused_info(self): self._setup_fake_firewall_members_and_rules(self.firewall) @@ -1802,3 +1829,46 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): self.firewall._update_ipset_members(sg_info) calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])] self.firewall.ipset.assert_has_calls(calls) + + +class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase): + + def setUp(self): + super(OVSHybridIptablesFirewallTestCase, self).setUp() + self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver() + + def test__populate_initial_zone_map(self): + expected = {'61634509-31': 2, '8f46cf18-12': 9, + '95c24827-02': 2, 'e804433b-61': 1} + self.assertEqual(expected, self.firewall._device_zone_map) + + def test__generate_device_zone(self): + # inital data has 1, 2, and 9 in use. + # we fill from top up first. 
+ self.assertEqual(10, self.firewall._generate_device_zone('test')) + + # once it's maxed out, it scans for gaps + self.firewall._device_zone_map['someport'] = ( + iptables_firewall.MAX_CONNTRACK_ZONES) + for i in range(3, 9): + self.assertEqual(i, self.firewall._generate_device_zone(i)) + + # 9 and 10 are taken so next should be 11 + self.assertEqual(11, self.firewall._generate_device_zone('p11')) + + # take out zone 1 and make sure it's selected + self.firewall._device_zone_map.pop('e804433b-61') + self.assertEqual(1, self.firewall._generate_device_zone('p1')) + + # fill it up and then make sure an extra throws an error + for i in range(1, 65536): + self.firewall._device_zone_map['dev-%s' % i] = i + with testtools.ExpectedException(RuntimeError): + self.firewall._find_open_zone() + + def test_get_device_zone(self): + # calling get_device_zone should clear out all of the other entries + # since they aren't in the filtered ports list + self.assertEqual(1, self.firewall.get_device_zone('12345678901234567')) + # should have been truncated to 11 chars + self.assertEqual({'12345678901': 1}, self.firewall._device_zone_map) diff --git a/neutron/tests/unit/agent/linux/test_pd.py b/neutron/tests/unit/agent/linux/test_pd.py new file mode 100644 index 00000000000..e12106727dd --- /dev/null +++ b/neutron/tests/unit/agent/linux/test_pd.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.agent.linux import pd +from neutron.tests import base as tests_base + + +class FakeRouter(object): + def __init__(self, router_id): + self.router_id = router_id + + +class TestPrefixDelegation(tests_base.DietTestCase): + def test_remove_router(self): + l3_agent = mock.Mock() + router_id = 1 + l3_agent.pd.routers = {router_id: pd.get_router_entry(None)} + pd.remove_router(None, None, l3_agent, router=FakeRouter(router_id)) + self.assertTrue(l3_agent.pd.delete_router_pd.called) + self.assertEqual({}, l3_agent.pd.routers) diff --git a/neutron/tests/unit/agent/linux/test_utils.py b/neutron/tests/unit/agent/linux/test_utils.py index 9a2e89ffa35..7476050c66b 100644 --- a/neutron/tests/unit/agent/linux/test_utils.py +++ b/neutron/tests/unit/agent/linux/test_utils.py @@ -15,6 +15,7 @@ import socket import mock +import six import testtools from neutron.agent.linux import utils @@ -107,6 +108,55 @@ class AgentUtilsExecuteTest(base.BaseTestCase): ['ls'], log_fail_as_error=False) self.assertFalse(log.error.called) + def test_encode_process_input(self): + str_idata = "%s\n" % self.test_file[:-1] + str_odata = "%s\n" % self.test_file + if six.PY3: + bytes_idata = str_idata.encode(encoding='utf-8') + bytes_odata = str_odata.encode(encoding='utf-8') + self.mock_popen.return_value = [bytes_odata, b''] + result = utils.execute(['cat'], process_input=str_idata) + self.mock_popen.assert_called_once_with(bytes_idata) + self.assertEqual(str_odata, result) + else: + self.mock_popen.return_value = [str_odata, ''] + result = utils.execute(['cat'], process_input=str_idata) + self.mock_popen.assert_called_once_with(str_idata) + self.assertEqual(str_odata, result) + + def test_return_str_data(self): + str_data = "%s\n" % self.test_file + self.mock_popen.return_value = [str_data, ''] + result = utils.execute(['ls', self.test_file], return_stderr=True) + self.assertEqual((str_data, ''), result) + + def test_raise_unicodeerror_in_decoding_out_data(self): + class 
m_bytes(bytes): + def decode(self, encoding=None): + raise UnicodeError + + err_data = 'UnicodeError' + bytes_err_data = b'UnicodeError' + out_data = "%s\n" % self.test_file + bytes_out_data = m_bytes(out_data.encode(encoding='utf-8')) + if six.PY3: + self.mock_popen.return_value = [bytes_out_data, bytes_err_data] + result = utils.execute(['ls', self.test_file], + return_stderr=True) + self.assertEqual((bytes_out_data, err_data), result) + + +class AgentUtilsExecuteEncodeTest(base.BaseTestCase): + def setUp(self): + super(AgentUtilsExecuteEncodeTest, self).setUp() + self.test_file = self.get_temp_file_path('test_execute.tmp') + open(self.test_file, 'w').close() + + def test_decode_return_data(self): + str_data = "%s\n" % self.test_file + result = utils.execute(['ls', self.test_file], return_stderr=True) + self.assertEqual((str_data, ''), result) + class AgentUtilsGetInterfaceMAC(base.BaseTestCase): def test_get_interface_mac(self): @@ -120,22 +170,33 @@ class AgentUtilsGetInterfaceMAC(base.BaseTestCase): class AgentUtilsReplaceFile(base.BaseTestCase): - def test_replace_file(self): + def _test_replace_file_helper(self, explicit_perms=None): # make file to replace with mock.patch('tempfile.NamedTemporaryFile') as ntf: ntf.return_value.name = '/baz' with mock.patch('os.chmod') as chmod: with mock.patch('os.rename') as rename: - utils.replace_file('/foo', 'bar') + if explicit_perms is None: + expected_perms = 0o644 + utils.replace_file('/foo', 'bar') + else: + expected_perms = explicit_perms + utils.replace_file('/foo', 'bar', explicit_perms) expected = [mock.call('w+', dir='/', delete=False), mock.call().write('bar'), mock.call().close()] ntf.assert_has_calls(expected) - chmod.assert_called_once_with('/baz', 0o644) + chmod.assert_called_once_with('/baz', expected_perms) rename.assert_called_once_with('/baz', '/foo') + def test_replace_file_with_default_perms(self): + self._test_replace_file_helper() + + def test_replace_file_with_0o600_perms(self): + 
self._test_replace_file_helper(0o600) + class TestFindChildPids(base.BaseTestCase): diff --git a/neutron/tests/unit/agent/metadata/test_namespace_proxy.py b/neutron/tests/unit/agent/metadata/test_namespace_proxy.py index 8cf8d1415ff..8403ecaf490 100644 --- a/neutron/tests/unit/agent/metadata/test_namespace_proxy.py +++ b/neutron/tests/unit/agent/metadata/test_namespace_proxy.py @@ -98,7 +98,7 @@ class TestNetworkMetadataProxyHandler(base.BaseTestCase): ) self.assertEqual(retval.headers['Content-Type'], 'text/plain') - self.assertEqual(retval.body, 'content') + self.assertEqual(b'content', retval.body) def test_proxy_request_network_200(self): self.handler.network_id = 'network_id' @@ -129,7 +129,7 @@ class TestNetworkMetadataProxyHandler(base.BaseTestCase): self.assertEqual(retval.headers['Content-Type'], 'application/json') - self.assertEqual(retval.body, '{}') + self.assertEqual(b'{}', retval.body) def _test_proxy_request_network_4xx(self, status, method, expected): self.handler.network_id = 'network_id' diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 1cc7afd594d..3b34ab0e07c 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1706,8 +1706,8 @@ IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager -j CT --zone 1 [0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port1)s -j CT --zone 1 [0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_%(port2)s \ --j CT --zone 1 -[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port2)s -j CT --zone 1 +-j CT --zone 2 +[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port2)s -j CT --zone 2 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG @@ -2609,9 +2609,9 @@ class TestSecurityGroupAgentWithIptables(base.BaseTestCase): value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS) value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS) 
value = value.replace('\n', '\\n') - value = value.replace('[', '\[') - value = value.replace(']', '\]') - value = value.replace('*', '\*') + value = value.replace('[', r'\[') + value = value.replace(']', r'\]') + value = value.replace('*', r'\*') return value def _register_mock_call(self, *args, **kwargs): diff --git a/neutron/tests/unit/api/rpc/callbacks/__init__.py b/neutron/tests/unit/api/rpc/callbacks/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py b/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py b/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py new file mode 100644 index 00000000000..d07b49c2fd5 --- /dev/null +++ b/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.api.rpc.callbacks.consumer import registry +from neutron.tests import base + + +class ConsumerRegistryTestCase(base.BaseTestCase): + + def setUp(self): + super(ConsumerRegistryTestCase, self).setUp() + + def test__get_manager_is_singleton(self): + self.assertIs(registry._get_manager(), registry._get_manager()) + + @mock.patch.object(registry, '_get_manager') + def test_subscribe(self, manager_mock): + callback = lambda: None + registry.subscribe(callback, 'TYPE') + manager_mock().register.assert_called_with(callback, 'TYPE') + + @mock.patch.object(registry, '_get_manager') + def test_unsubscribe(self, manager_mock): + callback = lambda: None + registry.unsubscribe(callback, 'TYPE') + manager_mock().unregister.assert_called_with(callback, 'TYPE') + + @mock.patch.object(registry, '_get_manager') + def test_clear(self, manager_mock): + registry.clear() + manager_mock().clear.assert_called_with() + + @mock.patch.object(registry, '_get_manager') + def test_push(self, manager_mock): + resource_type_ = object() + resource_ = object() + event_type_ = object() + + callback1 = mock.Mock() + callback2 = mock.Mock() + callbacks = {callback1, callback2} + manager_mock().get_callbacks.return_value = callbacks + registry.push(resource_type_, resource_, event_type_) + for callback in callbacks: + callback.assert_called_with(resource_, event_type_) diff --git a/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py b/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py b/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py new file mode 100644 index 00000000000..5b7b049c60a --- /dev/null +++ b/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py @@ -0,0 +1,81 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.rpc.callbacks import exceptions +from neutron.api.rpc.callbacks.producer import registry +from neutron.api.rpc.callbacks import resources +from neutron.objects.qos import policy +from neutron.tests.unit.services.qos import base + + +class ProducerRegistryTestCase(base.BaseQosTestCase): + + def test_pull_returns_callback_result(self): + policy_obj = policy.QosPolicy(context=None) + + def _fake_policy_cb(*args, **kwargs): + return policy_obj + + registry.provide(_fake_policy_cb, resources.QOS_POLICY) + + self.assertEqual( + policy_obj, + registry.pull(resources.QOS_POLICY, 'fake_id')) + + def test_pull_does_not_raise_on_none(self): + def _none_cb(*args, **kwargs): + pass + + registry.provide(_none_cb, resources.QOS_POLICY) + + obj = registry.pull(resources.QOS_POLICY, 'fake_id') + self.assertIsNone(obj) + + def test_pull_raises_on_wrong_object_type(self): + def _wrong_type_cb(*args, **kwargs): + return object() + + registry.provide(_wrong_type_cb, resources.QOS_POLICY) + + self.assertRaises( + exceptions.CallbackWrongResourceType, + registry.pull, resources.QOS_POLICY, 'fake_id') + + def test_pull_raises_on_callback_not_found(self): + self.assertRaises( + exceptions.CallbackNotFound, + registry.pull, resources.QOS_POLICY, 'fake_id') + + def test__get_manager_is_singleton(self): + self.assertIs(registry._get_manager(), registry._get_manager()) + + def test_unprovide(self): + def _fake_policy_cb(*args, **kwargs): + pass + + registry.provide(_fake_policy_cb, resources.QOS_POLICY) + registry.unprovide(_fake_policy_cb, resources.QOS_POLICY) + + 
self.assertRaises( + exceptions.CallbackNotFound, + registry.pull, resources.QOS_POLICY, 'fake_id') + + def test_clear_unprovides_all_producers(self): + def _fake_policy_cb(*args, **kwargs): + pass + + registry.provide(_fake_policy_cb, resources.QOS_POLICY) + registry.clear() + + self.assertRaises( + exceptions.CallbackNotFound, + registry.pull, resources.QOS_POLICY, 'fake_id') diff --git a/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py b/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py new file mode 100644 index 00000000000..79d5ed55c5a --- /dev/null +++ b/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py @@ -0,0 +1,140 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.api.rpc.callbacks import exceptions as rpc_exc +from neutron.api.rpc.callbacks import resource_manager +from neutron.callbacks import exceptions as exceptions +from neutron.tests.unit.services.qos import base + +IS_VALID_RESOURCE_TYPE = ( + 'neutron.api.rpc.callbacks.resources.is_valid_resource_type') + + +class ResourceCallbacksManagerTestCaseMixin(object): + + def test_register_fails_on_invalid_type(self): + self.assertRaises( + exceptions.Invalid, + self.mgr.register, lambda: None, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_clear_unregisters_all_callbacks(self, *mocks): + self.mgr.register(lambda: None, 'TYPE1') + self.mgr.register(lambda: None, 'TYPE2') + self.mgr.clear() + self.assertEqual([], self.mgr.get_subscribed_types()) + + def test_unregister_fails_on_invalid_type(self): + self.assertRaises( + exceptions.Invalid, + self.mgr.unregister, lambda: None, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_unregister_fails_on_unregistered_callback(self, *mocks): + self.assertRaises( + rpc_exc.CallbackNotFound, + self.mgr.unregister, lambda: None, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_unregister_unregisters_callback(self, *mocks): + callback = lambda: None + self.mgr.register(callback, 'TYPE') + self.mgr.unregister(callback, 'TYPE') + self.assertEqual([], self.mgr.get_subscribed_types()) + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test___init___does_not_reset_callbacks(self, *mocks): + callback = lambda: None + self.mgr.register(callback, 'TYPE') + resource_manager.ProducerResourceCallbacksManager() + self.assertEqual(['TYPE'], self.mgr.get_subscribed_types()) + + +class ProducerResourceCallbacksManagerTestCase( + base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): + + def setUp(self): + super(ProducerResourceCallbacksManagerTestCase, self).setUp() + self.mgr = self.prod_mgr + + 
@mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_register_registers_callback(self, *mocks): + callback = lambda: None + self.mgr.register(callback, 'TYPE') + self.assertEqual(callback, self.mgr.get_callback('TYPE')) + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_register_fails_on_multiple_calls(self, *mocks): + self.mgr.register(lambda: None, 'TYPE') + self.assertRaises( + rpc_exc.CallbacksMaxLimitReached, + self.mgr.register, lambda: None, 'TYPE') + + def test_get_callback_fails_on_invalid_type(self): + self.assertRaises( + exceptions.Invalid, + self.mgr.get_callback, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_get_callback_fails_on_unregistered_callback( + self, *mocks): + self.assertRaises( + rpc_exc.CallbackNotFound, + self.mgr.get_callback, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_get_callback_returns_proper_callback(self, *mocks): + callback1 = lambda: None + callback2 = lambda: None + self.mgr.register(callback1, 'TYPE1') + self.mgr.register(callback2, 'TYPE2') + self.assertEqual(callback1, self.mgr.get_callback('TYPE1')) + self.assertEqual(callback2, self.mgr.get_callback('TYPE2')) + + +class ConsumerResourceCallbacksManagerTestCase( + base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): + + def setUp(self): + super(ConsumerResourceCallbacksManagerTestCase, self).setUp() + self.mgr = self.cons_mgr + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_register_registers_callback(self, *mocks): + callback = lambda: None + self.mgr.register(callback, 'TYPE') + self.assertEqual({callback}, self.mgr.get_callbacks('TYPE')) + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_register_succeeds_on_multiple_calls(self, *mocks): + callback1 = lambda: None + callback2 = lambda: None + self.mgr.register(callback1, 'TYPE') + self.mgr.register(callback2, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) 
+ def test_get_callbacks_fails_on_unregistered_callback( + self, *mocks): + self.assertRaises( + rpc_exc.CallbackNotFound, + self.mgr.get_callbacks, 'TYPE') + + @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) + def test_get_callbacks_returns_proper_callbacks(self, *mocks): + callback1 = lambda: None + callback2 = lambda: None + self.mgr.register(callback1, 'TYPE1') + self.mgr.register(callback2, 'TYPE2') + self.assertEqual(set([callback1]), self.mgr.get_callbacks('TYPE1')) + self.assertEqual(set([callback2]), self.mgr.get_callbacks('TYPE2')) diff --git a/neutron/tests/unit/api/rpc/callbacks/test_resources.py b/neutron/tests/unit/api/rpc/callbacks/test_resources.py new file mode 100644 index 00000000000..78d8e5d825b --- /dev/null +++ b/neutron/tests/unit/api/rpc/callbacks/test_resources.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api.rpc.callbacks import resources +from neutron.objects.qos import policy +from neutron.tests import base + + +class GetResourceTypeTestCase(base.BaseTestCase): + + def test_get_resource_type_none(self): + self.assertIsNone(resources.get_resource_type(None)) + + def test_get_resource_type_wrong_type(self): + self.assertIsNone(resources.get_resource_type(object())) + + def test_get_resource_type(self): + # we could use any other registered NeutronObject type here + self.assertEqual(policy.QosPolicy.obj_name(), + resources.get_resource_type(policy.QosPolicy())) + + +class IsValidResourceTypeTestCase(base.BaseTestCase): + + def test_known_type(self): + # it could be any other NeutronObject, assuming it's known to RPC + # callbacks + self.assertTrue(resources.is_valid_resource_type( + policy.QosPolicy.obj_name())) + + def test_unknown_type(self): + self.assertFalse( + resources.is_valid_resource_type('unknown-resource-type')) + + +class GetResourceClsTestCase(base.BaseTestCase): + + def test_known_type(self): + # it could be any other NeutronObject, assuming it's known to RPC + # callbacks + self.assertEqual(policy.QosPolicy, + resources.get_resource_cls(resources.QOS_POLICY)) + + def test_unknown_type(self): + self.assertIsNone(resources.get_resource_cls('unknown-resource-type')) diff --git a/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py index a06fd2a0dd5..d57632139f6 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py @@ -36,6 +36,8 @@ class TestDhcpRpcCallback(base.BaseTestCase): set_dirty_p = mock.patch('neutron.quota.resource_registry.' 
'set_resources_dirty') self.mock_set_dirty = set_dirty_p.start() + self.utils_p = mock.patch('neutron.plugins.common.utils.create_port') + self.utils = self.utils_p.start() def test_get_active_networks(self): plugin_retval = [dict(id='a'), dict(id='b')] @@ -79,6 +81,7 @@ class TestDhcpRpcCallback(base.BaseTestCase): 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] } self.plugin.create_port.side_effect = exc + self.utils.side_effect = exc self.assertIsNone(self.callbacks._port_action(self.plugin, mock.Mock(), {'port': port}, @@ -87,7 +90,10 @@ class TestDhcpRpcCallback(base.BaseTestCase): def _test__port_action_good_action(self, action, port, expected_call): self.callbacks._port_action(self.plugin, mock.Mock(), port, action) - self.plugin.assert_has_calls([expected_call]) + if action == 'create_port': + self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) + else: + self.plugin.assert_has_calls([expected_call]) def test_port_action_create_port(self): self._test__port_action_good_action( diff --git a/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py index 5be1121fcd4..0931604db7b 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py @@ -47,7 +47,9 @@ class DVRServerRpcApiTestCase(base.BaseTestCase): host='foo_host', subnet='foo_subnet') def test_get_subnet_for_dvr(self): - self.rpc.get_subnet_for_dvr(self.ctxt, 'foo_subnet') + self.rpc.get_subnet_for_dvr( + self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_subnet_for_dvr', - subnet='foo_subnet') + subnet='foo_subnet', + fixed_ips='foo_fixed_ips') diff --git a/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py new file mode 100644 index 00000000000..68ec79d141b --- /dev/null +++ b/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py @@ -0,0 +1,65 @@ +# Copyright (c) 2015 Cisco 
Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from neutron.api.rpc.handlers import l3_rpc +from neutron.common import constants +from neutron import context +from neutron import manager +from neutron.tests.unit.db import test_db_base_plugin_v2 +from neutron.tests.unit import testlib_api + + +class TestL3RpcCallback(testlib_api.SqlTestCase): + + def setUp(self): + super(TestL3RpcCallback, self).setUp() + self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) + self.plugin = manager.NeutronManager.get_plugin() + self.ctx = context.get_admin_context() + cfg.CONF.set_override('default_ipv6_subnet_pool', + constants.IPV6_PD_POOL_ID) + self.callbacks = l3_rpc.L3RpcCallback() + self.network = self._prepare_network() + + def _prepare_network(self): + network = {'network': {'name': 'abc', + 'shared': False, + 'admin_state_up': True}} + return self.plugin.create_network(self.ctx, network) + + def _prepare_ipv6_pd_subnet(self): + subnet = {'subnet': {'network_id': self.network['id'], + 'cidr': None, + 'ip_version': 6, + 'name': 'ipv6_pd', + 'enable_dhcp': True, + 'host_routes': None, + 'dns_nameservers': None, + 'allocation_pools': None, + 'ipv6_ra_mode': constants.IPV6_SLAAC, + 'ipv6_address_mode': constants.IPV6_SLAAC}} + return self.plugin.create_subnet(self.ctx, subnet) + + def test_process_prefix_update(self): + subnet = self._prepare_ipv6_pd_subnet() + data = {subnet['id']: '2001:db8::/64'} + allocation_pools = 
[{'start': '2001:db8::2', + 'end': '2001:db8::ffff:ffff:ffff:ffff'}] + res = self.callbacks.process_prefix_update(self.ctx, subnets=data) + updated_subnet = res[0] + self.assertEqual(updated_subnet['cidr'], data[subnet['id']]) + self.assertEqual(updated_subnet['allocation_pools'], allocation_pools) diff --git a/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py new file mode 100755 index 00000000000..64d67dacff0 --- /dev/null +++ b/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py @@ -0,0 +1,222 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields +import testtools + +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.common import topics +from neutron import context +from neutron.objects import base as objects_base +from neutron.tests import base + + +def _create_test_dict(): + return {'id': 'uuid', + 'field': 'foo'} + + +def _create_test_resource(context=None): + resource_dict = _create_test_dict() + resource = FakeResource(context, **resource_dict) + resource.obj_reset_changes() + return resource + + +@obj_base.VersionedObjectRegistry.register +class FakeResource(objects_base.NeutronObject): + + fields = { + 'id': obj_fields.UUIDField(), + 'field': obj_fields.StringField() + } + + @classmethod + def get_objects(cls, context, **kwargs): + return list() + + +class ResourcesRpcBaseTestCase(base.BaseTestCase): + + def setUp(self): + super(ResourcesRpcBaseTestCase, self).setUp() + self.context = context.get_admin_context() + + +class _ValidateResourceTypeTestCase(base.BaseTestCase): + def setUp(self): + super(_ValidateResourceTypeTestCase, self).setUp() + self.is_valid_mock = mock.patch.object( + resources_rpc.resources, 'is_valid_resource_type').start() + + def test_valid_type(self): + self.is_valid_mock.return_value = True + resources_rpc._validate_resource_type('foo') + + def test_invalid_type(self): + self.is_valid_mock.return_value = False + with testtools.ExpectedException( + resources_rpc.InvalidResourceTypeClass): + resources_rpc._validate_resource_type('foo') + + +class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase): + + @mock.patch.object(resources_rpc, '_validate_resource_type') + def test_resource_type_versioned_topic(self, validate_mock): + obj_name = FakeResource.obj_name() + expected = topics.RESOURCE_TOPIC_PATTERN % { + 'resource_type': 'FakeResource', 'version': '1.0'} + with 
mock.patch.object(resources_rpc.resources, 'get_resource_cls', + return_value=FakeResource): + observed = resources_rpc.resource_type_versioned_topic(obj_name) + self.assertEqual(expected, observed) + + +class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase): + + def setUp(self): + super(ResourcesPullRpcApiTestCase, self).setUp() + mock.patch.object(resources_rpc, '_validate_resource_type').start() + mock.patch('neutron.api.rpc.callbacks.resources.get_resource_cls', + return_value=FakeResource).start() + self.rpc = resources_rpc.ResourcesPullRpcApi() + mock.patch.object(self.rpc, 'client').start() + self.cctxt_mock = self.rpc.client.prepare.return_value + + def test_is_singleton(self): + self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi()) + + def test_pull(self): + expected_obj = _create_test_resource(self.context) + resource_id = expected_obj.id + self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive() + + result = self.rpc.pull( + self.context, FakeResource.obj_name(), resource_id) + + self.cctxt_mock.call.assert_called_once_with( + self.context, 'pull', resource_type='FakeResource', + version=FakeResource.VERSION, resource_id=resource_id) + self.assertEqual(expected_obj, result) + + def test_pull_resource_not_found(self): + resource_dict = _create_test_dict() + resource_id = resource_dict['id'] + self.cctxt_mock.call.return_value = None + with testtools.ExpectedException(resources_rpc.ResourceNotFound): + self.rpc.pull(self.context, FakeResource.obj_name(), + resource_id) + + +class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase): + + def setUp(self): + super(ResourcesPullRpcCallbackTestCase, self).setUp() + self.callbacks = resources_rpc.ResourcesPullRpcCallback() + self.resource_obj = _create_test_resource(self.context) + + def test_pull(self): + resource_dict = _create_test_dict() + with mock.patch.object( + resources_rpc.prod_registry, 'pull', + return_value=self.resource_obj) as registry_mock: + primitive = 
self.callbacks.pull( + self.context, resource_type=FakeResource.obj_name(), + version=FakeResource.VERSION, + resource_id=self.resource_obj.id) + registry_mock.assert_called_once_with( + 'FakeResource', self.resource_obj.id, context=self.context) + self.assertEqual(resource_dict, + primitive['versioned_object.data']) + self.assertEqual(self.resource_obj.obj_to_primitive(), primitive) + + @mock.patch.object(FakeResource, 'obj_to_primitive') + def test_pull_no_backport_for_latest_version(self, to_prim_mock): + with mock.patch.object(resources_rpc.prod_registry, 'pull', + return_value=self.resource_obj): + self.callbacks.pull( + self.context, resource_type=FakeResource.obj_name(), + version=FakeResource.VERSION, + resource_id=self.resource_obj.id) + to_prim_mock.assert_called_with(target_version=None) + + @mock.patch.object(FakeResource, 'obj_to_primitive') + def test_pull_backports_to_older_version(self, to_prim_mock): + with mock.patch.object(resources_rpc.prod_registry, 'pull', + return_value=self.resource_obj): + self.callbacks.pull( + self.context, resource_type=FakeResource.obj_name(), + version='0.9', # less than initial version 1.0 + resource_id=self.resource_obj.id) + to_prim_mock.assert_called_with(target_version='0.9') + + +class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase): + + def setUp(self): + super(ResourcesPushRpcApiTestCase, self).setUp() + mock.patch.object(resources_rpc.n_rpc, 'get_client').start() + mock.patch.object(resources_rpc, '_validate_resource_type').start() + self.rpc = resources_rpc.ResourcesPushRpcApi() + self.cctxt_mock = self.rpc.client.prepare.return_value + self.resource_obj = _create_test_resource(self.context) + + def test__prepare_object_fanout_context(self): + expected_topic = topics.RESOURCE_TOPIC_PATTERN % { + 'resource_type': resources.get_resource_type(self.resource_obj), + 'version': self.resource_obj.VERSION} + + with mock.patch.object(resources_rpc.resources, 'get_resource_cls', + return_value=FakeResource): + 
observed = self.rpc._prepare_object_fanout_context( + self.resource_obj) + + self.rpc.client.prepare.assert_called_once_with( + fanout=True, topic=expected_topic) + self.assertEqual(self.cctxt_mock, observed) + + def test_pushy(self): + with mock.patch.object(resources_rpc.resources, 'get_resource_cls', + return_value=FakeResource): + self.rpc.push( + self.context, self.resource_obj, 'TYPE') + + self.cctxt_mock.cast.assert_called_once_with( + self.context, 'push', + resource=self.resource_obj.obj_to_primitive(), + event_type='TYPE') + + +class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase): + + def setUp(self): + super(ResourcesPushRpcCallbackTestCase, self).setUp() + mock.patch.object(resources_rpc, '_validate_resource_type').start() + mock.patch.object( + resources_rpc.resources, + 'get_resource_cls', return_value=FakeResource).start() + self.resource_obj = _create_test_resource(self.context) + self.resource_prim = self.resource_obj.obj_to_primitive() + self.callbacks = resources_rpc.ResourcesPushRpcCallback() + + @mock.patch.object(resources_rpc.cons_registry, 'push') + def test_push(self, reg_push_mock): + self.callbacks.push(self.context, self.resource_prim, 'TYPE') + reg_push_mock.assert_called_once_with(self.resource_obj.obj_name(), + self.resource_obj, 'TYPE') diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py index 19b9858da5b..0aacc316ba8 100644 --- a/neutron/tests/unit/api/test_extensions.py +++ b/neutron/tests/unit/api/test_extensions.py @@ -30,10 +30,8 @@ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import config from neutron.common import exceptions -from neutron.db import db_base_plugin_v2 from neutron import manager from neutron.plugins.common import constants -from neutron.plugins.ml2 import plugin as ml2_plugin from neutron import quota from neutron.tests import base from neutron.tests.unit.api.v2 import test_base @@ -60,7 +58,7 @@ class 
ExtensionsTestApp(wsgi.Router): super(ExtensionsTestApp, self).__init__(mapper) -class FakePluginWithExtension(db_base_plugin_v2.NeutronDbPluginV2): +class FakePluginWithExtension(object): """A fake plugin used only for extension testing in this file.""" supported_extension_aliases = ["FOXNSOX"] @@ -168,7 +166,7 @@ class ResourceExtensionTest(base.BaseTestCase): test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/tweedles") self.assertEqual(200, index_response.status_int) - self.assertEqual("resource index", index_response.body) + self.assertEqual(b"resource index", index_response.body) show_response = test_app.get("/tweedles/25266") self.assertEqual({'data': {'id': "25266"}}, show_response.json) @@ -365,7 +363,7 @@ class ActionExtensionTest(base.BaseTestCase): response = self.extension_app.post('/dummy_resources/1/action', req_body, content_type='application/json') - self.assertEqual("Tweedle Beetle Added.", response.body) + self.assertEqual(b"Tweedle Beetle Added.", response.body) def test_extended_action_for_deleting_extra_data(self): action_name = 'FOXNSOX:delete_tweedle' @@ -374,7 +372,7 @@ class ActionExtensionTest(base.BaseTestCase): response = self.extension_app.post("/dummy_resources/1/action", req_body, content_type='application/json') - self.assertEqual("Tweedle Bailey Deleted.", response.body) + self.assertEqual(b"Tweedle Bailey Deleted.", response.body) def test_returns_404_for_non_existent_action(self): non_existent_action = 'blah_action' @@ -418,7 +416,7 @@ class RequestExtensionTest(base.BaseTestCase): def extend_response_data(req, res): data = jsonutils.loads(res.body) data['FOXNSOX:extended_key'] = req.GET.get('extended_key') - res.body = jsonutils.dumps(data) + res.body = jsonutils.dumps(data).encode('utf-8') return res app = self._setup_app_with_request_handler(extend_response_data, 'GET') @@ -444,7 +442,7 @@ class RequestExtensionTest(base.BaseTestCase): def _update_handler(req, res): data = 
jsonutils.loads(res.body) data['uneditable'] = req.params['uneditable'] - res.body = jsonutils.dumps(data) + res.body = jsonutils.dumps(data).encode('utf-8') return res base_app = webtest.TestApp(setup_base_app(self)) @@ -736,8 +734,7 @@ class SimpleExtensionManager(object): return request_extensions -class ExtensionExtendedAttributeTestPlugin( - ml2_plugin.Ml2Plugin): +class ExtensionExtendedAttributeTestPlugin(object): supported_extension_aliases = [ 'ext-obj-test', "extended-ext-attr" @@ -778,7 +775,7 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase): ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, - {constants.CORE: ExtensionExtendedAttributeTestPlugin} + {constants.CORE: ExtensionExtendedAttributeTestPlugin()} ) ext_mgr.extend_resources("2.0", {}) extensions.PluginAwareExtensionManager._instance = ext_mgr diff --git a/neutron/tests/unit/api/v2/test_attributes.py b/neutron/tests/unit/api/v2/test_attributes.py index 512fc3022e7..df20fe14aed 100644 --- a/neutron/tests/unit/api/v2/test_attributes.py +++ b/neutron/tests/unit/api/v2/test_attributes.py @@ -19,6 +19,7 @@ import testtools import mock from neutron.api.v2 import attributes +from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.tests import base from neutron.tests import tools @@ -187,6 +188,10 @@ class TestAttributes(base.BaseTestCase): err_msg = "'%s' is not a valid MAC address" self.assertEqual(err_msg % mac_addr, msg) + for invalid_mac_addr in constants.INVALID_MAC_ADDRESSES: + msg = validator(invalid_mac_addr) + self.assertEqual(err_msg % invalid_mac_addr, msg) + mac_addr = "123" msg = validator(mac_addr) self.assertEqual(err_msg % mac_addr, msg) @@ -878,3 +883,90 @@ class TestConvertToList(base.BaseTestCase): def test_convert_to_list_non_iterable(self): for item in (True, False, 1, 1.2, object()): self.assertEqual([item], attributes.convert_to_list(item)) + + +class TestResDict(base.BaseTestCase): + class 
_MyException(Exception): + pass + _EXC_CLS = _MyException + + def _test_fill_default_value(self, attr_info, expected, res_dict): + attributes.fill_default_value(attr_info, res_dict) + self.assertEqual(expected, res_dict) + + def test_fill_default_value(self): + attr_info = { + 'key': { + 'allow_post': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + }, + } + self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'}) + self._test_fill_default_value( + attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {}) + + attr_info = { + 'key': { + 'allow_post': True, + }, + } + self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'}) + self.assertRaises(ValueError, self._test_fill_default_value, + attr_info, {'key': 'X'}, {}) + self.assertRaises(self._EXC_CLS, attributes.fill_default_value, + attr_info, {}, self._EXC_CLS) + attr_info = { + 'key': { + 'allow_post': False, + }, + } + self.assertRaises(ValueError, self._test_fill_default_value, + attr_info, {'key': 'X'}, {'key': 'X'}) + self._test_fill_default_value(attr_info, {}, {}) + self.assertRaises(self._EXC_CLS, attributes.fill_default_value, + attr_info, {'key': 'X'}, self._EXC_CLS) + + def _test_convert_value(self, attr_info, expected, res_dict): + attributes.convert_value(attr_info, res_dict) + self.assertEqual(expected, res_dict) + + def test_convert_value(self): + attr_info = { + 'key': { + }, + } + self._test_convert_value(attr_info, + {'key': attributes.ATTR_NOT_SPECIFIED}, + {'key': attributes.ATTR_NOT_SPECIFIED}) + self._test_convert_value(attr_info, {'key': 'X'}, {'key': 'X'}) + self._test_convert_value(attr_info, + {'other_key': 'X'}, {'other_key': 'X'}) + + attr_info = { + 'key': { + 'convert_to': attributes.convert_to_int, + }, + } + self._test_convert_value(attr_info, + {'key': attributes.ATTR_NOT_SPECIFIED}, + {'key': attributes.ATTR_NOT_SPECIFIED}) + self._test_convert_value(attr_info, {'key': 1}, {'key': '1'}) + self._test_convert_value(attr_info, {'key': 1}, {'key': 1}) + 
self.assertRaises(n_exc.InvalidInput, self._test_convert_value, + attr_info, {'key': 1}, {'key': 'a'}) + + attr_info = { + 'key': { + 'validate': {'type:uuid': None}, + }, + } + self._test_convert_value(attr_info, + {'key': attributes.ATTR_NOT_SPECIFIED}, + {'key': attributes.ATTR_NOT_SPECIFIED}) + uuid_str = '01234567-1234-1234-1234-1234567890ab' + self._test_convert_value(attr_info, + {'key': uuid_str}, {'key': uuid_str}) + self.assertRaises(ValueError, self._test_convert_value, + attr_info, {'key': 1}, {'key': 1}) + self.assertRaises(self._EXC_CLS, attributes.convert_value, + attr_info, {'key': 1}, self._EXC_CLS) diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py index 0ee9c2ec313..1491dd7ed68 100644 --- a/neutron/tests/unit/api/v2/test_base.py +++ b/neutron/tests/unit/api/v2/test_base.py @@ -1128,12 +1128,16 @@ class SubresourceTest(base.BaseTestCase): self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() - router.SUB_RESOURCES['dummy'] = { + api = router.APIRouter() + + SUB_RESOURCES = {} + RESOURCE_ATTRIBUTE_MAP = {} + SUB_RESOURCES['dummy'] = { 'collection_name': 'dummies', 'parent': {'collection_name': 'networks', 'member_name': 'network'} } - attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = { + RESOURCE_ATTRIBUTE_MAP['dummies'] = { 'foo': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, @@ -1142,11 +1146,33 @@ class SubresourceTest(base.BaseTestCase): 'required_by_policy': True, 'is_visible': True} } - api = router.APIRouter() + collection_name = SUB_RESOURCES['dummy'].get('collection_name') + resource_name = 'dummy' + parent = SUB_RESOURCES['dummy'].get('parent') + params = RESOURCE_ATTRIBUTE_MAP['dummies'] + member_actions = {'mactions': 'GET'} + _plugin = manager.NeutronManager.get_plugin() + controller = v2_base.create_resource(collection_name, resource_name, + _plugin, params, + 
member_actions=member_actions, + parent=parent, + allow_bulk=True, + allow_pagination=True, + allow_sorting=True) + + path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], + parent['member_name'], + collection_name) + mapper_kwargs = dict(controller=controller, + path_prefix=path_prefix) + api.map.collection(collection_name, resource_name, **mapper_kwargs) + api.map.resource(collection_name, collection_name, + controller=controller, + parent_resource=parent, + member=member_actions) self.api = webtest.TestApp(api) def tearDown(self): - router.SUB_RESOURCES = {} super(SubresourceTest, self).tearDown() def test_index_sub_resource(self): @@ -1210,6 +1236,16 @@ class SubresourceTest(base.BaseTestCase): dummy_id, network_id='id1') + def test_sub_resource_member_actions(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id, + action='mactions')) + instance.mactions.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1') + # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. @@ -1490,6 +1526,9 @@ class TestSubresourcePlugin(object): def delete_network_dummy(self, context, id, network_id): return + def mactions(self, context, id, network_id): + return + class ListArgsTestCase(base.BaseTestCase): def test_list_args(self): diff --git a/neutron/tests/unit/callbacks/test_manager.py b/neutron/tests/unit/callbacks/test_manager.py index e4e64323d55..cdf32e020fc 100644 --- a/neutron/tests/unit/callbacks/test_manager.py +++ b/neutron/tests/unit/callbacks/test_manager.py @@ -13,7 +13,6 @@ # under the License. 
import mock -import testtools from neutron.callbacks import events from neutron.callbacks import exceptions @@ -44,15 +43,6 @@ class CallBacksManagerTestCase(base.BaseTestCase): callback_1.counter = 0 callback_2.counter = 0 - def test_subscribe_invalid_resource_raise(self): - with testtools.ExpectedException(exceptions.Invalid): - self.manager.subscribe(mock.ANY, 'foo_resource', mock.ANY) - - def test_subscribe_invalid_event_raise(self): - self.assertRaises(exceptions.Invalid, - self.manager.subscribe, - mock.ANY, mock.ANY, 'foo_event') - def test_subscribe(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) @@ -60,6 +50,13 @@ class CallBacksManagerTestCase(base.BaseTestCase): self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) self.assertIn(callback_id_1, self.manager._index) + def test_subscribe_unknown(self): + self.manager.subscribe( + callback_1, 'my_resource', 'my-event') + self.assertIsNotNone( + self.manager._callbacks['my_resource']['my-event']) + self.assertIn(callback_id_1, self.manager._index) + def test_subscribe_is_idempotent(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) diff --git a/neutron/tests/unit/common/test_utils.py b/neutron/tests/unit/common/test_utils.py index 81634f979e3..f6aee3da935 100644 --- a/neutron/tests/unit/common/test_utils.py +++ b/neutron/tests/unit/common/test_utils.py @@ -137,7 +137,7 @@ class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin, class UtilTestParseVlanRanges(base.BaseTestCase): _err_prefix = "Invalid network VLAN range: '" _err_too_few = "' - 'need more than 2 values to unpack'" - _err_too_many = "' - 'too many values to unpack'" + _err_too_many_prefix = "' - 'too many values to unpack" _err_not_int = "' - 'invalid literal for int() with base 10: '%s''" _err_bad_vlan = "' - '%s is not a valid VLAN tag'" _err_range = "' - 'End of VLAN range is less than start of VLAN range'" @@ -145,8 +145,8 @@ class 
UtilTestParseVlanRanges(base.BaseTestCase): def _range_too_few_err(self, nv_range): return self._err_prefix + nv_range + self._err_too_few - def _range_too_many_err(self, nv_range): - return self._err_prefix + nv_range + self._err_too_many + def _range_too_many_err_prefix(self, nv_range): + return self._err_prefix + nv_range + self._err_too_many_prefix def _vlan_not_int_err(self, nv_range, vlan): return self._err_prefix + nv_range + (self._err_not_int % vlan) @@ -267,10 +267,13 @@ class TestParseOneVlanRange(UtilTestParseVlanRanges): def test_parse_one_net_range_too_many(self): config_str = "net1:100:150:200" - expected_msg = self._range_too_many_err(config_str) + expected_msg_prefix = self._range_too_many_err_prefix(config_str) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + # The error message is not same in Python 2 and Python 3. In Python 3, + # it depends on the amount of values used when unpacking, so it cannot + # be predicted as a fixed string. 
+ self.assertTrue(str(err).startswith(expected_msg_prefix)) def test_parse_one_net_vlan1_not_int(self): config_str = "net1:foo:199" @@ -463,8 +466,8 @@ class TestCachingDecorator(base.BaseTestCase): class TestDict2Tuples(base.BaseTestCase): def test_dict(self): - input_dict = {'foo': 'bar', 42: 'baz', 'aaa': 'zzz'} - expected = ((42, 'baz'), ('aaa', 'zzz'), ('foo', 'bar')) + input_dict = {'foo': 'bar', '42': 'baz', 'aaa': 'zzz'} + expected = (('42', 'baz'), ('aaa', 'zzz'), ('foo', 'bar')) output_tuple = utils.dict2tuple(input_dict) self.assertEqual(expected, output_tuple) @@ -679,3 +682,24 @@ class TestEnsureDir(base.BaseTestCase): def test_ensure_dir_calls_makedirs(self, makedirs): utils.ensure_dir("/etc/create/directory") makedirs.assert_called_once_with("/etc/create/directory", 0o755) + + +class TestCamelize(base.BaseTestCase): + def test_camelize(self): + data = {'bandwidth_limit': 'BandwidthLimit', + 'test': 'Test', + 'some__more__dashes': 'SomeMoreDashes', + 'a_penguin_walks_into_a_bar': 'APenguinWalksIntoABar'} + + for s, expected in data.items(): + self.assertEqual(expected, utils.camelize(s)) + + +class TestRoundVal(base.BaseTestCase): + def test_round_val_ok(self): + for expected, value in ((0, 0), + (0, 0.1), + (1, 0.5), + (1, 1.49), + (2, 1.5)): + self.assertEqual(expected, utils.round_val(value)) diff --git a/neutron/tests/unit/core_extensions/__init__.py b/neutron/tests/unit/core_extensions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/core_extensions/test_qos.py b/neutron/tests/unit/core_extensions/test_qos.py new file mode 100644 index 00000000000..07ba6398cca --- /dev/null +++ b/neutron/tests/unit/core_extensions/test_qos.py @@ -0,0 +1,195 @@ +# Copyright (c) 2015 Red Hat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron import context +from neutron.core_extensions import base as base_core +from neutron.core_extensions import qos as qos_core +from neutron.plugins.common import constants as plugin_constants +from neutron.services.qos import qos_consts +from neutron.tests import base + + +def _get_test_dbdata(qos_policy_id): + return {'id': None, 'qos_policy_binding': {'policy_id': qos_policy_id, + 'network_id': 'fake_net_id'}} + + +class QosCoreResourceExtensionTestCase(base.BaseTestCase): + + def setUp(self): + super(QosCoreResourceExtensionTestCase, self).setUp() + self.core_extension = qos_core.QosCoreResourceExtension() + policy_p = mock.patch('neutron.objects.qos.policy.QosPolicy') + self.policy_m = policy_p.start() + self.context = context.get_admin_context() + + def test_process_fields_no_qos_policy_id(self): + self.core_extension.process_fields( + self.context, base_core.PORT, {}, None) + self.assertFalse(self.policy_m.called) + + def _mock_plugin_loaded(self, plugin_loaded): + plugins = {} + if plugin_loaded: + plugins[plugin_constants.QOS] = None + return mock.patch('neutron.manager.NeutronManager.get_service_plugins', + return_value=plugins) + + def test_process_fields_no_qos_plugin_loaded(self): + with self._mock_plugin_loaded(False): + self.core_extension.process_fields( + self.context, base_core.PORT, + {qos_consts.QOS_POLICY_ID: None}, None) + self.assertFalse(self.policy_m.called) + + def test_process_fields_port_new_policy(self): + with self._mock_plugin_loaded(True): + qos_policy_id = mock.Mock() + actual_port = {'id': 
mock.Mock(), + qos_consts.QOS_POLICY_ID: qos_policy_id} + qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=qos_policy) + self.core_extension.process_fields( + self.context, base_core.PORT, + {qos_consts.QOS_POLICY_ID: qos_policy_id}, + actual_port) + + qos_policy.attach_port.assert_called_once_with(actual_port['id']) + + def test_process_fields_port_updated_policy(self): + with self._mock_plugin_loaded(True): + qos_policy1_id = mock.Mock() + qos_policy2_id = mock.Mock() + port_id = mock.Mock() + actual_port = {'id': port_id, + qos_consts.QOS_POLICY_ID: qos_policy1_id} + old_qos_policy = mock.MagicMock() + self.policy_m.get_port_policy = mock.Mock( + return_value=old_qos_policy) + new_qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy) + self.core_extension.process_fields( + self.context, base_core.PORT, + {qos_consts.QOS_POLICY_ID: qos_policy2_id}, + actual_port) + + old_qos_policy.detach_port.assert_called_once_with(port_id) + new_qos_policy.attach_port.assert_called_once_with(port_id) + self.assertEqual(qos_policy2_id, actual_port['qos_policy_id']) + + def test_process_resource_port_updated_no_policy(self): + with self._mock_plugin_loaded(True): + port_id = mock.Mock() + qos_policy_id = mock.Mock() + actual_port = {'id': port_id, + qos_consts.QOS_POLICY_ID: qos_policy_id} + old_qos_policy = mock.MagicMock() + self.policy_m.get_port_policy = mock.Mock( + return_value=old_qos_policy) + new_qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy) + self.core_extension.process_fields( + self.context, base_core.PORT, + {qos_consts.QOS_POLICY_ID: None}, + actual_port) + + old_qos_policy.detach_port.assert_called_once_with(port_id) + self.assertIsNone(actual_port['qos_policy_id']) + + def test_process_resource_network_updated_no_policy(self): + with self._mock_plugin_loaded(True): + network_id = mock.Mock() + qos_policy_id = mock.Mock() + actual_network = 
{'id': network_id, + qos_consts.QOS_POLICY_ID: qos_policy_id} + old_qos_policy = mock.MagicMock() + self.policy_m.get_network_policy = mock.Mock( + return_value=old_qos_policy) + new_qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy) + self.core_extension.process_fields( + self.context, base_core.NETWORK, + {qos_consts.QOS_POLICY_ID: None}, + actual_network) + + old_qos_policy.detach_network.assert_called_once_with(network_id) + self.assertIsNone(actual_network['qos_policy_id']) + + def test_process_fields_network_new_policy(self): + with self._mock_plugin_loaded(True): + qos_policy_id = mock.Mock() + actual_network = {'id': mock.Mock(), + qos_consts.QOS_POLICY_ID: qos_policy_id} + qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=qos_policy) + self.core_extension.process_fields( + self.context, base_core.NETWORK, + {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) + + qos_policy.attach_network.assert_called_once_with( + actual_network['id']) + + def test_process_fields_network_updated_policy(self): + with self._mock_plugin_loaded(True): + qos_policy_id = mock.Mock() + network_id = mock.Mock() + actual_network = {'id': network_id, + qos_consts.QOS_POLICY_ID: qos_policy_id} + old_qos_policy = mock.MagicMock() + self.policy_m.get_network_policy = mock.Mock( + return_value=old_qos_policy) + new_qos_policy = mock.MagicMock() + self.policy_m.get_by_id = mock.Mock(return_value=new_qos_policy) + self.core_extension.process_fields( + self.context, base_core.NETWORK, + {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) + + old_qos_policy.detach_network.assert_called_once_with(network_id) + new_qos_policy.attach_network.assert_called_once_with(network_id) + + def test_extract_fields_plugin_not_loaded(self): + with self._mock_plugin_loaded(False): + fields = self.core_extension.extract_fields(None, None) + self.assertEqual({}, fields) + + def _test_extract_fields_for_port(self, 
qos_policy_id): + with self._mock_plugin_loaded(True): + fields = self.core_extension.extract_fields( + base_core.PORT, _get_test_dbdata(qos_policy_id)) + self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields) + + def test_extract_fields_no_port_policy(self): + self._test_extract_fields_for_port(None) + + def test_extract_fields_port_policy_exists(self): + qos_policy_id = mock.Mock() + self._test_extract_fields_for_port(qos_policy_id) + + def _test_extract_fields_for_network(self, qos_policy_id): + with self._mock_plugin_loaded(True): + fields = self.core_extension.extract_fields( + base_core.NETWORK, _get_test_dbdata(qos_policy_id)) + self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields) + + def test_extract_fields_no_network_policy(self): + self._test_extract_fields_for_network(None) + + def test_extract_fields_network_policy_exists(self): + qos_policy_id = mock.Mock() + qos_policy = mock.Mock() + qos_policy.id = qos_policy_id + self._test_extract_fields_for_network(qos_policy_id) diff --git a/neutron/tests/unit/db/quota/test_api.py b/neutron/tests/unit/db/quota/test_api.py index a64e2b98b44..c527a663179 100644 --- a/neutron/tests/unit/db/quota/test_api.py +++ b/neutron/tests/unit/db/quota/test_api.py @@ -12,6 +12,10 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime + +import mock + from neutron import context from neutron.db.quota import api as quota_api from neutron.tests.unit import testlib_api @@ -24,6 +28,12 @@ class TestQuotaDbApi(testlib_api.SqlTestCaseLight): self.context = context.Context('Gonzalo', self.tenant_id, is_admin=False, is_advsvc=False) + def _create_reservation(self, resource_deltas, + tenant_id=None, expiration=None): + tenant_id = tenant_id or self.tenant_id + return quota_api.create_reservation( + self.context, tenant_id, resource_deltas, expiration) + def _create_quota_usage(self, resource, used, reserved, tenant_id=None): tenant_id = tenant_id or self.tenant_id return quota_api.set_quota_usage( @@ -203,6 +213,125 @@ class TestQuotaDbApi(testlib_api.SqlTestCaseLight): self.assertIsNone(quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id)) + def _verify_reserved_resources(self, expected, actual): + for (resource, delta) in actual.items(): + self.assertIn(resource, expected) + self.assertEqual(delta, expected[resource]) + del expected[resource] + self.assertFalse(expected) + + def test_create_reservation(self): + resources = {'goals': 2, 'assists': 1} + resv = self._create_reservation(resources) + self.assertEqual(self.tenant_id, resv.tenant_id) + self._verify_reserved_resources(resources, resv.deltas) + + def test_create_reservation_with_expirtion(self): + resources = {'goals': 2, 'assists': 1} + exp_date = datetime.datetime(2016, 3, 31, 14, 30) + resv = self._create_reservation(resources, expiration=exp_date) + self.assertEqual(self.tenant_id, resv.tenant_id) + self.assertEqual(exp_date, resv.expiration) + self._verify_reserved_resources(resources, resv.deltas) + + def _test_remove_reservation(self, set_dirty): + resources = {'goals': 2, 'assists': 1} + resv = self._create_reservation(resources) + self.assertEqual(1, quota_api.remove_reservation( + self.context, resv.reservation_id, set_dirty=set_dirty)) + + def test_remove_reservation(self): + 
self._test_remove_reservation(False) + + def test_remove_reservation_and_set_dirty(self): + routine = 'neutron.db.quota.api.set_resources_quota_usage_dirty' + with mock.patch(routine) as mock_routine: + self._test_remove_reservation(False) + mock_routine.assert_called_once_with( + self.context, mock.ANY, self.tenant_id) + + def test_remove_non_existent_reservation(self): + self.assertIsNone(quota_api.remove_reservation(self.context, 'meh')) + + def _get_reservations_for_resource_helper(self): + # create three reservation, 1 expired + resources_1 = {'goals': 2, 'assists': 1} + resources_2 = {'goals': 3, 'bookings': 1} + resources_3 = {'bookings': 2, 'assists': 2} + exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) + exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) + self._create_reservation(resources_1, expiration=exp_date_1) + self._create_reservation(resources_2, expiration=exp_date_1) + self._create_reservation(resources_3, expiration=exp_date_2) + + def test_get_reservations_for_resources(self): + with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: + self._get_reservations_for_resource_helper() + mock_utcnow.return_value = datetime.datetime( + 2015, 5, 20, 0, 0) + deltas = quota_api.get_reservations_for_resources( + self.context, self.tenant_id, ['goals', 'assists', 'bookings']) + self.assertIn('goals', deltas) + self.assertEqual(5, deltas['goals']) + self.assertIn('assists', deltas) + self.assertEqual(1, deltas['assists']) + self.assertIn('bookings', deltas) + self.assertEqual(1, deltas['bookings']) + self.assertEqual(3, len(deltas)) + + def test_get_expired_reservations_for_resources(self): + with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: + mock_utcnow.return_value = datetime.datetime( + 2015, 5, 20, 0, 0) + self._get_reservations_for_resource_helper() + deltas = quota_api.get_reservations_for_resources( + self.context, self.tenant_id, + ['goals', 'assists', 'bookings'], + expired=True) + self.assertIn('assists', deltas) + 
self.assertEqual(2, deltas['assists']) + self.assertIn('bookings', deltas) + self.assertEqual(2, deltas['bookings']) + self.assertEqual(2, len(deltas)) + + def test_get_reservation_for_resources_with_empty_list(self): + self.assertIsNone(quota_api.get_reservations_for_resources( + self.context, self.tenant_id, [])) + + def test_remove_expired_reservations(self): + with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: + mock_utcnow.return_value = datetime.datetime( + 2015, 5, 20, 0, 0) + resources = {'goals': 2, 'assists': 1} + exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) + resv_1 = self._create_reservation(resources, expiration=exp_date_1) + exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) + resv_2 = self._create_reservation(resources, expiration=exp_date_2) + self.assertEqual(1, quota_api.remove_expired_reservations( + self.context, self.tenant_id)) + self.assertIsNone(quota_api.get_reservation( + self.context, resv_2.reservation_id)) + self.assertIsNotNone(quota_api.get_reservation( + self.context, resv_1.reservation_id)) + + def test_remove_expired_reservations_no_tenant(self): + with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: + mock_utcnow.return_value = datetime.datetime( + 2015, 5, 20, 0, 0) + resources = {'goals': 2, 'assists': 1} + exp_date_1 = datetime.datetime(2014, 3, 31, 14, 30) + resv_1 = self._create_reservation(resources, expiration=exp_date_1) + exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) + resv_2 = self._create_reservation(resources, + expiration=exp_date_2, + tenant_id='Callejon') + self.assertEqual(2, quota_api.remove_expired_reservations( + self.context)) + self.assertIsNone(quota_api.get_reservation( + self.context, resv_2.reservation_id)) + self.assertIsNone(quota_api.get_reservation( + self.context, resv_1.reservation_id)) + class TestQuotaDbApiAdminContext(TestQuotaDbApi): diff --git a/neutron/tests/unit/db/quota/test_driver.py b/neutron/tests/unit/db/quota/test_driver.py index 
31a741721ce..dafee362a6d 100644 --- a/neutron/tests/unit/db/quota/test_driver.py +++ b/neutron/tests/unit/db/quota/test_driver.py @@ -27,16 +27,22 @@ class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver): class TestResource(object): """Describe a test resource for quota checking.""" - def __init__(self, name, default): + def __init__(self, name, default, fake_count=0): self.name = name self.quota = default + self.fake_count = fake_count @property def default(self): return self.quota + def count(self, *args, **kwargs): + return self.fake_count + + PROJECT = 'prj_test' RESOURCE = 'res_test' +ALT_RESOURCE = 'res_test_meh' class TestDbQuotaDriver(testlib_api.SqlTestCase): @@ -132,3 +138,63 @@ class TestDbQuotaDriver(testlib_api.SqlTestCase): self.assertRaises(exceptions.InvalidQuotaValue, self.plugin.limit_check, context.get_admin_context(), PROJECT, resources, values) + + def _test_make_reservation_success(self, quota_driver, + resource_name, deltas): + resources = {resource_name: TestResource(resource_name, 2)} + self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2) + reservation = quota_driver.make_reservation( + self.context, + self.context.tenant_id, + resources, + deltas, + self.plugin) + self.assertIn(resource_name, reservation.deltas) + self.assertEqual(deltas[resource_name], + reservation.deltas[resource_name]) + self.assertEqual(self.context.tenant_id, + reservation.tenant_id) + + def test_make_reservation_single_resource(self): + quota_driver = driver.DbQuotaDriver() + self._test_make_reservation_success( + quota_driver, RESOURCE, {RESOURCE: 1}) + + def test_make_reservation_fill_quota(self): + quota_driver = driver.DbQuotaDriver() + self._test_make_reservation_success( + quota_driver, RESOURCE, {RESOURCE: 2}) + + def test_make_reservation_multiple_resources(self): + quota_driver = driver.DbQuotaDriver() + resources = {RESOURCE: TestResource(RESOURCE, 2), + ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)} + deltas = {RESOURCE: 
1, ALT_RESOURCE: 2} + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2) + reservation = quota_driver.make_reservation( + self.context, + self.context.tenant_id, + resources, + deltas, + self.plugin) + self.assertIn(RESOURCE, reservation.deltas) + self.assertIn(ALT_RESOURCE, reservation.deltas) + self.assertEqual(1, reservation.deltas[RESOURCE]) + self.assertEqual(2, reservation.deltas[ALT_RESOURCE]) + self.assertEqual(self.context.tenant_id, + reservation.tenant_id) + + def test_make_reservation_over_quota_fails(self): + quota_driver = driver.DbQuotaDriver() + resources = {RESOURCE: TestResource(RESOURCE, 2, + fake_count=2)} + deltas = {RESOURCE: 1} + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.assertRaises(exceptions.OverQuota, + quota_driver.make_reservation, + self.context, + self.context.tenant_id, + resources, + deltas, + self.plugin) diff --git a/neutron/tests/unit/db/test_agents_db.py b/neutron/tests/unit/db/test_agents_db.py index a4726631458..3aeea2b3ab4 100644 --- a/neutron/tests/unit/db/test_agents_db.py +++ b/neutron/tests/unit/db/test_agents_db.py @@ -16,6 +16,7 @@ import datetime import mock +from oslo_config import cfg from oslo_db import exception as exc from oslo_utils import timeutils import testscenarios @@ -154,6 +155,12 @@ class TestAgentsDbMixin(TestAgentsDbBase): self.assertEqual(add_mock.call_count, 2, "Agent entry creation hasn't been retried") + def test_create_or_update_agent_disable_new_agents(self): + cfg.CONF.set_override('enable_new_agents', False) + self.plugin.create_or_update_agent(self.context, self.agent_status) + agent = self.plugin.get_agents(self.context)[0] + self.assertFalse(agent['admin_state_up']) + class TestAgentsDbGetAgents(TestAgentsDbBase): scenarios = [ diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py b/neutron/tests/unit/db/test_agentschedulers_db.py 
similarity index 91% rename from neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py rename to neutron/tests/unit/db/test_agentschedulers_db.py index e512b102fb7..2a56241ab34 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py +++ b/neutron/tests/unit/db/test_agentschedulers_db.py @@ -749,6 +749,30 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB) self.assertFalse(ret_b) + def test_router_is_not_rescheduled_from_dvr_agent(self): + router = {'name': 'router1', + 'admin_state_up': True, + 'distributed': True} + r = self.l3plugin.create_router( + self.adminContext, {'router': router}) + dvr_agent = self._register_dvr_agents()[1] + + with mock.patch.object( + self.l3plugin, + 'check_ports_exist_on_l3agent') as port_exists: + port_exists.return_value = True + self.l3plugin.schedule_router( + self.adminContext, r['id']) + agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(agents['agents'])) + self.assertIn(dvr_agent['host'], + [a['host'] for a in agents['agents']]) + self._take_down_agent_and_run_reschedule(dvr_agent['host']) + agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(agents['agents'])) + self.assertIn(dvr_agent['host'], + [a['host'] for a in agents['agents']]) + def test_router_auto_schedule_with_invalid_router(self): with self.router() as router: l3_rpc_cb = l3_rpc.L3RpcCallback() @@ -997,6 +1021,117 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): set([a['configurations']['agent_mode'] for a in l3agents['agents']])) + def test_dvr_router_snat_scheduling_late_ext_gw_add(self): + """Test snat scheduling for the case when dvr router is already + scheduled to all dvr_snat agents and then external gateway is added. 
+ """ + helpers.register_l3_agent( + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + helpers.register_l3_agent( + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + with self.subnet() as s_int,\ + self.subnet(cidr='20.0.0.0/24') as s_ext: + net_id = s_ext['subnet']['network_id'] + self._set_net_external(net_id) + + router = {'name': 'router1', + 'admin_state_up': True, + 'distributed': True} + r = self.l3plugin.create_router(self.adminContext, + {'router': router}) + # add router interface first + self.l3plugin.add_router_interface(self.adminContext, r['id'], + {'subnet_id': s_int['subnet']['id']}) + # check that router is scheduled to both dvr_snat agents + l3agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(l3agents['agents'])) + # check that snat is not scheduled as router is not connected to + # external network + snat_agents = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']]) + self.assertEqual(0, len(snat_agents)) + + # connect router to external network + self.l3plugin.update_router(self.adminContext, r['id'], + {'router': {'external_gateway_info': {'network_id': net_id}}}) + # router should still be scheduled to both dvr_snat agents + l3agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(l3agents['agents'])) + # now snat portion should be scheduled as router is connected + # to external network + snat_agents = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']]) + self.assertEqual(1, len(snat_agents)) + + def test_dvr_router_csnat_rescheduling(self): + helpers.register_l3_agent( + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + helpers.register_l3_agent( + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + with self.subnet() as s: + net_id = s['subnet']['network_id'] + self._set_net_external(net_id) + + router = {'name': 'router1', + 'external_gateway_info': {'network_id': net_id}, + 'admin_state_up': True, + 'distributed': 
True} + r = self.l3plugin.create_router(self.adminContext, + {'router': router}) + self.l3plugin.schedule_router( + self.adminContext, r['id']) + l3agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(l3agents['agents'])) + csnat_agent_host = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']])[0]['l3_agent']['host'] + self._take_down_agent_and_run_reschedule(csnat_agent_host) + l3agents = self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(1, len(l3agents['agents'])) + new_csnat_agent_host = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']])[0]['l3_agent']['host'] + self.assertNotEqual(csnat_agent_host, new_csnat_agent_host) + + def test_dvr_router_csnat_manual_rescheduling(self): + helpers.register_l3_agent( + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + helpers.register_l3_agent( + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) + with self.subnet() as s: + net_id = s['subnet']['network_id'] + self._set_net_external(net_id) + + router = {'name': 'router1', + 'external_gateway_info': {'network_id': net_id}, + 'admin_state_up': True, + 'distributed': True} + r = self.l3plugin.create_router(self.adminContext, + {'router': router}) + self.l3plugin.schedule_router( + self.adminContext, r['id']) + l3agents = self.l3plugin.list_l3_agents_hosting_router( + self.adminContext, r['id']) + self.assertEqual(2, len(l3agents['agents'])) + csnat_agent = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']])[0]['l3_agent'] + + self.l3plugin.remove_router_from_l3_agent( + self.adminContext, csnat_agent['id'], r['id']) + + l3agents = self.l3plugin.list_l3_agents_hosting_router( + self.adminContext, r['id']) + self.assertEqual(1, len(l3agents['agents'])) + self.assertFalse(self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']])) + + self.l3plugin.add_router_to_l3_agent( + self.adminContext, csnat_agent['id'], r['id']) + + l3agents = 
self._list_l3_agents_hosting_router(r['id']) + self.assertEqual(2, len(l3agents['agents'])) + new_csnat_agent = self.l3plugin.get_snat_bindings( + self.adminContext, [r['id']])[0]['l3_agent'] + self.assertEqual(csnat_agent['id'], new_csnat_agent['id']) + def test_router_sync_data(self): with self.subnet() as s1,\ self.subnet(cidr='10.0.2.0/24') as s2,\ diff --git a/neutron/tests/unit/db/test_db_base_plugin_common.py b/neutron/tests/unit/db/test_db_base_plugin_common.py new file mode 100644 index 00000000000..21866522ad7 --- /dev/null +++ b/neutron/tests/unit/db/test_db_base_plugin_common.py @@ -0,0 +1,93 @@ +# Copyright (c) 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.db import db_base_plugin_common +from neutron.tests import base + + +class DummyObject(object): + def __init__(self, **kwargs): + self.kwargs = kwargs + + def to_dict(self): + return self.kwargs + + +class ConvertToDictTestCase(base.BaseTestCase): + + @db_base_plugin_common.convert_result_to_dict + def method_dict(self, fields=None): + return DummyObject(one=1, two=2, three=3) + + @db_base_plugin_common.convert_result_to_dict + def method_list(self): + return [DummyObject(one=1, two=2, three=3)] * 3 + + def test_simple_object(self): + expected = {'one': 1, 'two': 2, 'three': 3} + observed = self.method_dict() + self.assertEqual(expected, observed) + + def test_list_of_objects(self): + expected = [{'one': 1, 'two': 2, 'three': 3}] * 3 + observed = self.method_list() + self.assertEqual(expected, observed) + + +class FilterFieldsTestCase(base.BaseTestCase): + + @db_base_plugin_common.filter_fields + def method_dict(self, fields=None): + return {'one': 1, 'two': 2, 'three': 3} + + @db_base_plugin_common.filter_fields + def method_list(self, fields=None): + return [self.method_dict() for _ in range(3)] + + @db_base_plugin_common.filter_fields + def method_multiple_arguments(self, not_used, fields=None, + also_not_used=None): + return {'one': 1, 'two': 2, 'three': 3} + + def test_no_fields(self): + expected = {'one': 1, 'two': 2, 'three': 3} + observed = self.method_dict() + self.assertEqual(expected, observed) + + def test_dict(self): + expected = {'two': 2} + observed = self.method_dict(['two']) + self.assertEqual(expected, observed) + + def test_list(self): + expected = [{'two': 2}, {'two': 2}, {'two': 2}] + observed = self.method_list(['two']) + self.assertEqual(expected, observed) + + def test_multiple_arguments_positional(self): + expected = {'two': 2} + observed = self.method_multiple_arguments(list(), ['two']) + self.assertEqual(expected, observed) + + def test_multiple_arguments_positional_and_keywords(self): + expected = {'two': 2} + observed = 
self.method_multiple_arguments(fields=['two'], + not_used=None) + self.assertEqual(expected, observed) + + def test_multiple_arguments_keyword(self): + expected = {'two': 2} + observed = self.method_multiple_arguments(list(), fields=['two']) + self.assertEqual(expected, observed) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 8bdb54bbd04..bb505acb4a7 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -40,7 +40,6 @@ from neutron.common import test_lib from neutron.common import utils from neutron import context from neutron.db import db_base_plugin_common -from neutron.db import db_base_plugin_v2 from neutron.db import ipam_non_pluggable_backend as non_ipam from neutron.db import models_v2 from neutron import manager @@ -4167,6 +4166,19 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + def _verify_updated_subnet_allocation_pools(self, res, with_gateway_ip): + res = self.deserialize(self.fmt, res) + self.assertEqual(len(res['subnet']['allocation_pools']), 2) + res_vals = ( + list(res['subnet']['allocation_pools'][0].values()) + + list(res['subnet']['allocation_pools'][1].values()) + ) + for pool_val in ['10', '20', '30', '40']: + self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) + if with_gateway_ip: + self.assertEqual((res['subnet']['gateway_ip']), + '192.168.0.9') + def _test_update_subnet_allocation_pools(self, with_gateway_ip=False): """Test that we can successfully update with sane params. 
@@ -4187,22 +4199,17 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): data['subnet']['gateway_ip'] = '192.168.0.9' req = self.new_update_request('subnets', data, subnet['subnet']['id']) - #check res code but then do GET on subnet for verification + #check res code and contents res = req.get_response(self.api) self.assertEqual(res.status_code, 200) + self._verify_updated_subnet_allocation_pools(res, + with_gateway_ip) + #GET subnet to verify DB updated correctly req = self.new_show_request('subnets', subnet['subnet']['id'], self.fmt) - res = self.deserialize(self.fmt, req.get_response(self.api)) - self.assertEqual(len(res['subnet']['allocation_pools']), 2) - res_vals = ( - list(res['subnet']['allocation_pools'][0].values()) + - list(res['subnet']['allocation_pools'][1].values()) - ) - for pool_val in ['10', '20', '30', '40']: - self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) - if with_gateway_ip: - self.assertEqual((res['subnet']['gateway_ip']), - '192.168.0.9') + res = req.get_response(self.api) + self._verify_updated_subnet_allocation_pools(res, + with_gateway_ip) def test_update_subnet_allocation_pools(self): self._test_update_subnet_allocation_pools() @@ -5482,199 +5489,6 @@ class DbModelTestCase(base.BaseTestCase): self.assertEqual(actual_repr_output, final_exp) -class TestNeutronDbPluginV2(base.BaseTestCase): - """Unit Tests for NeutronDbPluginV2 IPAM Logic.""" - - def test_generate_ip(self): - with mock.patch.object(non_ipam.IpamNonPluggableBackend, - '_try_generate_ip') as generate: - with mock.patch.object(non_ipam.IpamNonPluggableBackend, - '_rebuild_availability_ranges') as rebuild: - - non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') - - generate.assert_called_once_with('c', 's') - self.assertEqual(0, rebuild.call_count) - - def test_generate_ip_exhausted_pool(self): - with mock.patch.object(non_ipam.IpamNonPluggableBackend, - '_try_generate_ip') as generate: - with mock.patch.object(non_ipam.IpamNonPluggableBackend, - 
'_rebuild_availability_ranges') as rebuild: - - exception = n_exc.IpAddressGenerationFailure(net_id='n') - # fail first call but not second - generate.side_effect = [exception, None] - non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') - - self.assertEqual(2, generate.call_count) - rebuild.assert_called_once_with('c', 's') - - def _validate_rebuild_availability_ranges(self, pools, allocations, - expected): - ip_qry = mock.Mock() - ip_qry.with_lockmode.return_value = ip_qry - ip_qry.filter_by.return_value = allocations - - pool_qry = mock.Mock() - pool_qry.options.return_value = pool_qry - pool_qry.with_lockmode.return_value = pool_qry - pool_qry.filter_by.return_value = pools - - def return_queries_side_effect(*args, **kwargs): - if args[0] == models_v2.IPAllocation: - return ip_qry - if args[0] == models_v2.IPAllocationPool: - return pool_qry - - context = mock.Mock() - context.session.query.side_effect = return_queries_side_effect - subnets = [mock.MagicMock()] - - non_ipam.IpamNonPluggableBackend._rebuild_availability_ranges( - context, subnets) - - actual = [[args[0].allocation_pool_id, - args[0].first_ip, args[0].last_ip] - for _name, args, _kwargs in context.session.add.mock_calls] - self.assertEqual(expected, actual) - - def test_rebuild_availability_ranges(self): - pools = [{'id': 'a', - 'first_ip': '192.168.1.3', - 'last_ip': '192.168.1.10'}, - {'id': 'b', - 'first_ip': '192.168.1.100', - 'last_ip': '192.168.1.120'}] - - allocations = [{'ip_address': '192.168.1.3'}, - {'ip_address': '192.168.1.78'}, - {'ip_address': '192.168.1.7'}, - {'ip_address': '192.168.1.110'}, - {'ip_address': '192.168.1.11'}, - {'ip_address': '192.168.1.4'}, - {'ip_address': '192.168.1.111'}] - - expected = [['a', '192.168.1.5', '192.168.1.6'], - ['a', '192.168.1.8', '192.168.1.10'], - ['b', '192.168.1.100', '192.168.1.109'], - ['b', '192.168.1.112', '192.168.1.120']] - - self._validate_rebuild_availability_ranges(pools, allocations, - expected) - - def 
test_rebuild_ipv6_availability_ranges(self): - pools = [{'id': 'a', - 'first_ip': '2001::1', - 'last_ip': '2001::50'}, - {'id': 'b', - 'first_ip': '2001::100', - 'last_ip': '2001::ffff:ffff:ffff:fffe'}] - - allocations = [{'ip_address': '2001::10'}, - {'ip_address': '2001::45'}, - {'ip_address': '2001::60'}, - {'ip_address': '2001::111'}, - {'ip_address': '2001::200'}, - {'ip_address': '2001::ffff:ffff:ffff:ff10'}, - {'ip_address': '2001::ffff:ffff:ffff:f2f0'}] - - expected = [['a', '2001::1', '2001::f'], - ['a', '2001::11', '2001::44'], - ['a', '2001::46', '2001::50'], - ['b', '2001::100', '2001::110'], - ['b', '2001::112', '2001::1ff'], - ['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'], - ['b', '2001::ffff:ffff:ffff:f2f1', - '2001::ffff:ffff:ffff:ff0f'], - ['b', '2001::ffff:ffff:ffff:ff11', - '2001::ffff:ffff:ffff:fffe']] - - self._validate_rebuild_availability_ranges(pools, allocations, - expected) - - def _test__allocate_ips_for_port(self, subnets, port, expected): - # this test is incompatible with pluggable ipam, because subnets - # were not actually created, so no ipam_subnet exists - cfg.CONF.set_override("ipam_driver", None) - plugin = db_base_plugin_v2.NeutronDbPluginV2() - with mock.patch.object(db_base_plugin_common.DbBasePluginCommon, - '_get_subnets') as get_subnets: - with mock.patch.object(non_ipam.IpamNonPluggableBackend, - '_check_unique_ip') as check_unique: - context = mock.Mock() - get_subnets.return_value = subnets - check_unique.return_value = True - actual = plugin.ipam._allocate_ips_for_port(context, port) - self.assertEqual(expected, actual) - - def test__allocate_ips_for_port_2_slaac_subnets(self): - subnets = [ - { - 'cidr': u'2001:100::/64', - 'enable_dhcp': True, - 'gateway_ip': u'2001:100::1', - 'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13', - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'ip_version': 6, - 'ipv6_address_mode': None, - 'ipv6_ra_mode': u'slaac'}, - { - 'cidr': u'2001:200::/64', - 'enable_dhcp': True, - 
'gateway_ip': u'2001:200::1', - 'id': u'dc813d3d-ed66-4184-8570-7325c8195e28', - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'ip_version': 6, - 'ipv6_address_mode': None, - 'ipv6_ra_mode': u'slaac'}] - port = {'port': { - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, - 'mac_address': '12:34:56:78:44:ab', - 'device_owner': 'compute'}} - expected = [] - for subnet in subnets: - addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( - subnet['cidr'], port['port']['mac_address'])) - expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) - - self._test__allocate_ips_for_port(subnets, port, expected) - - def test__allocate_ips_for_port_2_slaac_pd_subnets(self): - subnets = [ - { - 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, - 'enable_dhcp': True, - 'gateway_ip': '::1', - 'id': 'd1a28edd-bd83-480a-bd40-93d036c89f13', - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'ip_version': 6, - 'ipv6_address_mode': None, - 'ipv6_ra_mode': 'slaac'}, - { - 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, - 'enable_dhcp': True, - 'gateway_ip': '::1', - 'id': 'dc813d3d-ed66-4184-8570-7325c8195e28', - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'ip_version': 6, - 'ipv6_address_mode': None, - 'ipv6_ra_mode': 'slaac'}] - port = {'port': { - 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', - 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, - 'mac_address': '12:34:56:78:44:ab', - 'device_owner': 'compute'}} - expected = [] - for subnet in subnets: - addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( - subnet['cidr'], port['port']['mac_address'])) - expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) - - self._test__allocate_ips_for_port(subnets, port, expected) - - class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase, testlib_api.SqlTestCase): """Tests for NeutronDbPluginV2 as Mixin. 
diff --git a/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py new file mode 100644 index 00000000000..3678e7978ec --- /dev/null +++ b/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py @@ -0,0 +1,220 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron.db import db_base_plugin_common +from neutron.db import db_base_plugin_v2 +from neutron.db import ipam_non_pluggable_backend as non_ipam +from neutron.db import models_v2 +from neutron.tests import base + + +class TestIpamNonPluggableBackend(base.BaseTestCase): + """Unit Tests for non pluggable IPAM Logic.""" + + def test_generate_ip(self): + with mock.patch.object(non_ipam.IpamNonPluggableBackend, + '_try_generate_ip') as generate: + with mock.patch.object(non_ipam.IpamNonPluggableBackend, + '_rebuild_availability_ranges') as rebuild: + + non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') + + generate.assert_called_once_with('c', 's') + self.assertEqual(0, rebuild.call_count) + + def test_generate_ip_exhausted_pool(self): + with mock.patch.object(non_ipam.IpamNonPluggableBackend, + '_try_generate_ip') as generate: + with 
mock.patch.object(non_ipam.IpamNonPluggableBackend, + '_rebuild_availability_ranges') as rebuild: + + exception = n_exc.IpAddressGenerationFailure(net_id='n') + # fail first call but not second + generate.side_effect = [exception, None] + non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') + + self.assertEqual(2, generate.call_count) + rebuild.assert_called_once_with('c', 's') + + def _validate_rebuild_availability_ranges(self, pools, allocations, + expected): + ip_qry = mock.Mock() + ip_qry.with_lockmode.return_value = ip_qry + ip_qry.filter_by.return_value = allocations + + pool_qry = mock.Mock() + pool_qry.options.return_value = pool_qry + pool_qry.with_lockmode.return_value = pool_qry + pool_qry.filter_by.return_value = pools + + def return_queries_side_effect(*args, **kwargs): + if args[0] == models_v2.IPAllocation: + return ip_qry + if args[0] == models_v2.IPAllocationPool: + return pool_qry + + context = mock.Mock() + context.session.query.side_effect = return_queries_side_effect + subnets = [mock.MagicMock()] + + non_ipam.IpamNonPluggableBackend._rebuild_availability_ranges( + context, subnets) + + actual = [[args[0].allocation_pool_id, + args[0].first_ip, args[0].last_ip] + for _name, args, _kwargs in context.session.add.mock_calls] + self.assertEqual(expected, actual) + + def test_rebuild_availability_ranges(self): + pools = [{'id': 'a', + 'first_ip': '192.168.1.3', + 'last_ip': '192.168.1.10'}, + {'id': 'b', + 'first_ip': '192.168.1.100', + 'last_ip': '192.168.1.120'}] + + allocations = [{'ip_address': '192.168.1.3'}, + {'ip_address': '192.168.1.78'}, + {'ip_address': '192.168.1.7'}, + {'ip_address': '192.168.1.110'}, + {'ip_address': '192.168.1.11'}, + {'ip_address': '192.168.1.4'}, + {'ip_address': '192.168.1.111'}] + + expected = [['a', '192.168.1.5', '192.168.1.6'], + ['a', '192.168.1.8', '192.168.1.10'], + ['b', '192.168.1.100', '192.168.1.109'], + ['b', '192.168.1.112', '192.168.1.120']] + + self._validate_rebuild_availability_ranges(pools, 
allocations, + expected) + + def test_rebuild_ipv6_availability_ranges(self): + pools = [{'id': 'a', + 'first_ip': '2001::1', + 'last_ip': '2001::50'}, + {'id': 'b', + 'first_ip': '2001::100', + 'last_ip': '2001::ffff:ffff:ffff:fffe'}] + + allocations = [{'ip_address': '2001::10'}, + {'ip_address': '2001::45'}, + {'ip_address': '2001::60'}, + {'ip_address': '2001::111'}, + {'ip_address': '2001::200'}, + {'ip_address': '2001::ffff:ffff:ffff:ff10'}, + {'ip_address': '2001::ffff:ffff:ffff:f2f0'}] + + expected = [['a', '2001::1', '2001::f'], + ['a', '2001::11', '2001::44'], + ['a', '2001::46', '2001::50'], + ['b', '2001::100', '2001::110'], + ['b', '2001::112', '2001::1ff'], + ['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'], + ['b', '2001::ffff:ffff:ffff:f2f1', + '2001::ffff:ffff:ffff:ff0f'], + ['b', '2001::ffff:ffff:ffff:ff11', + '2001::ffff:ffff:ffff:fffe']] + + self._validate_rebuild_availability_ranges(pools, allocations, + expected) + + def _test__allocate_ips_for_port(self, subnets, port, expected): + # this test is incompatible with pluggable ipam, because subnets + # were not actually created, so no ipam_subnet exists + cfg.CONF.set_override("ipam_driver", None) + plugin = db_base_plugin_v2.NeutronDbPluginV2() + with mock.patch.object(db_base_plugin_common.DbBasePluginCommon, + '_get_subnets') as get_subnets: + with mock.patch.object(non_ipam.IpamNonPluggableBackend, + '_check_unique_ip') as check_unique: + context = mock.Mock() + get_subnets.return_value = subnets + check_unique.return_value = True + actual = plugin.ipam._allocate_ips_for_port(context, port) + self.assertEqual(expected, actual) + + def test__allocate_ips_for_port_2_slaac_subnets(self): + subnets = [ + { + 'cidr': u'2001:100::/64', + 'enable_dhcp': True, + 'gateway_ip': u'2001:100::1', + 'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'ip_version': 6, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': u'slaac'}, + { + 'cidr': 
u'2001:200::/64', + 'enable_dhcp': True, + 'gateway_ip': u'2001:200::1', + 'id': u'dc813d3d-ed66-4184-8570-7325c8195e28', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'ip_version': 6, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': u'slaac'}] + port = {'port': { + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'mac_address': '12:34:56:78:44:ab', + 'device_owner': 'compute'}} + expected = [] + for subnet in subnets: + addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( + subnet['cidr'], port['port']['mac_address'])) + expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) + + self._test__allocate_ips_for_port(subnets, port, expected) + + def test__allocate_ips_for_port_2_slaac_pd_subnets(self): + subnets = [ + { + 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, + 'enable_dhcp': True, + 'gateway_ip': '::1', + 'id': 'd1a28edd-bd83-480a-bd40-93d036c89f13', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'ip_version': 6, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': 'slaac'}, + { + 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, + 'enable_dhcp': True, + 'gateway_ip': '::1', + 'id': 'dc813d3d-ed66-4184-8570-7325c8195e28', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'ip_version': 6, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': 'slaac'}] + port = {'port': { + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'mac_address': '12:34:56:78:44:ab', + 'device_owner': 'compute'}} + expected = [] + for subnet in subnets: + addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( + subnet['cidr'], port['port']['mac_address'])) + expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) + + self._test__allocate_ips_for_port(subnets, port, expected) diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py index 80d826c7977..dd5d111123f 100644 --- 
a/neutron/tests/unit/db/test_ipam_pluggable_backend.py +++ b/neutron/tests/unit/db/test_ipam_pluggable_backend.py @@ -20,6 +20,7 @@ import webob.exc from oslo_config import cfg from oslo_utils import uuidutils +from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin @@ -283,6 +284,23 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase): self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) + @mock.patch('neutron.ipam.driver.Pool') + def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock): + mocks = self._prepare_mocks_with_pool_mock(pool_mock) + cfg.CONF.set_override('default_ipv6_subnet_pool', + constants.IPV6_PD_POOL_ID) + cidr = constants.PROVISIONAL_IPV6_PD_PREFIX + allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')] + with self.subnet(cidr=None, ip_version=6, + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC): + pool_mock.get_instance.assert_called_once_with(None, mock.ANY) + self.assertTrue(mocks['driver'].allocate_subnet.called) + request = mocks['driver'].allocate_subnet.call_args[0][0] + self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) + self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) + self.assertEqual(allocation_pools, request.allocation_pools) + @mock.patch('neutron.ipam.driver.Pool') def test_create_subnet_over_ipam_with_rollback(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) diff --git a/neutron/tests/unit/db/test_l3_dvr_db.py b/neutron/tests/unit/db/test_l3_dvr_db.py index b10ee8d9c3d..94e330cfc23 100644 --- a/neutron/tests/unit/db/test_l3_dvr_db.py +++ b/neutron/tests/unit/db/test_l3_dvr_db.py @@ -19,21 +19,32 @@ from oslo_utils import uuidutils from neutron.common import constants as l3_const from neutron.common import exceptions from neutron import 
context +from neutron.db import agents_db from neutron.db import common_db_mixin +from neutron.db import l3_agentschedulers_db from neutron.db import l3_dvr_db from neutron import manager from neutron.plugins.common import constants as plugin_const -from neutron.tests.unit import testlib_api +from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid -class L3DvrTestCase(testlib_api.SqlTestCase): +class FakeL3Plugin(common_db_mixin.CommonDbMixin, + l3_dvr_db.L3_NAT_with_dvr_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agents_db.AgentDbMixin): + pass + + +class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): - super(L3DvrTestCase, self).setUp() + core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' + super(L3DvrTestCase, self).setUp(plugin=core_plugin) + self.core_plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() - self.mixin = l3_dvr_db.L3_NAT_with_dvr_db_mixin() + self.mixin = FakeL3Plugin() def _create_router(self, router): with self.ctx.session.begin(subtransactions=True): @@ -89,6 +100,14 @@ class L3DvrTestCase(testlib_api.SqlTestCase): self.mixin._validate_router_migration, self.ctx, router_db, {'distributed': False}) + def test_upgrade_active_router_to_distributed_validation_failure(self): + router = {'name': 'foo_router', 'admin_state_up': True} + router_db = self._create_router(router) + update = {'distributed': True} + self.assertRaises(exceptions.BadRequest, + self.mixin._validate_router_migration, + self.ctx, router_db, update) + def test_update_router_db_centralized_to_distributed(self): router = {'name': 'foo_router', 'admin_state_up': True} agent = {'id': _uuid()} @@ -173,7 +192,7 @@ class L3DvrTestCase(testlib_api.SqlTestCase): with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp: plugin = mock.Mock() gp.return_value = plugin - plugin._get_port.return_value = port + plugin.get_port.return_value = port 
self.assertRaises(exceptions.ServicePortInUse, self.mixin.prevent_l3_port_deletion, self.ctx, @@ -542,8 +561,57 @@ class L3DvrTestCase(testlib_api.SqlTestCase): self.assertTrue(plugin.check_ports_exist_on_l3agent.called) self.assertTrue(plugin.remove_router_from_l3_agent.called) + def test_remove_router_interface_csnat_ports_removal(self): + router_dict = {'name': 'test_router', 'admin_state_up': True, + 'distributed': True} + router = self._create_router(router_dict) + with self.network() as net_ext,\ + self.subnet() as subnet1,\ + self.subnet(cidr='20.0.0.0/24') as subnet2: + ext_net_id = net_ext['network']['id'] + self.core_plugin.update_network( + self.ctx, ext_net_id, + {'network': {'router:external': True}}) + self.mixin.update_router( + self.ctx, router['id'], + {'router': {'external_gateway_info': + {'network_id': ext_net_id}}}) + self.mixin.add_router_interface(self.ctx, router['id'], + {'subnet_id': subnet1['subnet']['id']}) + self.mixin.add_router_interface(self.ctx, router['id'], + {'subnet_id': subnet2['subnet']['id']}) + + csnat_filters = {'device_owner': + [l3_const.DEVICE_OWNER_ROUTER_SNAT]} + csnat_ports = self.core_plugin.get_ports( + self.ctx, filters=csnat_filters) + self.assertEqual(2, len(csnat_ports)) + + dvr_filters = {'device_owner': + [l3_const.DEVICE_OWNER_DVR_INTERFACE]} + dvr_ports = self.core_plugin.get_ports( + self.ctx, filters=dvr_filters) + self.assertEqual(2, len(dvr_ports)) + + with mock.patch.object(manager.NeutronManager, + 'get_service_plugins') as get_svc_plugin: + get_svc_plugin.return_value = { + plugin_const.L3_ROUTER_NAT: self.mixin} + self.mixin.remove_router_interface( + self.ctx, router['id'], {'port_id': dvr_ports[0]['id']}) + + csnat_ports = self.core_plugin.get_ports( + self.ctx, filters=csnat_filters) + self.assertEqual(1, len(csnat_ports)) + self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'], + csnat_ports[0]['fixed_ips'][0]['subnet_id']) + + dvr_ports = self.core_plugin.get_ports( + self.ctx, 
filters=dvr_filters) + self.assertEqual(1, len(dvr_ports)) + def test__validate_router_migration_notify_advanced_services(self): - router = {'name': 'foo_router', 'admin_state_up': True} + router = {'name': 'foo_router', 'admin_state_up': False} router_db = self._create_router(router) with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify: self.mixin._validate_router_migration( diff --git a/neutron/tests/unit/db/test_l3_hamode_db.py b/neutron/tests/unit/db/test_l3_hamode_db.py index e988c400726..69a6826dfe8 100644 --- a/neutron/tests/unit/db/test_l3_hamode_db.py +++ b/neutron/tests/unit/db/test_l3_hamode_db.py @@ -29,6 +29,7 @@ from neutron.db import l3_hamode_db from neutron.extensions import l3 from neutron.extensions import l3_ext_ha_mode from neutron.extensions import portbindings +from neutron.extensions import providernet from neutron import manager from neutron.scheduler import l3_agent_scheduler from neutron.tests.common import helpers @@ -178,6 +179,16 @@ class L3HATestCase(L3HATestFramework): router = self._create_router(ha=False) self.assertFalse(router['ha']) + def test_add_ha_network_settings(self): + cfg.CONF.set_override('l3_ha_network_type', 'abc') + cfg.CONF.set_override('l3_ha_network_physical_name', 'def') + + network = {} + self.plugin._add_ha_network_settings(network) + + self.assertEqual('abc', network[providernet.NETWORK_TYPE]) + self.assertEqual('def', network[providernet.PHYSICAL_NETWORK]) + def test_router_create_with_ha_conf_enabled(self): cfg.CONF.set_override('l3_ha', True) diff --git a/neutron/tests/unit/db/test_migration.py b/neutron/tests/unit/db/test_migration.py index 955605aadca..3de29cb7bbc 100644 --- a/neutron/tests/unit/db/test_migration.py +++ b/neutron/tests/unit/db/test_migration.py @@ -13,9 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. 
+import copy +import os import sys +from alembic import config as alembic_config +import fixtures import mock +import pkg_resources from neutron.db import migration from neutron.db.migration import cli @@ -26,6 +31,31 @@ class FakeConfig(object): service = '' +class FakeRevision(object): + path = 'fakepath' + + def __init__(self, labels=None, down_revision=None): + if not labels: + labels = set() + self.branch_labels = labels + self.down_revision = down_revision + + +class MigrationEntrypointsMemento(fixtures.Fixture): + '''Create a copy of the migration entrypoints map so it can be restored + during test cleanup. + ''' + + def _setUp(self): + self.ep_backup = {} + for proj, ep in cli.migration_entrypoints.items(): + self.ep_backup[proj] = copy.copy(ep) + self.addCleanup(self.restore) + + def restore(self): + cli.migration_entrypoints = self.ep_backup + + class TestDbMigration(base.BaseTestCase): def setUp(self): @@ -79,9 +109,37 @@ class TestCli(base.BaseTestCase): self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_err.side_effect = SystemExit + def mocked_root_dir(cfg): + return os.path.join('/fake/dir', cli._get_project_base(cfg)) + mock_root = mock.patch.object(cli, '_get_package_root_dir').start() + mock_root.side_effect = mocked_root_dir + # Avoid creating fake directories + mock.patch('neutron.common.utils.ensure_dir').start() + + # Set up some configs and entrypoints for tests to chew on + self.configs = [] + self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') + ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') + self.useFixture(MigrationEntrypointsMemento()) + cli.migration_entrypoints = {} + for project in self.projects: + config = alembic_config.Config(ini) + config.set_main_option('neutron_project', project) + module_name = project.replace('-', '_') + '.db.migration' + attrs = ('alembic_migrations',) + script_location = ':'.join([module_name, attrs[0]]) + config.set_main_option('script_location', 
script_location) + self.configs.append(config) + entrypoint = pkg_resources.EntryPoint(project, + module_name, + attrs=attrs) + cli.migration_entrypoints[project] = entrypoint + def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs=[{}]): - with mock.patch.object(sys, 'argv', argv), mock.patch.object( - cli, 'run_sanity_checks'): + with mock.patch.object(sys, 'argv', argv),\ + mock.patch.object(cli, 'run_sanity_checks'),\ + mock.patch.object(cli, 'validate_labels'): + cli.main() self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name, *exp_args, **kwargs) @@ -112,17 +170,20 @@ class TestCli(base.BaseTestCase): def test_check_migration(self): with mock.patch.object(cli, 'validate_heads_file') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches') - validate.assert_called_once_with(mock.ANY) + self.assertEqual(len(self.projects), validate.call_count) def _test_database_sync_revision(self, separate_branches=True): - with mock.patch.object(cli, 'update_heads_file') as update: - fake_config = FakeConfig() + with mock.patch.object(cli, 'update_heads_file') as update,\ + mock.patch.object(cli, '_use_separate_migration_branches', + return_value=separate_branches): if separate_branches: + mock.patch('os.path.exists').start() expected_kwargs = [ {'message': 'message', 'sql': False, 'autogenerate': True, 'version_path': - cli._get_version_branch_path(fake_config, branch), + cli._get_version_branch_path(config, branch), 'head': cli._get_branch_head(branch)} + for config in self.configs for branch in cli.MIGRATION_BRANCHES] else: expected_kwargs = [{ @@ -133,7 +194,7 @@ class TestCli(base.BaseTestCase): 'revision', (), expected_kwargs ) - update.assert_called_once_with(mock.ANY) + self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: @@ -145,14 +206,12 @@ class TestCli(base.BaseTestCase): 'revision', (), expected_kwargs ) - update.assert_called_once_with(mock.ANY) + 
self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() - @mock.patch.object(cli, '_use_separate_migration_branches', - return_value=False) - def test_database_sync_revision_no_branches(self, *args): + def test_database_sync_revision_no_branches(self): # Test that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) @@ -201,8 +260,10 @@ class TestCli(base.BaseTestCase): branchless=False): if file_heads is None: file_heads = [] - fake_config = FakeConfig() - with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fake_config = self.configs[0] + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\ + mock.patch.object(cli, '_use_separate_migration_branches', + return_value=not branchless): fc.return_value.get_heads.return_value = heads with mock.patch('six.moves.builtins.open') as mock_open: mock_open.return_value.__enter__ = lambda s: s @@ -210,24 +271,23 @@ class TestCli(base.BaseTestCase): mock_open.return_value.read.return_value = ( '\n'.join(file_heads)) - with mock.patch('os.path.isfile') as is_file: - is_file.return_value = bool(file_heads) + if all(head in file_heads for head in heads): + cli.validate_heads_file(fake_config) + else: + self.assertRaises( + SystemExit, + cli.validate_heads_file, + fake_config + ) + self.assertTrue(self.mock_alembic_err.called) - if all(head in file_heads for head in heads): - cli.validate_heads_file(fake_config) - else: - self.assertRaises( - SystemExit, - cli.validate_heads_file, - fake_config - ) - self.mock_alembic_err.assert_called_once_with(mock.ANY) if branchless: mock_open.assert_called_with( cli._get_head_file_path(fake_config)) else: mock_open.assert_called_with( cli._get_heads_file_path(fake_config)) + fc.assert_called_once_with(fake_config) def test_validate_heads_file_multiple_heads(self): @@ -260,22 +320,13 @@ class TestCli(base.BaseTestCase): 
mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - cli.update_heads_file(mock.sentinel.config) + cli.update_heads_file(self.configs[0]) mock_open.return_value.write.assert_called_once_with( '\n'.join(sorted(heads))) - def test_update_heads_file_excessive_heads_negative(self): - with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: - heads = ('b', 'a', 'c', 'kilo') - fc.return_value.get_heads.return_value = heads - self.assertRaises( - SystemExit, - cli.update_heads_file, - mock.sentinel.config - ) - self.mock_alembic_err.assert_called_once_with(mock.ANY) - - def test_update_heads_file_success(self): + @mock.patch('os.path.exists') + @mock.patch('os.remove') + def test_update_heads_file_success(self, *os_mocks): with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: heads = ('a', 'b') fc.return_value.get_heads.return_value = heads @@ -283,6 +334,129 @@ class TestCli(base.BaseTestCase): mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - cli.update_heads_file(mock.sentinel.config) + cli.update_heads_file(self.configs[0]) mock_open.return_value.write.assert_called_once_with( '\n'.join(heads)) + + old_head_file = cli._get_head_file_path(self.configs[0]) + for mock_ in os_mocks: + mock_.assert_called_with(old_head_file) + + def test_get_project_base(self): + config = alembic_config.Config() + config.set_main_option('script_location', 'a.b.c:d') + proj_base = cli._get_project_base(config) + self.assertEqual('a', proj_base) + + def test_get_root_versions_dir(self): + config = alembic_config.Config() + config.set_main_option('script_location', 'a.b.c:d') + versions_dir = cli._get_root_versions_dir(config) + self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) + + def test_get_subproject_script_location(self): + foo_ep = cli._get_subproject_script_location('networking-foo') + expected = 'networking_foo.db.migration:alembic_migrations' + 
self.assertEqual(expected, foo_ep) + + def test_get_subproject_script_location_not_installed(self): + self.assertRaises( + SystemExit, cli._get_subproject_script_location, 'not-installed') + + def test_get_service_script_location(self): + fwaas_ep = cli._get_service_script_location('fwaas') + expected = 'neutron_fwaas.db.migration:alembic_migrations' + self.assertEqual(expected, fwaas_ep) + + def test_get_service_script_location_not_installed(self): + self.assertRaises( + SystemExit, cli._get_service_script_location, 'myaas') + + def test_get_subproject_base_not_installed(self): + self.assertRaises( + SystemExit, cli._get_subproject_base, 'not-installed') + + def test__compare_labels_ok(self): + labels = {'label1', 'label2'} + fake_revision = FakeRevision(labels) + cli._compare_labels(fake_revision, {'label1', 'label2'}) + + def test__compare_labels_fail_unexpected_labels(self): + labels = {'label1', 'label2', 'label3'} + fake_revision = FakeRevision(labels) + self.assertRaises( + SystemExit, + cli._compare_labels, fake_revision, {'label1', 'label2'}) + + @mock.patch.object(cli, '_compare_labels') + def test__validate_single_revision_labels_branchless_fail_different_labels( + self, compare_mock): + + fake_down_revision = FakeRevision() + fake_revision = FakeRevision(down_revision=fake_down_revision) + + script_dir = mock.Mock() + script_dir.get_revision.return_value = fake_down_revision + cli._validate_single_revision_labels(script_dir, fake_revision, + label=None) + + expected_labels = set() + compare_mock.assert_has_calls( + [mock.call(revision, expected_labels) + for revision in (fake_revision, fake_down_revision)] + ) + + @mock.patch.object(cli, '_compare_labels') + def test__validate_single_revision_labels_branches_fail_different_labels( + self, compare_mock): + + fake_down_revision = FakeRevision() + fake_revision = FakeRevision(down_revision=fake_down_revision) + + script_dir = mock.Mock() + script_dir.get_revision.return_value = fake_down_revision + 
cli._validate_single_revision_labels( + script_dir, fake_revision, label='fakebranch') + + expected_labels = {'fakebranch'} + compare_mock.assert_has_calls( + [mock.call(revision, expected_labels) + for revision in (fake_revision, fake_down_revision)] + ) + + @mock.patch.object(cli, '_validate_single_revision_labels') + def test__validate_revision_validates_branches(self, validate_mock): + script_dir = mock.Mock() + fake_revision = FakeRevision() + branch = cli.MIGRATION_BRANCHES[0] + fake_revision.path = os.path.join('/fake/path', branch) + cli._validate_revision(script_dir, fake_revision) + validate_mock.assert_called_with( + script_dir, fake_revision, label=branch) + + @mock.patch.object(cli, '_validate_single_revision_labels') + def test__validate_revision_validates_branchless_migrations( + self, validate_mock): + + script_dir = mock.Mock() + fake_revision = FakeRevision() + cli._validate_revision(script_dir, fake_revision) + validate_mock.assert_called_with(script_dir, fake_revision) + + @mock.patch.object(cli, '_validate_revision') + @mock.patch('alembic.script.ScriptDirectory.walk_revisions') + def test_validate_labels_walks_thru_all_revisions( + self, walk_mock, validate_mock): + + revisions = [mock.Mock() for i in range(10)] + walk_mock.return_value = revisions + cli.validate_labels(self.configs[0]) + validate_mock.assert_has_calls( + [mock.call(mock.ANY, revision) for revision in revisions] + ) + + +class TestSafetyChecks(base.BaseTestCase): + + def test_validate_labels(self, *mocks): + cli.validate_labels(cli.get_neutron_config()) diff --git a/neutron/tests/unit/extensions/foxinsocks.py b/neutron/tests/unit/extensions/foxinsocks.py index 39d2bd829bb..88908a4902c 100644 --- a/neutron/tests/unit/extensions/foxinsocks.py +++ b/neutron/tests/unit/extensions/foxinsocks.py @@ -77,7 +77,7 @@ class Foxinsocks(object): # You can use content type header to test for XML. 
data = jsonutils.loads(res.body) data['FOXNSOX:googoose'] = req.GET.get('chewing') - res.body = jsonutils.dumps(data) + res.body = jsonutils.dumps(data).encode('utf-8') return res req_ext1 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', @@ -89,7 +89,7 @@ class Foxinsocks(object): # You can use content type header to test for XML. data = jsonutils.loads(res.body) data['FOXNSOX:big_bands'] = 'Pig Bands!' - res.body = jsonutils.dumps(data) + res.body = jsonutils.dumps(data).encode('utf-8') return res req_ext2 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', diff --git a/neutron/tests/unit/extensions/test_dns.py b/neutron/tests/unit/extensions/test_dns.py new file mode 100644 index 00000000000..797da83af57 --- /dev/null +++ b/neutron/tests/unit/extensions/test_dns.py @@ -0,0 +1,469 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import math +import netaddr + +from oslo_config import cfg + +from neutron.common import constants +from neutron.common import utils +from neutron import context +from neutron.db import db_base_plugin_v2 +from neutron.extensions import dns +from neutron.tests.unit.db import test_db_base_plugin_v2 + + +class DnsExtensionManager(object): + + def get_resources(self): + return [] + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + def get_extended_resources(self, version): + return dns.get_extended_resources(version) + + +class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2): + """Test plugin to mixin the DNS Integration extensions. + """ + + supported_extension_aliases = ["dns-integration"] + + +class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): + """Test API extension dns attributes. + """ + + def setUp(self): + plugin = ('neutron.tests.unit.extensions.test_dns.' + + 'DnsExtensionTestPlugin') + ext_mgr = DnsExtensionManager() + super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + def _create_port(self, fmt, net_id, expected_res_status=None, + arg_list=None, **kwargs): + data = {'port': {'network_id': net_id, + 'tenant_id': self._tenant_id}} + + for arg in (('admin_state_up', 'device_id', + 'mac_address', 'name', 'fixed_ips', + 'tenant_id', 'device_owner', 'security_groups', + 'dns_name') + (arg_list or ())): + # Arg must be present + if arg in kwargs: + data['port'][arg] = kwargs[arg] + # create a dhcp port device id if one hasn't been supplied + if ('device_owner' in kwargs and + kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and + 'host' in kwargs and + 'device_id' not in kwargs): + device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) + data['port']['device_id'] = device_id + port_req = self.new_create_request('ports', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + 
port_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + port_res = port_req.get_response(self.api) + if expected_res_status: + self.assertEqual(port_res.status_int, expected_res_status) + return port_res + + def _test_list_resources(self, resource, items, neutron_context=None, + query_params=None): + res = self._list('%ss' % resource, + neutron_context=neutron_context, + query_params=query_params) + resource = resource.replace('-', '_') + self.assertItemsEqual([i['id'] for i in res['%ss' % resource]], + [i[resource]['id'] for i in items]) + return res + + def test_create_port_json(self): + keys = [('admin_state_up', True), ('status', self.port_create_status)] + with self.port(name='myname') as port: + for k, v in keys: + self.assertEqual(port['port'][k], v) + self.assertIn('mac_address', port['port']) + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual('myname', port['port']['name']) + self._verify_dns_assigment(port['port'], + ips_list=['10.0.0.2']) + + def test_list_ports(self): + # for this test we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + with self.port() as v1, self.port() as v2, self.port() as v3: + ports = (v1, v2, v3) + res = self._test_list_resources('port', ports) + for port in res['ports']: + self._verify_dns_assigment( + port, ips_list=[port['fixed_ips'][0]['ip_address']]) + + def test_show_port(self): + with self.port() as port: + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port']['id'], sport['port']['id']) + self._verify_dns_assigment( + sport['port'], + ips_list=[sport['port']['fixed_ips'][0]['ip_address']]) + + def test_update_port_non_default_dns_domain_with_dns_name(self): + with self.port() as port: + cfg.CONF.set_override('dns_domain', 'example.com') + data = {'port': 
{'admin_state_up': False, 'dns_name': 'vm1'}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + self._verify_dns_assigment(res['port'], + ips_list=['10.0.0.2'], + dns_name='vm1') + + def test_update_port_default_dns_domain_with_dns_name(self): + with self.port() as port: + data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + self._verify_dns_assigment(res['port'], + ips_list=['10.0.0.2']) + + def _verify_dns_assigment(self, port, ips_list=[], exp_ips_ipv4=0, + exp_ips_ipv6=0, ipv4_cidrs=[], ipv6_cidrs=[], + dns_name=''): + self.assertEqual(port['dns_name'], dns_name) + dns_assignment = port['dns_assignment'] + if ips_list: + self.assertEqual(len(dns_assignment), len(ips_list)) + ips_set = set(ips_list) + else: + self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6) + ipv4_count = 0 + ipv6_count = 0 + subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs] + subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs] + + request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn( + dns_name) + for assignment in dns_assignment: + if ips_list: + self.assertIn(assignment['ip_address'], ips_set) + ips_set.remove(assignment['ip_address']) + else: + ip = netaddr.IPAddress(assignment['ip_address']) + if ip.version == 4: + self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4)) + ipv4_count += 1 + else: + self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6)) + ipv6_count += 1 + hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name, + request_fqdn, + assignment) + self.assertEqual(assignment['hostname'], hostname) + 
self.assertEqual(assignment['fqdn'], fqdn) + if ips_list: + self.assertFalse(ips_set) + else: + self.assertEqual(ipv4_count, exp_ips_ipv4) + self.assertEqual(ipv6_count, exp_ips_ipv6) + + def _get_dns_domain(self): + if not cfg.CONF.dns_domain: + return '' + if cfg.CONF.dns_domain.endswith('.'): + return cfg.CONF.dns_domain + return '%s.' % cfg.CONF.dns_domain + + def _get_request_hostname_and_fqdn(self, dns_name): + request_dns_name = '' + request_fqdn = '' + dns_domain = self._get_dns_domain() + if dns_name and dns_domain and dns_domain != 'openstacklocal.': + request_dns_name = dns_name + request_fqdn = request_dns_name + if not request_dns_name.endswith('.'): + request_fqdn = '%s.%s' % (dns_name, dns_domain) + return request_dns_name, request_fqdn + + def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn, + assignment): + dns_domain = self._get_dns_domain() + if request_dns_name: + hostname = request_dns_name + fqdn = request_fqdn + else: + hostname = 'host-%s' % assignment['ip_address'].replace( + '.', '-').replace(':', '-') + fqdn = hostname + if dns_domain: + fqdn = '%s.%s' % (hostname, dns_domain) + return hostname, fqdn + + def _verify_ip_in_subnet(self, ip, subnets_list): + for subnet in subnets_list: + if ip in subnet: + return True + return False + + def test_update_port_update_ip(self): + """Test update of port IP. + + Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. 
+ """ + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + data = {'port': {'fixed_ips': [{'subnet_id': + subnet['subnet']['id'], + 'ip_address': "10.0.0.10"}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.10') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10']) + + def test_update_port_update_ip_address_only(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + data = {'port': {'fixed_ips': [{'subnet_id': + subnet['subnet']['id'], + 'ip_address': "10.0.0.10"}, + {'ip_address': "10.0.0.2"}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + self.assertIn({'ip_address': '10.0.0.2', + 'subnet_id': subnet['subnet']['id']}, ips) + self.assertIn({'ip_address': '10.0.0.10', + 'subnet_id': subnet['subnet']['id']}, ips) + self._verify_dns_assigment(res['port'], + ips_list=['10.0.0.10', + '10.0.0.2']) + + def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period( + self): + cfg.CONF.set_override('dns_domain', 'example.com') + res = 
self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name='vm1') + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period( + self): + cfg.CONF.set_override('dns_domain', 'example.com.') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name='vm1') + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain( + self): + cfg.CONF.set_override('dns_domain', '') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period( + self): + cfg.CONF.set_override('dns_domain', 'example.com') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name='vm1.example.com.') + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period( + self): + cfg.CONF.set_override('dns_domain', 'example.com.') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name='vm1.example.com.') + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period( + self): + cfg.CONF.set_override('dns_domain', 'openstacklocal.') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() + self.assertEqual(res.status_code, 201) + + def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain( + self): + cfg.CONF.set_override('dns_domain', 'example.com') + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name='vm1.bad-domain.com.') + self.assertEqual(res.status_code, 400) + expected_error = ('The dns_name passed is a FQDN. 
Its higher level ' + 'labels must be equal to the dns_domain option in ' + 'neutron.conf') + self.assertIn(expected_error, res.text) + + def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain( + self): + cfg.CONF.set_override('dns_domain', 'example.com') + num_labels = int( + math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN)) + filler_len = int( + math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN)) + dns_name = (('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * + num_labels + 'a' * filler_len) + res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( + dns_name=dns_name) + self.assertEqual(res.status_code, 400) + expected_error = ("When the two are concatenated to form a FQDN " + "(with a '.' at the end), the resulting length " + "exceeds the maximum size") + self.assertIn(expected_error, res.text) + + def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self, + dns_name=''): + """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + sub_dicts = [ + {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', + 'ip_version': 4, 'ra_addr_mode': None}, + {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', + 'ip_version': 4, 'ra_addr_mode': None}, + {'gateway': 'fe80::1', 'cidr': 'fe80::/64', + 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, + {'gateway': 'fe81::1', 'cidr': 'fe81::/64', + 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, + {'gateway': 'fe82::1', 'cidr': 'fe82::/64', + 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, + {'gateway': 'fe83::1', 'cidr': 'fe83::/64', + 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] + subnets = {} + for sub_dict in sub_dicts: + subnet = self._make_subnet( + self.fmt, network, + gateway=sub_dict['gateway'], + cidr=sub_dict['cidr'], + ip_version=sub_dict['ip_version'], + ipv6_ra_mode=sub_dict['ra_addr_mode'], + 
ipv6_address_mode=sub_dict['ra_addr_mode']) + subnets[subnet['subnet']['id']] = sub_dict + res = self._create_port(self.fmt, net_id=network['network']['id'], + dns_name=dns_name) + if res.status_code != 201: + return res + port = self.deserialize(self.fmt, res) + # Since the create port request was made without a list of fixed IPs, + # the port should be associated with addresses for one of the + # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 + # SLAAC subnets. + self.assertEqual(4, len(port['port']['fixed_ips'])) + addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, + constants.IPV6_SLAAC: 0} + for fixed_ip in port['port']['fixed_ips']: + subnet_id = fixed_ip['subnet_id'] + if subnet_id in subnets: + addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 + self.assertEqual(1, addr_mode_count[None]) + self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) + self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) + self._verify_dns_assigment(port['port'], exp_ips_ipv4=1, + exp_ips_ipv6=1, + ipv4_cidrs=[sub_dicts[0]['cidr'], + sub_dicts[1]['cidr']], + ipv6_cidrs=[sub_dicts[4]['cidr'], + sub_dicts[5]['cidr']], + dns_name=dns_name) + return res + + def test_api_extension_validation_with_bad_dns_names(self): + num_labels = int( + math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN)) + filler_len = int( + math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN)) + dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-', + '-vm01.test1', 'vm01.-test1', 'vm01._test1', + 'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.', + 'vm01.123.', '-' + 'a' * dns.DNS_LABEL_MAX_LEN, + 'a' * (dns.DNS_LABEL_MAX_LEN + 1), + ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * + num_labels + 'a' * (filler_len + 1)] + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', + 'ip_version': 4, 'ra_addr_mode': None} + self._make_subnet(self.fmt, 
network, gateway=sub_dict['gateway'], + cidr=sub_dict['cidr'], + ip_version=sub_dict['ip_version'], + ipv6_ra_mode=sub_dict['ra_addr_mode'], + ipv6_address_mode=sub_dict['ra_addr_mode']) + for dns_name in dns_names: + res = self._create_port(self.fmt, net_id=network['network']['id'], + dns_name=dns_name) + self.assertEqual(res.status_code, 400) + is_expected_message = ( + 'cannot be converted to lowercase string' in res.text or + 'not a valid PQDN or FQDN. Reason:' in res.text) + self.assertTrue(is_expected_message) + + def test_api_extension_validation_with_good_dns_names(self): + cfg.CONF.set_override('dns_domain', 'example.com') + higher_labels_len = len('example.com.') + num_labels = int( + math.floor((dns.FQDN_MAX_LEN - higher_labels_len) / + dns.DNS_LABEL_MAX_LEN)) + filler_len = int( + math.floor((dns.FQDN_MAX_LEN - higher_labels_len) % + dns.DNS_LABEL_MAX_LEN)) + dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.', + '8vm01', 'vm-01.example.com.', 'vm01.test', + 'vm01.test.example.com.', 'vm01.test-100', + 'vm01.test-100.example.com.', + 'a' * dns.DNS_LABEL_MAX_LEN, + ('a' * dns.DNS_LABEL_MAX_LEN) + '.example.com.', + ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * + num_labels + 'a' * (filler_len - 1)] + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', + 'ip_version': 4, 'ra_addr_mode': None} + self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'], + cidr=sub_dict['cidr'], + ip_version=sub_dict['ip_version'], + ipv6_ra_mode=sub_dict['ra_addr_mode'], + ipv6_address_mode=sub_dict['ra_addr_mode']) + for dns_name in dns_names: + res = self._create_port(self.fmt, net_id=network['network']['id'], + dns_name=dns_name) + self.assertEqual(res.status_code, 201) diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index 77e55682b97..7c6f47cad38 100644 --- 
a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -2477,6 +2477,23 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): fip['floatingip']['floating_ip_address']) self.assertEqual(floating_ip.version, 4) + def test_create_router_gateway_fails(self): + # Force _update_router_gw_info failure + plugin = manager.NeutronManager.get_service_plugins()[ + service_constants.L3_ROUTER_NAT] + ctx = context.Context('', 'foo') + plugin._update_router_gw_info = mock.Mock( + side_effect=n_exc.NeutronException) + data = {'router': { + 'name': 'router1', 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'some_uuid'}}} + + # Verify router doesn't persist on failure + self.assertRaises(n_exc.NeutronException, + plugin.create_router, ctx, data) + routers = plugin.get_routers(ctx) + self.assertEqual(0, len(routers)) + class L3AgentDbTestCaseBase(L3NatTestCaseMixin): diff --git a/neutron/tests/unit/extensions/test_quotasv2.py b/neutron/tests/unit/extensions/test_quotasv2.py index e0780e1ee78..8e0e55b3462 100644 --- a/neutron/tests/unit/extensions/test_quotasv2.py +++ b/neutron/tests/unit/extensions/test_quotasv2.py @@ -344,6 +344,24 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase): extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) + def test_make_reservation_resource_unknown_raises(self): + tenant_id = 'tenant_id1' + self.assertRaises(exceptions.QuotaResourceUnknown, + quota.QUOTAS.make_reservation, + context.get_admin_context(load_admin_roles=False), + tenant_id, + {'foobar': 1}, + plugin=None) + + def test_make_reservation_negative_delta_raises(self): + tenant_id = 'tenant_id1' + self.assertRaises(exceptions.InvalidQuotaValue, + quota.QUOTAS.make_reservation, + context.get_admin_context(load_admin_roles=False), + tenant_id, + {'network': -1}, + plugin=None) + class QuotaExtensionCfgTestCase(QuotaExtensionTestCase): fmt = 'json' diff --git a/neutron/tests/unit/notifiers/test_nova.py 
b/neutron/tests/unit/notifiers/test_nova.py index 15727b5aec5..0bb9693645c 100644 --- a/neutron/tests/unit/notifiers/test_nova.py +++ b/neutron/tests/unit/notifiers/test_nova.py @@ -290,3 +290,18 @@ class TestNovaNotify(base.BaseTestCase): self.nova_notifier.batch_notifier.pending_events[0], event_dis) self.assertEqual( self.nova_notifier.batch_notifier.pending_events[1], event_assoc) + + def test_delete_port_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' + returned_obj = {'port': + {'device_owner': 'compute:dfd', + 'id': port_id, + 'device_id': device_id}} + + expected_event = {'server_uuid': device_id, + 'name': nova.VIF_DELETED, + 'tag': port_id} + event = self.nova_notifier.create_port_changed_event('delete_port', + {}, returned_obj) + self.assertEqual(expected_event, event) diff --git a/neutron/tests/unit/objects/__init__.py b/neutron/tests/unit/objects/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/objects/qos/__init__.py b/neutron/tests/unit/objects/qos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/objects/qos/test_policy.py b/neutron/tests/unit/objects/qos/test_policy.py new file mode 100644 index 00000000000..6b29b06bb59 --- /dev/null +++ b/neutron/tests/unit/objects/qos/test_policy.py @@ -0,0 +1,295 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.common import exceptions as n_exc +from neutron.db import api as db_api +from neutron.db import models_v2 +from neutron.objects.qos import policy +from neutron.objects.qos import rule +from neutron.tests.unit.objects import test_base +from neutron.tests.unit import testlib_api + + +class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase): + + _test_class = policy.QosPolicy + + def setUp(self): + super(QosPolicyObjectTestCase, self).setUp() + # qos_policy_ids will be incorrect, but we don't care in this test + self.db_qos_bandwidth_rules = [ + self.get_random_fields(rule.QosBandwidthLimitRule) + for _ in range(3)] + + self.model_map = { + self._test_class.db_model: self.db_objs, + rule.QosBandwidthLimitRule.db_model: self.db_qos_bandwidth_rules} + + def fake_get_objects(self, context, model, **kwargs): + return self.model_map[model] + + def fake_get_object(self, context, model, id): + objects = self.model_map[model] + return [obj for obj in objects if obj['id'] == id][0] + + def test_get_objects(self): + admin_context = self.context.elevated() + with mock.patch.object( + db_api, 'get_objects', + side_effect=self.fake_get_objects) as get_objects_mock: + + with mock.patch.object( + db_api, 'get_object', + side_effect=self.fake_get_object): + + with mock.patch.object( + self.context, + 'elevated', + return_value=admin_context) as context_mock: + + objs = self._test_class.get_objects(self.context) + context_mock.assert_called_once_with() + get_objects_mock.assert_any_call( + admin_context, self._test_class.db_model) + self._validate_objects(self.db_objs, objs) + + def test_get_objects_valid_fields(self): + admin_context = self.context.elevated() + + with mock.patch.object( + db_api, 'get_objects', + return_value=[self.db_obj]) as get_objects_mock: + + with mock.patch.object( + self.context, + 'elevated', + return_value=admin_context) as context_mock: + + objs = self._test_class.get_objects( + self.context, + 
**self.valid_field_filter) + context_mock.assert_called_once_with() + get_objects_mock.assert_any_call( + admin_context, self._test_class.db_model, + **self.valid_field_filter) + self._validate_objects([self.db_obj], objs) + + def test_get_by_id(self): + admin_context = self.context.elevated() + with mock.patch.object(db_api, 'get_object', + return_value=self.db_obj) as get_object_mock: + with mock.patch.object(self.context, + 'elevated', + return_value=admin_context) as context_mock: + obj = self._test_class.get_by_id(self.context, id='fake_id') + self.assertTrue(self._is_test_class(obj)) + self.assertEqual(self.db_obj, test_base.get_obj_db_fields(obj)) + context_mock.assert_called_once_with() + get_object_mock.assert_called_once_with( + admin_context, self._test_class.db_model, id='fake_id') + + +class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = policy.QosPolicy + + def setUp(self): + super(QosPolicyDbObjectTestCase, self).setUp() + self._create_test_network() + self._create_test_port(self._network) + + def _create_test_policy(self): + policy_obj = policy.QosPolicy(self.context, **self.db_obj) + policy_obj.create() + return policy_obj + + def _create_test_policy_with_rule(self): + policy_obj = self._create_test_policy() + + rule_fields = self.get_random_fields( + obj_cls=rule.QosBandwidthLimitRule) + rule_fields['qos_policy_id'] = policy_obj.id + + rule_obj = rule.QosBandwidthLimitRule(self.context, **rule_fields) + rule_obj.create() + + return policy_obj, rule_obj + + def _create_test_network(self): + # TODO(ihrachys): replace with network.create() once we get an object + # implementation for networks + self._network = db_api.create_object(self.context, models_v2.Network, + {'name': 'test-network1'}) + + def _create_test_port(self, network): + # TODO(ihrachys): replace with port.create() once we get an object + # implementation for ports + self._port = db_api.create_object(self.context, models_v2.Port, 
+ {'name': 'test-port1', + 'network_id': network['id'], + 'mac_address': 'fake_mac', + 'admin_state_up': True, + 'status': 'ACTIVE', + 'device_id': 'fake_device', + 'device_owner': 'fake_owner'}) + + def test_attach_network_get_network_policy(self): + + obj = self._create_test_policy() + + policy_obj = policy.QosPolicy.get_network_policy(self.context, + self._network['id']) + self.assertIsNone(policy_obj) + + # Now attach policy and repeat + obj.attach_network(self._network['id']) + + policy_obj = policy.QosPolicy.get_network_policy(self.context, + self._network['id']) + self.assertEqual(obj, policy_obj) + + def test_attach_network_nonexistent_network(self): + + obj = self._create_test_policy() + self.assertRaises(n_exc.NetworkQosBindingNotFound, + obj.attach_network, 'non-existent-network') + + def test_attach_port_nonexistent_port(self): + + obj = self._create_test_policy() + self.assertRaises(n_exc.PortQosBindingNotFound, + obj.attach_port, 'non-existent-port') + + def test_attach_network_nonexistent_policy(self): + + policy_obj = policy.QosPolicy(self.context, **self.db_obj) + self.assertRaises(n_exc.NetworkQosBindingNotFound, + policy_obj.attach_network, self._network['id']) + + def test_attach_port_nonexistent_policy(self): + + policy_obj = policy.QosPolicy(self.context, **self.db_obj) + self.assertRaises(n_exc.PortQosBindingNotFound, + policy_obj.attach_port, self._port['id']) + + def test_attach_port_get_port_policy(self): + + obj = self._create_test_policy() + + policy_obj = policy.QosPolicy.get_network_policy(self.context, + self._network['id']) + + self.assertIsNone(policy_obj) + + # Now attach policy and repeat + obj.attach_port(self._port['id']) + + policy_obj = policy.QosPolicy.get_port_policy(self.context, + self._port['id']) + self.assertEqual(obj, policy_obj) + + def test_detach_port(self): + obj = self._create_test_policy() + obj.attach_port(self._port['id']) + obj.detach_port(self._port['id']) + + policy_obj = 
policy.QosPolicy.get_port_policy(self.context, + self._port['id']) + self.assertIsNone(policy_obj) + + def test_detach_network(self): + obj = self._create_test_policy() + obj.attach_network(self._network['id']) + obj.detach_network(self._network['id']) + + policy_obj = policy.QosPolicy.get_network_policy(self.context, + self._network['id']) + self.assertIsNone(policy_obj) + + def test_detach_port_nonexistent_port(self): + obj = self._create_test_policy() + self.assertRaises(n_exc.PortQosBindingNotFound, + obj.detach_port, 'non-existent-port') + + def test_detach_network_nonexistent_network(self): + obj = self._create_test_policy() + self.assertRaises(n_exc.NetworkQosBindingNotFound, + obj.detach_network, 'non-existent-port') + + def test_detach_port_nonexistent_policy(self): + policy_obj = policy.QosPolicy(self.context, **self.db_obj) + self.assertRaises(n_exc.PortQosBindingNotFound, + policy_obj.detach_port, self._port['id']) + + def test_detach_network_nonexistent_policy(self): + policy_obj = policy.QosPolicy(self.context, **self.db_obj) + self.assertRaises(n_exc.NetworkQosBindingNotFound, + policy_obj.detach_network, self._network['id']) + + def test_synthetic_rule_fields(self): + policy_obj, rule_obj = self._create_test_policy_with_rule() + policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id) + self.assertEqual([rule_obj], policy_obj.rules) + + def test_get_by_id_fetches_rules_non_lazily(self): + policy_obj, rule_obj = self._create_test_policy_with_rule() + policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id) + + primitive = policy_obj.obj_to_primitive() + self.assertNotEqual([], (primitive['versioned_object.data']['rules'])) + + def test_to_dict_returns_rules_as_dicts(self): + policy_obj, rule_obj = self._create_test_policy_with_rule() + policy_obj = policy.QosPolicy.get_by_id(self.context, policy_obj.id) + + obj_dict = policy_obj.to_dict() + rule_dict = rule_obj.to_dict() + + # first make sure that to_dict() is still sane and 
does not return + # objects + for obj in (rule_dict, obj_dict): + self.assertIsInstance(obj, dict) + + self.assertEqual(rule_dict, obj_dict['rules'][0]) + + def test_shared_default(self): + self.db_obj.pop('shared') + obj = self._test_class(self.context, **self.db_obj) + self.assertEqual(False, obj.shared) + + def test_delete_not_allowed_if_policy_in_use_by_port(self): + obj = self._create_test_policy() + obj.attach_port(self._port['id']) + + self.assertRaises(n_exc.QosPolicyInUse, obj.delete) + + obj.detach_port(self._port['id']) + obj.delete() + + def test_delete_not_allowed_if_policy_in_use_by_network(self): + obj = self._create_test_policy() + obj.attach_network(self._network['id']) + + self.assertRaises(n_exc.QosPolicyInUse, obj.delete) + + obj.detach_network(self._network['id']) + obj.delete() + + def test_reload_rules_reloads_rules(self): + policy_obj, rule_obj = self._create_test_policy_with_rule() + self.assertEqual([], policy_obj.rules) + + policy_obj.reload_rules() + self.assertEqual([rule_obj], policy_obj.rules) diff --git a/neutron/tests/unit/objects/qos/test_rule.py b/neutron/tests/unit/objects/qos/test_rule.py new file mode 100644 index 00000000000..5edc812167a --- /dev/null +++ b/neutron/tests/unit/objects/qos/test_rule.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.objects.qos import policy +from neutron.objects.qos import rule +from neutron.services.qos import qos_consts +from neutron.tests.unit.objects import test_base +from neutron.tests.unit import testlib_api + + +class QosBandwidthLimitRuleObjectTestCase(test_base.BaseObjectIfaceTestCase): + + _test_class = rule.QosBandwidthLimitRule + + def test_to_dict_returns_type(self): + obj = rule.QosBandwidthLimitRule(self.context, **self.db_obj) + dict_ = obj.to_dict() + self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, dict_['type']) + + +class QosBandwidthLimitRuleDbObjectTestCase(test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = rule.QosBandwidthLimitRule + + def setUp(self): + super(QosBandwidthLimitRuleDbObjectTestCase, self).setUp() + + # Prepare policy to be able to insert a rule + generated_qos_policy_id = self.db_obj['qos_policy_id'] + policy_obj = policy.QosPolicy(self.context, + id=generated_qos_policy_id) + policy_obj.create() diff --git a/neutron/tests/unit/objects/qos/test_rule_type.py b/neutron/tests/unit/objects/qos/test_rule_type.py new file mode 100644 index 00000000000..b9a31590395 --- /dev/null +++ b/neutron/tests/unit/objects/qos/test_rule_type.py @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# rule types are so different from other objects that we don't base the test +# class on the common base class for all objects + +import mock + +from neutron import manager +from neutron.objects.qos import rule_type +from neutron.services.qos import qos_consts +from neutron.tests import base as test_base + + +DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + + +class QosRuleTypeObjectTestCase(test_base.BaseTestCase): + + def setUp(self): + self.config_parse() + self.setup_coreplugin(DB_PLUGIN_KLASS) + super(QosRuleTypeObjectTestCase, self).setUp() + + def test_get_objects(self): + core_plugin = manager.NeutronManager.get_plugin() + rule_types_mock = mock.PropertyMock( + return_value=qos_consts.VALID_RULE_TYPES) + with mock.patch.object(core_plugin, 'supported_qos_rule_types', + new_callable=rule_types_mock, + create=True): + types = rule_type.QosRuleType.get_objects() + self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES), + sorted(type_['type'] for type_ in types)) + + def test_wrong_type(self): + self.assertRaises(ValueError, rule_type.QosRuleType, type='bad_type') diff --git a/neutron/tests/unit/objects/test_base.py b/neutron/tests/unit/objects/test_base.py new file mode 100644 index 00000000000..aa9059422c0 --- /dev/null +++ b/neutron/tests/unit/objects/test_base.py @@ -0,0 +1,353 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import random +import string + +import mock +from oslo_db import exception as obj_exc +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields + +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as db_api +from neutron.objects import base +from neutron.tests import base as test_base + + +SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl' +OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.' + 'VersionedObject.obj_from_primitive') + + +class FakeModel(object): + def __init__(self, *args, **kwargs): + pass + + +@obj_base.VersionedObjectRegistry.register +class FakeNeutronObject(base.NeutronDbObject): + + db_model = FakeModel + + fields = { + 'id': obj_fields.UUIDField(), + 'field1': obj_fields.StringField(), + 'field2': obj_fields.StringField() + } + + fields_no_update = ['id'] + + synthetic_fields = ['field2'] + + +def _random_string(n=10): + return ''.join(random.choice(string.ascii_lowercase) for _ in range(n)) + + +def _random_boolean(): + return bool(random.getrandbits(1)) + + +def _random_integer(): + return random.randint(0, 1000) + + +FIELD_TYPE_VALUE_GENERATOR_MAP = { + obj_fields.BooleanField: _random_boolean, + obj_fields.IntegerField: _random_integer, + obj_fields.StringField: _random_string, + obj_fields.UUIDField: _random_string, + obj_fields.ListOfObjectsField: lambda: [] +} + + +def get_obj_db_fields(obj): + return {field: getattr(obj, field) for field in obj.fields + if field not in obj.synthetic_fields} + + +class _BaseObjectTestCase(object): + + _test_class = FakeNeutronObject + + def setUp(self): + super(_BaseObjectTestCase, self).setUp() + self.context = context.get_admin_context() + self.db_objs = list(self.get_random_fields() for _ in range(3)) + self.db_obj = self.db_objs[0] + + valid_field = [f for f in self._test_class.fields + if f not in self._test_class.synthetic_fields][0] + 
self.valid_field_filter = {valid_field: self.db_obj[valid_field]} + + @classmethod + def get_random_fields(cls, obj_cls=None): + obj_cls = obj_cls or cls._test_class + fields = {} + for field, field_obj in obj_cls.fields.items(): + if field not in obj_cls.synthetic_fields: + generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] + fields[field] = generator() + return fields + + def get_updatable_fields(self, fields): + return base.get_updatable_fields(self._test_class, fields) + + @classmethod + def _is_test_class(cls, obj): + return isinstance(obj, cls._test_class) + + +class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase): + + def test_get_by_id(self): + with mock.patch.object(db_api, 'get_object', + return_value=self.db_obj) as get_object_mock: + obj = self._test_class.get_by_id(self.context, id='fake_id') + self.assertTrue(self._is_test_class(obj)) + self.assertEqual(self.db_obj, get_obj_db_fields(obj)) + get_object_mock.assert_called_once_with( + self.context, self._test_class.db_model, id='fake_id') + + def test_get_by_id_missing_object(self): + with mock.patch.object(db_api, 'get_object', return_value=None): + obj = self._test_class.get_by_id(self.context, id='fake_id') + self.assertIsNone(obj) + + def test_get_objects(self): + with mock.patch.object(db_api, 'get_objects', + return_value=self.db_objs) as get_objects_mock: + objs = self._test_class.get_objects(self.context) + self._validate_objects(self.db_objs, objs) + get_objects_mock.assert_called_once_with( + self.context, self._test_class.db_model) + + def test_get_objects_valid_fields(self): + with mock.patch.object( + db_api, 'get_objects', + return_value=[self.db_obj]) as get_objects_mock: + + objs = self._test_class.get_objects(self.context, + **self.valid_field_filter) + self._validate_objects([self.db_obj], objs) + + get_objects_mock.assert_called_with( + self.context, self._test_class.db_model, + **self.valid_field_filter) + + def test_get_objects_mixed_fields(self): + 
synthetic_fields = self._test_class.synthetic_fields + if not synthetic_fields: + self.skipTest('No synthetic fields found in test class %r' % + self._test_class) + + filters = copy.copy(self.valid_field_filter) + filters[synthetic_fields[0]] = 'xxx' + + with mock.patch.object(db_api, 'get_objects', + return_value=self.db_objs): + self.assertRaises(base.exceptions.InvalidInput, + self._test_class.get_objects, self.context, + **filters) + + def test_get_objects_synthetic_fields(self): + synthetic_fields = self._test_class.synthetic_fields + if not synthetic_fields: + self.skipTest('No synthetic fields found in test class %r' % + self._test_class) + + with mock.patch.object(db_api, 'get_objects', + return_value=self.db_objs): + self.assertRaises(base.exceptions.InvalidInput, + self._test_class.get_objects, self.context, + **{synthetic_fields[0]: 'xxx'}) + + def test_get_objects_invalid_fields(self): + with mock.patch.object(db_api, 'get_objects', + return_value=self.db_objs): + self.assertRaises(base.exceptions.InvalidInput, + self._test_class.get_objects, self.context, + fake_field='xxx') + + def _validate_objects(self, expected, observed): + self.assertTrue(all(self._is_test_class(obj) for obj in observed)) + self.assertEqual( + sorted(expected), + sorted(get_obj_db_fields(obj) for obj in observed)) + + def _check_equal(self, obj, db_obj): + self.assertEqual( + sorted(db_obj), + sorted(get_obj_db_fields(obj))) + + def test_create(self): + with mock.patch.object(db_api, 'create_object', + return_value=self.db_obj) as create_mock: + obj = self._test_class(self.context, **self.db_obj) + self._check_equal(obj, self.db_obj) + obj.create() + self._check_equal(obj, self.db_obj) + create_mock.assert_called_once_with( + self.context, self._test_class.db_model, self.db_obj) + + def test_create_updates_from_db_object(self): + with mock.patch.object(db_api, 'create_object', + return_value=self.db_obj): + obj = self._test_class(self.context, **self.db_objs[1]) + 
self._check_equal(obj, self.db_objs[1]) + obj.create() + self._check_equal(obj, self.db_obj) + + def test_create_duplicates(self): + with mock.patch.object(db_api, 'create_object', + side_effect=obj_exc.DBDuplicateEntry): + obj = self._test_class(self.context, **self.db_obj) + self.assertRaises(base.NeutronDbObjectDuplicateEntry, obj.create) + + @mock.patch.object(db_api, 'update_object') + def test_update_no_changes(self, update_mock): + with mock.patch.object(base.NeutronDbObject, + '_get_changed_persistent_fields', + return_value={}): + obj = self._test_class(self.context) + obj.update() + self.assertFalse(update_mock.called) + + @mock.patch.object(db_api, 'update_object') + def test_update_changes(self, update_mock): + fields_to_update = self.get_updatable_fields(self.db_obj) + with mock.patch.object(base.NeutronDbObject, + '_get_changed_persistent_fields', + return_value=fields_to_update): + obj = self._test_class(self.context, **self.db_obj) + obj.update() + update_mock.assert_called_once_with( + self.context, self._test_class.db_model, + self.db_obj['id'], fields_to_update) + + @mock.patch.object(base.NeutronDbObject, + '_get_changed_persistent_fields', + return_value={'a': 'a', 'b': 'b', 'c': 'c'}) + def test_update_changes_forbidden(self, *mocks): + with mock.patch.object( + self._test_class, + 'fields_no_update', + new_callable=mock.PropertyMock(return_value=['a', 'c']), + create=True): + obj = self._test_class(self.context, **self.db_obj) + self.assertRaises(base.NeutronObjectUpdateForbidden, obj.update) + + def test_update_updates_from_db_object(self): + with mock.patch.object(db_api, 'update_object', + return_value=self.db_obj): + obj = self._test_class(self.context, **self.db_objs[1]) + fields_to_update = self.get_updatable_fields(self.db_objs[1]) + with mock.patch.object(base.NeutronDbObject, + '_get_changed_persistent_fields', + return_value=fields_to_update): + obj.update() + self._check_equal(obj, self.db_obj) + + @mock.patch.object(db_api, 
'delete_object') + def test_delete(self, delete_mock): + obj = self._test_class(self.context, **self.db_obj) + self._check_equal(obj, self.db_obj) + obj.delete() + self._check_equal(obj, self.db_obj) + delete_mock.assert_called_once_with( + self.context, self._test_class.db_model, self.db_obj['id']) + + @mock.patch(OBJECTS_BASE_OBJ_FROM_PRIMITIVE) + def test_clean_obj_from_primitive(self, get_prim_m): + expected_obj = get_prim_m.return_value + observed_obj = self._test_class.clean_obj_from_primitive('foo', 'bar') + self.assertIs(expected_obj, observed_obj) + self.assertTrue(observed_obj.obj_reset_changes.called) + + +class BaseDbObjectTestCase(_BaseObjectTestCase): + + def test_get_by_id_create_update_delete(self): + obj = self._test_class(self.context, **self.db_obj) + obj.create() + + new = self._test_class.get_by_id(self.context, id=obj.id) + self.assertEqual(obj, new) + + obj = new + + for key, val in self.get_updatable_fields(self.db_objs[1]).items(): + setattr(obj, key, val) + obj.update() + + new = self._test_class.get_by_id(self.context, id=obj.id) + self.assertEqual(obj, new) + + obj = new + new.delete() + + new = self._test_class.get_by_id(self.context, id=obj.id) + self.assertIsNone(new) + + def test_update_non_existent_object_raises_not_found(self): + obj = self._test_class(self.context, **self.db_obj) + obj.obj_reset_changes() + + for key, val in self.get_updatable_fields(self.db_obj).items(): + setattr(obj, key, val) + + self.assertRaises(n_exc.ObjectNotFound, obj.update) + + def test_delete_non_existent_object_raises_not_found(self): + obj = self._test_class(self.context, **self.db_obj) + self.assertRaises(n_exc.ObjectNotFound, obj.delete) + + @mock.patch(SQLALCHEMY_COMMIT) + def test_create_single_transaction(self, mock_commit): + obj = self._test_class(self.context, **self.db_obj) + obj.create() + self.assertEqual(1, mock_commit.call_count) + + def test_update_single_transaction(self): + obj = self._test_class(self.context, **self.db_obj) + 
obj.create() + + for key, val in self.get_updatable_fields(self.db_obj).items(): + setattr(obj, key, val) + + with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: + obj.update() + self.assertEqual(1, mock_commit.call_count) + + def test_delete_single_transaction(self): + obj = self._test_class(self.context, **self.db_obj) + obj.create() + + with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: + obj.delete() + self.assertEqual(1, mock_commit.call_count) + + @mock.patch(SQLALCHEMY_COMMIT) + def test_get_objects_single_transaction(self, mock_commit): + self._test_class.get_objects(self.context) + self.assertEqual(1, mock_commit.call_count) + + @mock.patch(SQLALCHEMY_COMMIT) + def test_get_by_id_single_transaction(self, mock_commit): + obj = self._test_class(self.context, **self.db_obj) + obj.create() + + obj = self._test_class.get_by_id(self.context, obj.id) + self.assertEqual(2, mock_commit.call_count) diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py b/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py deleted file mode 100644 index 08d689e127d..00000000000 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from neutron.agent.linux import ip_lib -from neutron.plugins.ibm.agent import sdnve_neutron_agent -from neutron.tests import base - - -NOTIFIER = ('neutron.plugins.ibm.' 
- 'sdnve_neutron_plugin.AgentNotifierApi') - - -class CreateAgentConfigMap(base.BaseTestCase): - - def test_create_agent_config_map_succeeds(self): - self.assertTrue(sdnve_neutron_agent.create_agent_config_map(cfg.CONF)) - - def test_create_agent_config_using_controller_ips(self): - cfg.CONF.set_override('controller_ips', - ['10.10.10.1', '10.10.10.2'], group='SDNVE') - cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) - self.assertEqual(cfgmap['controller_ip'], '10.10.10.1') - - def test_create_agent_config_using_interface_mappings(self): - cfg.CONF.set_override('interface_mappings', - ['interface1 : eth1', 'interface2 : eth2'], - group='SDNVE') - cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) - self.assertEqual(cfgmap['interface_mappings'], - {'interface1': 'eth1', 'interface2': 'eth2'}) - - -class TestSdnveNeutronAgent(base.BaseTestCase): - - def setUp(self): - super(TestSdnveNeutronAgent, self).setUp() - notifier_p = mock.patch(NOTIFIER) - notifier_cls = notifier_p.start() - self.notifier = mock.Mock() - notifier_cls.return_value = self.notifier - cfg.CONF.set_override('integration_bridge', - 'br_int', group='SDNVE') - kwargs = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) - - class MockFixedIntervalLoopingCall(object): - def __init__(self, f): - self.f = f - - def start(self, interval=0): - self.f() - - with mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.' - 'SdnveNeutronAgent.setup_integration_br', - return_value=mock.Mock()),\ - mock.patch('oslo_service.loopingcall.' 
- 'FixedIntervalLoopingCall', - new=MockFixedIntervalLoopingCall): - self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs) - - def test_setup_physical_interfaces(self): - with mock.patch.object(self.agent.int_br, - 'add_port') as add_port_func: - with mock.patch.object(ip_lib, - 'device_exists', - return_valxue=True): - self.agent.setup_physical_interfaces({"interface1": "eth1"}) - add_port_func.assert_called_once_with('eth1') - - def test_setup_physical_interfaces_none(self): - with mock.patch.object(self.agent.int_br, - 'add_port') as add_port_func: - with mock.patch.object(ip_lib, - 'device_exists', - return_valxue=True): - self.agent.setup_physical_interfaces({}) - self.assertFalse(add_port_func.called) - - def test_get_info_set_controller(self): - with mock.patch.object(self.agent.int_br, - 'set_controller') as set_controller_func: - kwargs = {} - kwargs['info'] = {'new_controller': '10.10.10.1'} - self.agent.info_update('dummy', **kwargs) - set_controller_func.assert_called_once_with(['tcp:10.10.10.1']) - - def test_get_info(self): - with mock.patch.object(self.agent.int_br, - 'set_controller') as set_controller_func: - kwargs = {} - self.agent.info_update('dummy', **kwargs) - self.assertFalse(set_controller_func.called) diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_api.py b/neutron/tests/unit/plugins/ibm/test_sdnve_api.py deleted file mode 100644 index 22dfcb340c3..00000000000 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_api.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -from oslo_utils import uuidutils - -from neutron.plugins.ibm.common import constants -from neutron.plugins.ibm import sdnve_api -from neutron.tests import base - -RESOURCE_PATH = { - 'network': "ln/networks/", -} -RESOURCE = 'network' -HTTP_OK = 200 -TENANT_ID = uuidutils.generate_uuid() - - -class TestSdnveApi(base.BaseTestCase): - - def setUp(self): - super(TestSdnveApi, self).setUp() - - class MockKeystoneClient(object): - def __init__(self, **kwargs): - pass - - def get_tenant_name(self, id): - return 'test tenant name' - - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'KeystoneClient', - new=MockKeystoneClient): - self.api = sdnve_api.Client() - - def mock_do_request(self, method, url, body=None, headers=None, - params=None, connection_type=None): - return (HTTP_OK, url) - - def mock_do_request_tenant(self, method, url, body=None, headers=None, - params=None, connection_type=None): - return (HTTP_OK, {'id': TENANT_ID, - 'network_type': constants.TENANT_TYPE_OF}) - - def mock_do_request_no_tenant(self, method, url, body=None, headers=None, - params=None, connection_type=None): - return (None, None) - - def mock_process_request(self, body): - return body - - def test_sdnve_api_list(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request): - result = self.api.sdnve_list(RESOURCE) - self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE])) - - def test_sdnve_api_show(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' 
- 'Client.do_request', - new=self.mock_do_request): - result = self.api.sdnve_show(RESOURCE, TENANT_ID) - self.assertEqual(result, - (HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID)) - - def test_sdnve_api_create(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.process_request', - new=self.mock_process_request): - result = self.api.sdnve_create(RESOURCE, '') - self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE])) - - def test_sdnve_api_update(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.process_request', - new=self.mock_process_request): - result = self.api.sdnve_update(RESOURCE, TENANT_ID, '') - self.assertEqual(result, - (HTTP_OK, - RESOURCE_PATH[RESOURCE] + TENANT_ID)) - - def test_sdnve_api_delete(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request): - result = self.api.sdnve_delete(RESOURCE, TENANT_ID) - self.assertEqual(result, - (HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID)) - - def test_sdnve_get_tenant_by_id(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request_tenant): - id = TENANT_ID - result = self.api.sdnve_get_tenant_byid(id) - self.assertEqual(result, - (TENANT_ID, constants.TENANT_TYPE_OF)) - - def test_sdnve_check_and_create_tenant(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client.do_request', - new=self.mock_do_request_tenant): - id = TENANT_ID - result = self.api.sdnve_check_and_create_tenant(id) - self.assertEqual(result, TENANT_ID) - - def test_sdnve_check_and_create_tenant_fail(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' 
- 'Client.do_request', - new=self.mock_do_request_no_tenant): - id = TENANT_ID - result = self.api.sdnve_check_and_create_tenant( - id, constants.TENANT_TYPE_OF) - self.assertIsNone(result) - - def test_process_request(self): - my_request = {'key_1': 'value_1', 'router:external': 'True', - 'key_2': 'value_2'} - expected = {'key_1': 'value_1', 'router_external': 'True', - 'key_2': 'value_2'} - result = self.api.process_request(my_request) - self.assertEqual(expected, result) diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py b/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py deleted file mode 100644 index ff79eafffbe..00000000000 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock - -from neutron.extensions import portbindings -from neutron.tests.unit import _test_extension_portbindings as test_bindings -from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin -from neutron.tests.unit.extensions import test_l3 as test_l3 - -from neutron.plugins.ibm.common import constants - - -_plugin_name = ('neutron.plugins.ibm.' 
- 'sdnve_neutron_plugin.SdnvePluginV2') -HTTP_OK = 200 - - -class MockClient(object): - def sdnve_list(self, resource, **params): - return (HTTP_OK, 'body') - - def sdnve_show(self, resource, specific, **params): - return (HTTP_OK, 'body') - - def sdnve_create(self, resource, body): - return (HTTP_OK, 'body') - - def sdnve_update(self, resource, specific, body=None): - return (HTTP_OK, 'body') - - def sdnve_delete(self, resource, specific): - return (HTTP_OK, 'body') - - def sdnve_get_tenant_byid(self, os_tenant_id): - return (os_tenant_id, constants.TENANT_TYPE_OF) - - def sdnve_check_and_create_tenant( - self, os_tenant_id, network_type=None): - return os_tenant_id - - def sdnve_get_controller(self): - return - - -class MockKeystoneClient(object): - def __init__(self, **kwargs): - pass - - def get_tenant_type(self, id): - return constants.TENANT_TYPE_OF - - def get_tenant_name(self, id): - return "tenant name" - - -class IBMPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): - def setUp(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' 'KeystoneClient', - new=MockKeystoneClient),\ - mock.patch('neutron.plugins.ibm.sdnve_api.' 'Client', - new=MockClient): - super(IBMPluginV2TestCase, self).setUp(plugin=_plugin_name) - - -class TestIBMBasicGet(test_plugin.TestBasicGet, - IBMPluginV2TestCase): - pass - - -class TestIBMV2HTTPResponse(test_plugin.TestV2HTTPResponse, - IBMPluginV2TestCase): - pass - - -class TestIBMNetworksV2(test_plugin.TestNetworksV2, - IBMPluginV2TestCase): - pass - - -class TestIBMPortsV2(test_plugin.TestPortsV2, - IBMPluginV2TestCase): - pass - - -class TestIBMSubnetsV2(test_plugin.TestSubnetsV2, - IBMPluginV2TestCase): - pass - - -class TestIBMPortBinding(IBMPluginV2TestCase, - test_bindings.PortBindingsTestCase): - VIF_TYPE = portbindings.VIF_TYPE_OVS - - -class IBMPluginRouterTestCase(test_l3.L3NatDBIntTestCase): - - def setUp(self): - with mock.patch('neutron.plugins.ibm.sdnve_api.' 
'KeystoneClient', - new=MockKeystoneClient),\ - mock.patch('neutron.plugins.ibm.sdnve_api.' 'Client', - new=MockClient): - super(IBMPluginRouterTestCase, self).setUp(plugin=_plugin_name) - - def test_floating_port_status_not_applicable(self): - self.skipTest('Plugin changes floating port status') diff --git a/neutron/tests/unit/plugins/ml2/base.py b/neutron/tests/unit/plugins/ml2/base.py new file mode 100644 index 00000000000..6c193a4a095 --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/base.py @@ -0,0 +1,39 @@ +# Copyright (c) 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron import manager +from neutron.plugins.common import constants as plugin_constants +from neutron.tests.unit.plugins.ml2 import test_plugin + + +class ML2TestFramework(test_plugin.Ml2PluginV2TestCase): + l3_plugin = ('neutron.services.l3_router.l3_router_plugin.' 
+ 'L3RouterPlugin') + _mechanism_drivers = ['openvswitch'] + + def setUp(self): + super(ML2TestFramework, self).setUp() + self.core_plugin = manager.NeutronManager.get_instance().get_plugin() + self.l3_plugin = manager.NeutronManager.get_service_plugins().get( + plugin_constants.L3_ROUTER_NAT) + + def _create_router(self, distributed=False, ha=False): + return self.l3_plugin.create_router( + self.context, + {'router': + {'name': 'router', + 'admin_state_up': True, + 'tenant_id': self._tenant_id, + 'ha': ha, + 'distributed': distributed}}) diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py index 647ca2b99e2..c9f170f058e 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py @@ -785,7 +785,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase): l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() with mock.patch.object(l2pop_mech, - '_update_port_down', + '_get_agent_fdb', return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, 'remove_fdb_entries'): @@ -892,7 +892,7 @@ class TestL2PopulationMechDriver(base.BaseTestCase): [constants.FLOODING_ENTRY]}} self.assertEqual(expected_result, result) - def test_update_port_postcommit_mac_address_changed_raises(self): + def test_update_port_precommit_mac_address_changed_raises(self): port = {'status': u'ACTIVE', 'device_owner': u'compute:None', 'mac_address': u'12:34:56:78:4b:0e', @@ -912,4 +912,4 @@ class TestL2PopulationMechDriver(base.BaseTestCase): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() with testtools.ExpectedException(ml2_exc.MechanismDriverError): - mech_driver.update_port_postcommit(ctx) + mech_driver.update_port_precommit(ctx) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py 
b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py new file mode 100755 index 00000000000..7ccb74507c3 --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py @@ -0,0 +1,92 @@ +# Copyright 2015 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo_utils import uuidutils + +from neutron import context +from neutron.objects.qos import policy +from neutron.objects.qos import rule +from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions +from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import ( + qos_driver) +from neutron.tests import base + + +class QosSRIOVAgentDriverTestCase(base.BaseTestCase): + + ASSIGNED_MAC = '00:00:00:00:00:66' + PCI_SLOT = '0000:06:00.1' + + def setUp(self): + super(QosSRIOVAgentDriverTestCase, self).setUp() + self.context = context.get_admin_context() + self.qos_driver = qos_driver.QosSRIOVAgentDriver() + self.qos_driver.initialize() + self.qos_driver.eswitch_mgr = mock.Mock() + self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock() + self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate + self.rule = self._create_bw_limit_rule_obj() + self.qos_policy = self._create_qos_policy_obj([self.rule]) + self.port = self._create_fake_port() + + def _create_bw_limit_rule_obj(self): + rule_obj = rule.QosBandwidthLimitRule() + rule_obj.id = uuidutils.generate_uuid() + rule_obj.max_kbps = 2 + rule_obj.max_burst_kbps = 200 + rule_obj.obj_reset_changes() + return rule_obj + + def _create_qos_policy_obj(self, rules): + policy_dict = {'id': uuidutils.generate_uuid(), + 'tenant_id': uuidutils.generate_uuid(), + 'name': 'test', + 'description': 'test', + 'shared': False, + 'rules': rules} + policy_obj = policy.QosPolicy(self.context, **policy_dict) + policy_obj.obj_reset_changes() + return policy_obj + + def _create_fake_port(self): + return {'port_id': uuidutils.generate_uuid(), + 'profile': {'pci_slot': self.PCI_SLOT}, + 'device': self.ASSIGNED_MAC} + + def test_create_rule(self): + self.qos_driver.create(self.port, self.qos_policy) + self.max_rate_mock.assert_called_once_with( + self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) + + def test_update_rule(self): + self.qos_driver.update(self.port, self.qos_policy) + 
self.max_rate_mock.assert_called_once_with( + self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) + + def test_delete_rules(self): + self.qos_driver.delete(self.port, self.qos_policy) + self.max_rate_mock.assert_called_once_with( + self.ASSIGNED_MAC, self.PCI_SLOT, 0) + + def test__set_vf_max_rate_captures_sriov_failure(self): + self.max_rate_mock.side_effect = exceptions.SriovNicError() + self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) + + def test__set_vf_max_rate_unknown_device(self): + with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists', + return_value=False): + self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) + self.assertFalse(self.max_rate_mock.called) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py index a9a5b3a67a9..b3a7d958a87 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py @@ -42,7 +42,8 @@ class TestCreateESwitchManager(base.BaseTestCase): return_value=True): with testtools.ExpectedException(exc.InvalidDeviceError): - esm.ESwitchManager(device_mappings, None) + esm.ESwitchManager().discover_devices( + device_mappings, None) def test_create_eswitch_mgr_ok(self): device_mappings = {'physnet1': 'p6p1'} @@ -53,7 +54,7 @@ class TestCreateESwitchManager(base.BaseTestCase): "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): - esm.ESwitchManager(device_mappings, None) + esm.ESwitchManager().discover_devices(device_mappings, None) class TestESwitchManagerApi(base.BaseTestCase): @@ -75,7 +76,8 @@ class TestESwitchManagerApi(base.BaseTestCase): mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): - self.eswitch_mgr = esm.ESwitchManager(device_mappings, None) + self.eswitch_mgr = esm.ESwitchManager() + self.eswitch_mgr.discover_devices(device_mappings, None) def test_get_assigned_devices(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." @@ -132,6 +134,19 @@ class TestESwitchManagerApi(base.BaseTestCase): self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC, self.PCI_SLOT, True) + def test_set_device_max_rate(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." + "eswitch_manager.EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC) as get_pci_mock,\ + mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." + "eswitch_manager.EmbSwitch.set_device_max_rate")\ + as set_device_max_rate_mock: + self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC, + self.PCI_SLOT, 1000) + get_pci_mock.assert_called_once_with(self.PCI_SLOT) + set_device_max_rate_mock.assert_called_once_with( + self.PCI_SLOT, 1000) + def test_set_device_status_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", @@ -179,6 +194,26 @@ class TestESwitchManagerApi(base.BaseTestCase): 'device_mac': self.WRONG_MAC}) self.assertFalse(result) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[ASSIGNED_MAC]) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." + "eswitch_manager.PciOsWrapper.is_assigned_vf", + return_value=True) + def test_get_pci_slot_by_existing_mac(self, *args): + pci_slot = self.eswitch_mgr.get_pci_slot_by_mac(self.ASSIGNED_MAC) + self.assertIsNotNone(pci_slot) + + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[ASSIGNED_MAC]) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
+ "eswitch_manager.PciOsWrapper.is_assigned_vf", + return_value=True) + def test_get_pci_slot_by_not_existing_mac(self, *args): + pci_slot = self.eswitch_mgr.get_pci_slot_by_mac(self.WRONG_MAC) + self.assertIsNone(pci_slot) + class TestEmbSwitch(base.BaseTestCase): DEV_NAME = "eth2" @@ -260,6 +295,49 @@ class TestEmbSwitch(base.BaseTestCase): self.emb_switch.set_device_spoofcheck, self.WRONG_PCI_SLOT, True) + def test_set_device_max_rate_ok(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2000) + pci_lib_mock.assert_called_with(0, 2) + + def test_set_device_max_rate_ok2(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 99) + pci_lib_mock.assert_called_with(0, 1) + + def test_set_device_max_rate_rounded_ok(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2001) + pci_lib_mock.assert_called_with(0, 2) + + def test_set_device_max_rate_rounded_ok2(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2499) + pci_lib_mock.assert_called_with(0, 2) + + def test_set_device_max_rate_rounded_ok3(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2500) + pci_lib_mock.assert_called_with(0, 3) + + def test_set_device_max_rate_disable(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." 
+ "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: + self.emb_switch.set_device_max_rate(self.PCI_SLOT, 0) + pci_lib_mock.assert_called_with(0, 0) + + def test_set_device_max_rate_fail(self): + with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.set_vf_max_rate"): + self.assertRaises(exc.InvalidPciSlotError, + self.emb_switch.set_device_max_rate, + self.WRONG_PCI_SLOT, 1000) + def test_get_pci_device(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py index 67ef92de2c4..5512ea95f6b 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py @@ -114,3 +114,20 @@ class TestPciLib(base.BaseTestCase): self.pci_wrapper.set_vf_spoofcheck, self.VF_INDEX, True) + + def test_set_vf_max_rate(self): + with mock.patch.object(self.pci_wrapper, "_as_root") \ + as mock_as_root: + result = self.pci_wrapper.set_vf_max_rate(self.VF_INDEX, 1000) + self.assertIsNone(result) + mock_as_root.assert_called_once_with([], "link", + ("set", self.DEV_NAME, "vf", str(self.VF_INDEX), "rate", '1000')) + + def test_set_vf_max_rate_fail(self): + with mock.patch.object(self.pci_wrapper, + "_execute") as mock_exec: + mock_exec.side_effect = Exception() + self.assertRaises(exc.IpCommandError, + self.pci_wrapper.set_vf_max_rate, + self.VF_INDEX, + 1000) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py index ccbb04435ae..8ebc73ce5fb 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py 
@@ -49,7 +49,13 @@ class TestSriovAgent(base.BaseTestCase): self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) - def test_treat_devices_removed_with_existed_device(self): + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[DEVICE_MAC]) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." + "eswitch_manager.PciOsWrapper.is_assigned_vf", + return_value=True) + def test_treat_devices_removed_with_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [DEVICE_MAC] with mock.patch.object(agent.plugin_rpc, @@ -63,7 +69,13 @@ class TestSriovAgent(base.BaseTestCase): self.assertFalse(resync) self.assertTrue(fn_udd.called) - def test_treat_devices_removed_with_not_existed_device(self): + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[DEVICE_MAC]) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." + "eswitch_manager.PciOsWrapper.is_assigned_vf", + return_value=True) + def test_treat_devices_removed_with_not_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [DEVICE_MAC] with mock.patch.object(agent.plugin_rpc, @@ -77,7 +89,13 @@ class TestSriovAgent(base.BaseTestCase): self.assertFalse(resync) self.assertTrue(fn_udd.called) - def test_treat_devices_removed_failed(self): + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[DEVICE_MAC]) + @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
+ "eswitch_manager.PciOsWrapper.is_assigned_vf", + return_value=True) + def test_treat_devices_removed_failed(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [DEVICE_MAC] with mock.patch.object(agent.plugin_rpc, diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py index 8b28eb087ed..15033b56e90 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py @@ -56,7 +56,7 @@ class TestFakePortContext(base.FakePortContext): class SriovNicSwitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): - VIF_TYPE = portbindings.VIF_TYPE_HW_VEB + VIF_TYPE = mech_driver.VIF_TYPE_HW_VEB CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_NIC_SWITCH VLAN_SEGMENTS = base.AgentMechanismVlanTestCase.VLAN_SEGMENTS @@ -143,11 +143,11 @@ class SriovSwitchMechVnicTypeTestCase(SriovNicSwitchMechanismBaseTestCase): def test_vnic_type_direct(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT, - portbindings.VIF_TYPE_HW_VEB) + mech_driver.VIF_TYPE_HW_VEB) def test_vnic_type_macvtap(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP, - portbindings.VIF_TYPE_HW_VEB) + mech_driver.VIF_TYPE_HW_VEB) class SriovSwitchMechProfileTestCase(SriovNicSwitchMechanismBaseTestCase): @@ -162,7 +162,7 @@ class SriovSwitchMechProfileTestCase(SriovNicSwitchMechanismBaseTestCase): def test_profile_supported_pci_info(self): self._check_vif_for_pci_info(MELLANOX_CONNECTX3_PCI_INFO, - portbindings.VIF_TYPE_HW_VEB) + mech_driver.VIF_TYPE_HW_VEB) def test_profile_unsupported_pci_info(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 
diff --git a/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py b/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py index 1237b8444bb..7c18ff593ab 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py @@ -15,7 +15,6 @@ import sys import mock -from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_api as api from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base @@ -33,9 +32,9 @@ with mock.patch.dict(sys.modules, class MlnxMechanismBaseTestCase(base.AgentMechanismBaseTestCase): - VIF_TYPE = portbindings.VIF_TYPE_IB_HOSTDEV + VIF_TYPE = mech_mlnx.VIF_TYPE_IB_HOSTDEV CAP_PORT_FILTER = False - AGENT_TYPE = constants.AGENT_TYPE_MLNX + AGENT_TYPE = mech_mlnx.AGENT_TYPE_MLNX VNIC_TYPE = portbindings.VNIC_DIRECT GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} diff --git a/neutron/tests/unit/plugins/ml2/drivers/opendaylight/test_driver.py b/neutron/tests/unit/plugins/ml2/drivers/opendaylight/test_driver.py index 03b83546764..09f6d0ca530 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/opendaylight/test_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/opendaylight/test_driver.py @@ -90,3 +90,17 @@ class TestODLShim(test_plugin.Ml2PluginV2TestCase): self.driver.odl_drv.synchronize.assert_called_with('delete', const.ODL_PORTS, self.context) + + def test_bind_port_delegation(self): + # given front-end with attached back-end + front_end = self.driver + front_end.odl_drv = back_end = mock.MagicMock( + spec=driver.OpenDaylightMechanismDriver) + # given PortContext to be forwarded to back-end without using + context = object() + + # when binding port + front_end.bind_port(context) + + # then port is bound by back-end + back_end.bind_port.assert_called_once_with(context) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py 
b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py new file mode 100644 index 00000000000..c9e276c72ab --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_utils import uuidutils + +from neutron import context +from neutron.objects.qos import policy +from neutron.objects.qos import rule +from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers import ( + qos_driver) +from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import ( + ovs_test_base) + + +class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase): + + def setUp(self): + super(QosOVSAgentDriverTestCase, self).setUp() + self.context = context.get_admin_context() + self.qos_driver = qos_driver.QosOVSAgentDriver() + self.qos_driver.initialize() + self.qos_driver.br_int = mock.Mock() + self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( + return_value=(1000, 10)) + self.get = self.qos_driver.br_int.get_egress_bw_limit_for_port + self.qos_driver.br_int.del_egress_bw_limit_for_port = mock.Mock() + self.delete = self.qos_driver.br_int.delete_egress_bw_limit_for_port + self.qos_driver.br_int.create_egress_bw_limit_for_port = mock.Mock() + self.create = self.qos_driver.br_int.create_egress_bw_limit_for_port + self.rule = self._create_bw_limit_rule_obj() + self.qos_policy = self._create_qos_policy_obj([self.rule]) + self.port = self._create_fake_port() + + def _create_bw_limit_rule_obj(self): + rule_obj = rule.QosBandwidthLimitRule() + rule_obj.id = uuidutils.generate_uuid() + rule_obj.max_kbps = 2 + rule_obj.max_burst_kbps = 200 + rule_obj.obj_reset_changes() + return rule_obj + + def _create_qos_policy_obj(self, rules): + policy_dict = {'id': uuidutils.generate_uuid(), + 'tenant_id': uuidutils.generate_uuid(), + 'name': 'test', + 'description': 'test', + 'shared': False, + 'rules': rules} + policy_obj = policy.QosPolicy(self.context, **policy_dict) + policy_obj.obj_reset_changes() + return policy_obj + + def _create_fake_port(self): + self.port_name = 'fakeport' + + class FakeVifPort(object): + port_name = self.port_name + + return {'vif_port': FakeVifPort()} + + def test_create_new_rule(self): + 
self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( + return_value=(None, None)) + self.qos_driver.create(self.port, self.qos_policy) + # Assert create is the last call + self.assertEqual( + 'create_egress_bw_limit_for_port', + self.qos_driver.br_int.method_calls[-1][0]) + self.assertEqual(0, self.delete.call_count) + self.create.assert_called_once_with( + self.port_name, self.rule.max_kbps, + self.rule.max_burst_kbps) + + def test_create_existing_rules(self): + self.qos_driver.create(self.port, self.qos_policy) + self._assert_rule_create_updated() + + def test_update_rules(self): + self.qos_driver.update(self.port, self.qos_policy) + self._assert_rule_create_updated() + + def test_delete_rules(self): + self.qos_driver.delete(self.port, self.qos_policy) + self.delete.assert_called_once_with(self.port_name) + + def _assert_rule_create_updated(self): + # Assert create is the last call + self.assertEqual( + 'create_egress_bw_limit_for_port', + self.qos_driver.br_int.method_calls[-1][0]) + + self.create.assert_called_once_with( + self.port_name, self.rule.max_kbps, + self.rule.max_burst_kbps) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py index ad9de289fc3..b3961030037 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py @@ -80,6 +80,17 @@ class OVSBridgeTestBase(ovs_test_base.OVSOFCtlTestBase): ] self.assertEqual(expected, self.mock.mock_calls) + def test_dump_flows_for_table(self): + table = 23 + with mock.patch.object(self.br, 'run_ofctl') as run_ofctl: + self.br.dump_flows(table) + run_ofctl.assert_has_calls([mock.call("dump-flows", mock.ANY)]) + + def test_dump_all_flows(self): + with mock.patch.object(self.br, 'run_ofctl') 
as run_ofctl: + self.br.dump_flows_all_tables() + run_ofctl.assert_has_calls([mock.call("dump-flows", [])]) + class OVSDVRProcessTestMixin(object): def test_install_dvr_process_ipv4(self): diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py index 005112762f1..9bb3c8f2346 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py @@ -31,7 +31,6 @@ class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase): def test_setup_default_table(self): self.br.setup_default_table() expected = [ - call.delete_flows(), call.add_flow(priority=0, table=0, actions='normal'), call.add_flow(priority=0, table=23, actions='drop'), call.add_flow(priority=0, table=24, actions='drop'), diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py index 76769a34fde..9f730246e3c 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py @@ -37,65 +37,71 @@ class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, def test_setup_default_table(self): patch_int_ofport = 5555 - arp_responder_enabled = False + mock_do_action_flows = mock.patch.object(self.br, + 'do_action_flows').start() + self.mock.attach_mock(mock_do_action_flows, 'do_action_flows') self.br.setup_default_table(patch_int_ofport=patch_int_ofport, - arp_responder_enabled=arp_responder_enabled) - expected = [ - call.add_flow(priority=1, in_port=patch_int_ofport, - actions='resubmit(,2)'), - call.add_flow(priority=0, actions='drop'), - 
call.add_flow(priority=0, table=2, - dl_dst='00:00:00:00:00:00/01:00:00:00:00:00', - actions='resubmit(,20)'), - call.add_flow(priority=0, table=2, - dl_dst='01:00:00:00:00:00/01:00:00:00:00:00', - actions='resubmit(,22)'), - call.add_flow(priority=0, table=3, actions='drop'), - call.add_flow(priority=0, table=4, actions='drop'), - call.add_flow(priority=1, table=10, - actions='learn(table=20,priority=1,' - 'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' - 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' - 'load:0->NXM_OF_VLAN_TCI[],' - 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' - 'output:NXM_OF_IN_PORT[]),' - 'output:%s' % patch_int_ofport), - call.add_flow(priority=0, table=20, actions='resubmit(,22)'), - call.add_flow(priority=0, table=22, actions='drop'), - ] + arp_responder_enabled=False) + flow_args = [{'priority': 1, 'in_port': patch_int_ofport, + 'actions': 'resubmit(,2)'}, + {'priority': 0, 'actions': 'drop'}, + {'priority': 0, 'table': 2, + 'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00', + 'actions': 'resubmit(,20)'}, + {'priority': 0, 'table': 2, + 'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00', + 'actions': 'resubmit(,22)'}, + {'priority': 0, 'table': 3, 'actions': 'drop'}, + {'priority': 0, 'table': 4, 'actions': 'drop'}, + {'priority': 0, 'table': 6, 'actions': 'drop'}, + {'priority': 1, 'table': 10, + 'actions': 'learn(cookie=0x0,table=20,priority=1,' + 'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' + 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' + 'load:0->NXM_OF_VLAN_TCI[],' + 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' + 'output:NXM_OF_IN_PORT[]),' + 'output:%s' % patch_int_ofport}, + {'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'} + ] + expected = [call.do_action_flows('add', flow_args), + call.add_flow(priority=0, table=22, actions='drop')] self.assertEqual(expected, self.mock.mock_calls) def test_setup_default_table_arp_responder_enabled(self): patch_int_ofport = 5555 - arp_responder_enabled = True + mock_do_action_flows = mock.patch.object(self.br, + 
'do_action_flows').start() + self.mock.attach_mock(mock_do_action_flows, 'do_action_flows') self.br.setup_default_table(patch_int_ofport=patch_int_ofport, - arp_responder_enabled=arp_responder_enabled) - expected = [ - call.add_flow(priority=1, in_port=patch_int_ofport, - actions='resubmit(,2)'), - call.add_flow(priority=0, actions='drop'), - call.add_flow(priority=1, table=2, dl_dst='ff:ff:ff:ff:ff:ff', - actions='resubmit(,21)', proto='arp'), - call.add_flow(priority=0, table=2, - dl_dst='00:00:00:00:00:00/01:00:00:00:00:00', - actions='resubmit(,20)'), - call.add_flow(priority=0, table=2, - dl_dst='01:00:00:00:00:00/01:00:00:00:00:00', - actions='resubmit(,22)'), - call.add_flow(priority=0, table=3, actions='drop'), - call.add_flow(priority=0, table=4, actions='drop'), - call.add_flow(priority=1, table=10, - actions='learn(table=20,priority=1,' - 'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' - 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' - 'load:0->NXM_OF_VLAN_TCI[],' - 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' - 'output:NXM_OF_IN_PORT[]),' - 'output:%s' % patch_int_ofport), - call.add_flow(priority=0, table=20, actions='resubmit(,22)'), - call.add_flow(priority=0, table=21, actions='resubmit(,22)'), - call.add_flow(priority=0, table=22, actions='drop'), - ] + arp_responder_enabled=True) + flow_args = [{'priority': 1, 'in_port': patch_int_ofport, + 'actions': 'resubmit(,2)'}, + {'priority': 0, 'actions': 'drop'}, + {'priority': 1, 'table': 2, 'dl_dst': 'ff:ff:ff:ff:ff:ff', + 'actions': 'resubmit(,21)', 'proto': 'arp'}, + {'priority': 0, 'table': 2, + 'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00', + 'actions': 'resubmit(,20)'}, + {'priority': 0, 'table': 2, + 'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00', + 'actions': 'resubmit(,22)'}, + {'priority': 0, 'table': 3, 'actions': 'drop'}, + {'priority': 0, 'table': 4, 'actions': 'drop'}, + {'priority': 0, 'table': 6, 'actions': 'drop'}, + {'priority': 1, 'table': 10, + 'actions': 'learn(cookie=0x0,table=20,priority=1,' + 
'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' + 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' + 'load:0->NXM_OF_VLAN_TCI[],' + 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' + 'output:NXM_OF_IN_PORT[]),' + 'output:%s' % patch_int_ofport}, + {'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'}, + {'priority': 0, 'table': 21, 'actions': 'resubmit(,22)'} + ] + expected = [call.do_action_flows('add', flow_args), + call.add_flow(priority=0, table=22, actions='drop')] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index 527f8ab39d9..72b5e299261 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -31,6 +31,7 @@ from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ as ovs_agent +from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base @@ -186,6 +187,37 @@ class TestOvsNeutronAgent(object): else: self.assertFalse(provision_local_vlan.called) + def test_datapath_type_system(self): + # verify kernel datapath is default + expected = constants.OVS_DATAPATH_SYSTEM + self.assertEqual(expected, self.agent.int_br.datapath_type) + + def test_datapath_type_netdev(self): + + with mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'), \ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_ancillary_bridges', + return_value=[]), \ + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'), \ + mock.patch( + 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \ + 
mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall), \ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', + return_value=[]): + # validate setting non default datapath + expected = constants.OVS_DATAPATH_NETDEV + cfg.CONF.set_override('datapath_type', + expected, + group='OVS') + kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) + self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), + **kwargs) + self.assertEqual(expected, self.agent.int_br.datapath_type) + def test_restore_local_vlan_map_with_device_has_tag(self): self._test_restore_local_vlan_maps(2) @@ -404,8 +436,11 @@ class TestOvsNeutronAgent(object): 'devices_down': details, 'failed_devices_up': [], 'failed_devices_down': []}),\ + mock.patch.object(self.agent.int_br, + 'get_port_tag_dict', + return_value={}),\ mock.patch.object(self.agent, func_name) as func: - skip_devs, need_bound_devices = ( + skip_devs, need_bound_devices, insecure_ports = ( self.agent.treat_devices_added_or_updated([{}], False)) # The function should not raise self.assertFalse(skip_devs) @@ -438,6 +473,29 @@ class TestOvsNeutronAgent(object): self.assertTrue(self._mock_treat_devices_added_updated( details, mock.Mock(), 'treat_vif_port')) + def test_treat_devices_added_updated_sends_vif_port_into_extension_manager( + self, *args): + details = mock.MagicMock() + details.__contains__.side_effect = lambda x: True + port = mock.MagicMock() + + def fake_handle_port(context, port): + self.assertIn('vif_port', port) + + with mock.patch.object(self.agent.plugin_rpc, + 'get_devices_details_list_and_failed_devices', + return_value={'devices': [details], + 'failed_devices': None}),\ + mock.patch.object(self.agent.ext_manager, + 'handle_port', new=fake_handle_port),\ + mock.patch.object(self.agent.int_br, + 'get_vifs_by_ids', + return_value={details['device']: port}),\ + mock.patch.object(self.agent, 'treat_vif_port', + return_value=False): + + 
self.agent.treat_devices_added_or_updated([{}], False) + def test_treat_devices_added_updated_skips_if_port_not_found(self): dev_mock = mock.MagicMock() dev_mock.__getitem__.return_value = 'the_skipped_one' @@ -445,6 +503,9 @@ class TestOvsNeutronAgent(object): 'get_devices_details_list_and_failed_devices', return_value={'devices': [dev_mock], 'failed_devices': None}),\ + mock.patch.object(self.agent.int_br, + 'get_port_tag_dict', + return_value={}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={}),\ @@ -453,7 +514,7 @@ class TestOvsNeutronAgent(object): skip_devs = self.agent.treat_devices_added_or_updated([{}], False) # The function should return False for resync and no device # processed - self.assertEqual((['the_skipped_one'], []), skip_devs) + self.assertEqual((['the_skipped_one'], [], []), skip_devs) self.assertFalse(treat_vif_port.called) def test_treat_devices_added_updated_put_port_down(self): @@ -466,7 +527,8 @@ class TestOvsNeutronAgent(object): 'network_type': 'baz', 'fixed_ips': [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}], - 'device_owner': 'compute:None' + 'device_owner': 'compute:None', + 'port_security_enabled': True } with mock.patch.object(self.agent.plugin_rpc, @@ -476,9 +538,11 @@ class TestOvsNeutronAgent(object): mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={'xxx': mock.MagicMock()}),\ + mock.patch.object(self.agent.int_br, 'get_port_tag_dict', + return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: - skip_devs, need_bound_devices = ( + skip_devs, need_bound_devices, insecure_ports = ( self.agent.treat_devices_added_or_updated([{}], False)) # The function should return False for resync self.assertFalse(skip_devs) @@ -514,7 +578,7 @@ class TestOvsNeutronAgent(object): mock.patch.object( self.agent, "treat_devices_added_or_updated", - return_value=([], [])) as device_added_updated,\ + return_value=([], [], [])) as device_added_updated,\ 
mock.patch.object(self.agent.int_br, "get_ports_attributes", return_value=[]),\ mock.patch.object(self.agent, @@ -549,6 +613,24 @@ class TestOvsNeutronAgent(object): def test_process_network_port_with_empty_port(self): self._test_process_network_ports({}) + def test_process_network_ports_with_insecure_ports(self): + port_info = {'current': set(['tap0', 'tap1']), + 'updated': set(['tap1']), + 'removed': set([]), + 'added': set(['eth1'])} + with mock.patch.object(self.agent.sg_agent, + "setup_port_filters") as setup_port_filters,\ + mock.patch.object( + self.agent, + "treat_devices_added_or_updated", + return_value=([], [], ['eth1'])) as device_added_updated: + self.assertFalse(self.agent.process_network_ports(port_info, + False)) + device_added_updated.assert_called_once_with( + set(['eth1', 'tap1']), False) + setup_port_filters.assert_called_once_with( + set(), port_info.get('updated', set())) + def test_report_state(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: @@ -631,8 +713,11 @@ class TestOvsNeutronAgent(object): mock.call.phys_br_cls('br-eth'), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), - mock.call.int_br.delete_port('int-br-eth'), - mock.call.phys_br.delete_port('phy-br-eth'), + mock.call.int_br.db_get_val('Interface', 'int-br-eth', + 'type'), + # Have to use __getattr__ here to avoid mock._Call.__eq__ + # method being called + mock.call.int_br.db_get_val().__getattr__('__eq__')('veth'), mock.call.int_br.add_patch_port('int-br-eth', constants.NONEXISTENT_PEER), mock.call.phys_br.add_patch_port('phy-br-eth', @@ -689,6 +774,46 @@ class TestOvsNeutronAgent(object): self.assertEqual(self.agent.phys_ofports["physnet1"], "phys_veth_ofport") + def test_setup_physical_bridges_change_from_veth_to_patch_conf(self): + with mock.patch.object(sys, "exit"),\ + mock.patch.object(utils, "execute"),\ + mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ + mock.patch.object(self.agent, 
'int_br') as int_br,\ + mock.patch.object(self.agent.int_br, 'db_get_val', + return_value='veth'): + phys_br = phys_br_cls() + parent = mock.MagicMock() + parent.attach_mock(phys_br_cls, 'phys_br_cls') + parent.attach_mock(phys_br, 'phys_br') + parent.attach_mock(int_br, 'int_br') + phys_br.add_patch_port.return_value = "phy_ofport" + int_br.add_patch_port.return_value = "int_ofport" + self.agent.setup_physical_bridges({"physnet1": "br-eth"}) + expected_calls = [ + mock.call.phys_br_cls('br-eth'), + mock.call.phys_br.setup_controllers(mock.ANY), + mock.call.phys_br.setup_default_table(), + mock.call.int_br.delete_port('int-br-eth'), + mock.call.phys_br.delete_port('phy-br-eth'), + mock.call.int_br.add_patch_port('int-br-eth', + constants.NONEXISTENT_PEER), + mock.call.phys_br.add_patch_port('phy-br-eth', + constants.NONEXISTENT_PEER), + mock.call.int_br.drop_port(in_port='int_ofport'), + mock.call.phys_br.drop_port(in_port='phy_ofport'), + mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', + 'options:peer', + 'phy-br-eth'), + mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', + 'options:peer', + 'int-br-eth'), + ] + parent.assert_has_calls(expected_calls) + self.assertEqual(self.agent.int_ofports["physnet1"], + "int_ofport") + self.assertEqual(self.agent.phys_ofports["physnet1"], + "phy_ofport") + def test_get_peer_name(self): bridge1 = "A_REALLY_LONG_BRIDGE_NAME1" bridge2 = "A_REALLY_LONG_BRIDGE_NAME2" @@ -704,15 +829,49 @@ class TestOvsNeutronAgent(object): self.tun_br = mock.Mock() with mock.patch.object(self.agent.int_br, "add_patch_port", - return_value=1) as intbr_patch_fn,\ - mock.patch.object(self.agent, - 'tun_br', - autospec=True) as tun_br,\ + return_value=1) as int_patch_port,\ + mock.patch.object(self.agent.tun_br, + "add_patch_port", + return_value=1) as tun_patch_port,\ + mock.patch.object(self.agent.tun_br, 'bridge_exists', + return_value=False),\ + mock.patch.object(self.agent.tun_br, 'create') as create_tun,\ + 
mock.patch.object(self.agent.tun_br, + 'setup_controllers') as setup_controllers,\ + mock.patch.object(self.agent.tun_br, 'port_exists', + return_value=False),\ + mock.patch.object(self.agent.int_br, 'port_exists', + return_value=False),\ mock.patch.object(sys, "exit"): - tun_br.add_patch_port.return_value = 2 - self.agent.reset_tunnel_br(None) + self.agent.setup_tunnel_br(None) self.agent.setup_tunnel_br() - self.assertTrue(intbr_patch_fn.called) + self.assertTrue(create_tun.called) + self.assertTrue(setup_controllers.called) + self.assertTrue(int_patch_port.called) + self.assertTrue(tun_patch_port.called) + + def test_setup_tunnel_br_ports_exits_drop_flows(self): + cfg.CONF.set_override('drop_flows_on_start', True, 'AGENT') + with mock.patch.object(self.agent.tun_br, 'port_exists', + return_value=True),\ + mock.patch.object(self.agent, 'tun_br'),\ + mock.patch.object(self.agent.int_br, 'port_exists', + return_value=True),\ + mock.patch.object(self.agent.tun_br, 'setup_controllers'),\ + mock.patch.object(self.agent, 'patch_tun_ofport', new=2),\ + mock.patch.object(self.agent, 'patch_int_ofport', new=2),\ + mock.patch.object(self.agent.tun_br, + 'delete_flows') as delete,\ + mock.patch.object(self.agent.int_br, + "add_patch_port") as int_patch_port,\ + mock.patch.object(self.agent.tun_br, + "add_patch_port") as tun_patch_port,\ + mock.patch.object(sys, "exit"): + self.agent.setup_tunnel_br(None) + self.agent.setup_tunnel_br() + self.assertFalse(int_patch_port.called) + self.assertFalse(tun_patch_port.called) + self.assertTrue(delete.called) def test_setup_tunnel_port(self): self.agent.tun_br = mock.Mock() @@ -975,12 +1134,15 @@ class TestOvsNeutronAgent(object): return_value=fake_tunnel_details),\ mock.patch.object( self.agent, - '_setup_tunnel_port') as _setup_tunnel_port_fn: + '_setup_tunnel_port') as _setup_tunnel_port_fn,\ + mock.patch.object(self.agent, + 'cleanup_stale_flows') as cleanup: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() 
expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f', '100.101.31.15', 'vxlan')] _setup_tunnel_port_fn.assert_has_calls(expected_calls) + self.assertEqual([], cleanup.mock_calls) def test_tunnel_sync_invalid_ip_address(self): fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'}, @@ -990,13 +1152,16 @@ class TestOvsNeutronAgent(object): return_value=fake_tunnel_details),\ mock.patch.object( self.agent, - '_setup_tunnel_port') as _setup_tunnel_port_fn: + '_setup_tunnel_port') as _setup_tunnel_port_fn,\ + mock.patch.object(self.agent, + 'cleanup_stale_flows') as cleanup: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() _setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br, 'vxlan-64646464', '100.100.100.100', 'vxlan') + self.assertEqual([], cleanup.mock_calls) def test_tunnel_update(self): kwargs = {'tunnel_ip': '10.10.10.10', @@ -1046,8 +1211,11 @@ class TestOvsNeutronAgent(object): mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_physical_bridges') as setup_phys_br,\ mock.patch.object(time, 'sleep'),\ + mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'update_stale_ofport_rules') as update_stale, \ mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'update_stale_ofport_rules') as update_stale: + 'cleanup_stale_flows') as cleanup: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') scan_ports.side_effect = [reply2, reply3] @@ -1067,6 +1235,7 @@ class TestOvsNeutronAgent(object): mock.call(reply2, False), mock.call(reply3, True) ]) + cleanup.assert_called_once_with() self.assertTrue(update_stale.called) # Verify the OVS restart we triggered in the loop # re-setup the bridges @@ -1089,6 +1258,26 @@ class TestOvsNeutronAgent(object): self.agent.state_rpc.client): self.assertEqual(10, rpc_client.timeout) + def test_cleanup_stale_flows_iter_0(self): + with mock.patch.object(self.agent.int_br, 'agent_uuid_stamp', + new=1234),\ + mock.patch.object(self.agent.int_br, + 
'dump_flows_all_tables') as dump_flows,\ + mock.patch.object(self.agent.int_br, + 'delete_flows') as del_flow: + dump_flows.return_value = [ + 'cookie=0x4d2, duration=50.156s, table=0,actions=drop', + 'cookie=0x4321, duration=54.143s, table=2, priority=0', + 'cookie=0x2345, duration=50.125s, table=2, priority=0', + 'cookie=0x4d2, duration=52.112s, table=3, actions=drop', + ] + self.agent.cleanup_stale_flows() + expected = [ + mock.call(cookie='0x4321/-1', table='2'), + mock.call(cookie='0x2345/-1', table='2'), + ] + self.assertEqual(expected, del_flow.mock_calls) + def test_set_rpc_timeout_no_value(self): self.agent.quitting_rpc_timeout = None with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc: @@ -1973,7 +2162,6 @@ class TestOvsDvrNeutronAgent(object): ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]] expected_on_int_br = [ # setup_dvr_flows_on_integ_br - mock.call.delete_flows(), mock.call.setup_canary_table(), mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1), @@ -2140,7 +2328,7 @@ class TestOvsDvrNeutronAgent(object): # block RPC calls and bridge calls self.agent.setup_physical_bridges = mock.Mock() self.agent.setup_integration_br = mock.Mock() - self.agent.reset_tunnel_br = mock.Mock() + self.agent.setup_tunnel_br = mock.Mock() self.agent.state_rpc = mock.Mock() try: self.agent.rpc_loop(polling_manager=mock.Mock()) @@ -2178,3 +2366,26 @@ class TestOvsDvrNeutronAgent(object): class TestOvsDvrNeutronAgentOFCtl(TestOvsDvrNeutronAgent, ovs_test_base.OVSOFCtlTestBase): pass + + +class TestValidateTunnelLocalIP(base.BaseTestCase): + def test_validate_local_ip_no_tunneling(self): + cfg.CONF.set_override('tunnel_types', [], group='AGENT') + # The test will pass simply if no exception is raised by the next call: + ovs_agent.validate_local_ip(FAKE_IP1) + + def test_validate_local_ip_with_valid_ip(self): + cfg.CONF.set_override('tunnel_types', ['vxlan'], group='AGENT') + mock_get_device_by_ip = mock.patch.object( + 
ip_lib.IPWrapper, 'get_device_by_ip').start() + ovs_agent.validate_local_ip(FAKE_IP1) + mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) + + def test_validate_local_ip_with_invalid_ip(self): + cfg.CONF.set_override('tunnel_types', ['vxlan'], group='AGENT') + mock_get_device_by_ip = mock.patch.object( + ip_lib.IPWrapper, 'get_device_by_ip').start() + mock_get_device_by_ip.return_value = None + with testtools.ExpectedException(SystemExit): + ovs_agent.validate_local_ip(FAKE_IP1) + mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py index e6f7fadb0b7..72cef8cfb16 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py @@ -19,6 +19,7 @@ import time import mock from oslo_config import cfg from oslo_log import log +import six from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib @@ -28,6 +29,12 @@ from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base +def nonzero(f): + if six.PY3: + return f.__bool__() + else: + return f.__nonzero__() + # Useful global dummy variables. 
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7' LS_ID = 420 @@ -156,17 +163,21 @@ class TunnelTest(object): def _define_expected_calls(self, arp_responder=False): self.mock_int_bridge_cls_expected = [ - mock.call(self.INT_BRIDGE), + mock.call(self.INT_BRIDGE, + datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ - mock.call(self.MAP_TUN_BRIDGE), + mock.call(self.MAP_TUN_BRIDGE, + datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ - mock.call(self.TUN_BRIDGE), + mock.call(self.TUN_BRIDGE, + datapath_type=mock.ANY), ] self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge_expected = [ + mock.call.set_agent_uuid_stamp(mock.ANY), mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), @@ -177,11 +188,11 @@ class TunnelTest(object): self.mock_map_tun_bridge_expected = [ mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), - mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ - mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE), + mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, + 'type'), mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] @@ -200,11 +211,17 @@ class TunnelTest(object): ] self.mock_tun_bridge_expected = [ - mock.call.reset_bridge(secure_mode=True), + mock.call.set_agent_uuid_stamp(mock.ANY), + mock.call.bridge_exists(mock.ANY), + nonzero(mock.call.bridge_exists()), mock.call.setup_controllers(mock.ANY), + mock.call.port_exists('patch-int'), + nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ + mock.call.port_exists('patch-tun'), + nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-tun', 'patch-int'), ] self.mock_int_bridge_expected += [ @@ -214,7 +231,6 @@ class TunnelTest(object): ] 
self.mock_tun_bridge_expected += [ - mock.call.delete_flows(), mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] @@ -510,8 +526,12 @@ class TunnelTest(object): mock.patch.object(self.mod_agent.OVSNeutronAgent, 'tunnel_sync'),\ mock.patch.object(time, 'sleep'),\ - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'update_stale_ofport_rules') as update_stale: + mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'update_stale_ofport_rules') as update_stale,\ + mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'cleanup_stale_flows') as cleanup: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') scan_ports.side_effect = [reply2, reply3] @@ -545,6 +565,8 @@ class TunnelTest(object): 'removed': set(['tap0']), 'added': set([])}, False) ]) + + cleanup.assert_called_once_with() self.assertTrue(update_stale.called) self._verify_mock_calls() @@ -558,16 +580,20 @@ class TunnelTestUseVethInterco(TunnelTest): def _define_expected_calls(self, arp_responder=False): self.mock_int_bridge_cls_expected = [ - mock.call(self.INT_BRIDGE), + mock.call(self.INT_BRIDGE, + datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ - mock.call(self.MAP_TUN_BRIDGE), + mock.call(self.MAP_TUN_BRIDGE, + datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ - mock.call(self.TUN_BRIDGE), + mock.call(self.TUN_BRIDGE, + datapath_type=mock.ANY), ] self.mock_int_bridge_expected = [ + mock.call.set_agent_uuid_stamp(mock.ANY), mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), @@ -578,11 +604,11 @@ class TunnelTestUseVethInterco(TunnelTest): self.mock_map_tun_bridge_expected = [ mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), - mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_port(self.intb), ] self.mock_int_bridge_expected += [ - mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE), + mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, + 
'type'), mock.call.add_port(self.inta) ] @@ -594,11 +620,17 @@ class TunnelTestUseVethInterco(TunnelTest): ] self.mock_tun_bridge_expected = [ - mock.call.reset_bridge(secure_mode=True), + mock.call.set_agent_uuid_stamp(mock.ANY), + mock.call.bridge_exists(mock.ANY), + nonzero(mock.call.bridge_exists()), mock.call.setup_controllers(mock.ANY), + mock.call.port_exists('patch-int'), + nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ + mock.call.port_exists('patch-tun'), + nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-tun', 'patch-int') ] self.mock_int_bridge_expected += [ @@ -607,7 +639,6 @@ class TunnelTestUseVethInterco(TunnelTest): 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] self.mock_tun_bridge_expected += [ - mock.call.delete_flows(), mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py b/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py index 018d53bd02a..594f559e971 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py +++ b/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py @@ -14,9 +14,9 @@ # under the License. 
import fixtures -import logging as std_logging import mock from oslo_db import exception as exc +from oslo_log import log as logging from sqlalchemy.orm import query import neutron.db.api as db @@ -47,7 +47,7 @@ class HelpersTest(testlib_api.SqlTestCase): fixtures.FakeLogger( name=helpers.__name__, format=base.LOG_FORMAT, - level=std_logging.DEBUG + level=logging.DEBUG )) def check_raw_segment(self, expected, observed): diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py new file mode 100644 index 00000000000..fb0ffdfc43b --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py @@ -0,0 +1,55 @@ +# Copyright (c) 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers import type_geneve +from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel +from neutron.tests.unit.plugins.ml2 import test_rpc +from neutron.tests.unit import testlib_api + + +TUNNEL_IP_ONE = "10.10.10.77" +TUNNEL_IP_TWO = "10.10.10.78" +HOST_ONE = 'fake_host_one1' +HOST_TWO = 'fake_host_two2' + + +class GeneveTypeTest(base_type_tunnel.TunnelTypeTestMixin, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_geneve.GeneveTypeDriver + TYPE = p_const.TYPE_GENEVE + + def test_get_endpoints(self): + self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) + self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) + + endpoints = self.driver.get_endpoints() + for endpoint in endpoints: + if endpoint['ip_address'] == TUNNEL_IP_ONE: + self.assertEqual(HOST_ONE, endpoint['host']) + elif endpoint['ip_address'] == TUNNEL_IP_TWO: + self.assertEqual(HOST_TWO, endpoint['host']) + + +class GeneveTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_geneve.GeneveTypeDriver + + +class GeneveTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, + test_rpc.RpcCallbacksTestCase, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_geneve.GeneveTypeDriver + TYPE = p_const.TYPE_GENEVE diff --git a/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py b/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py index 443a82845db..174d3c9640a 100644 --- a/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py +++ b/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py @@ -13,25 +13,24 @@ # License for the specific language governing permissions and limitations # under the License. 
-from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ - import test_agent_scheduler +from neutron.tests.unit.db import test_agentschedulers_db from neutron.tests.unit.plugins.ml2 import test_plugin class Ml2AgentSchedulerTestCase( - test_agent_scheduler.OvsAgentSchedulerTestCase): + test_agentschedulers_db.OvsAgentSchedulerTestCase): plugin_str = test_plugin.PLUGIN_NAME l3_plugin = ('neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin') class Ml2L3AgentNotifierTestCase( - test_agent_scheduler.OvsL3AgentNotifierTestCase): + test_agentschedulers_db.OvsL3AgentNotifierTestCase): plugin_str = test_plugin.PLUGIN_NAME l3_plugin = ('neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin') class Ml2DhcpAgentNotifierTestCase( - test_agent_scheduler.OvsDhcpAgentNotifierTestCase): + test_agentschedulers_db.OvsDhcpAgentNotifierTestCase): plugin_str = test_plugin.PLUGIN_NAME diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index d31dd98bfe7..56c0a3d270b 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -49,6 +49,7 @@ from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers import type_vlan from neutron.plugins.ml2 import models from neutron.plugins.ml2 import plugin as ml2_plugin +from neutron.services.qos import qos_consts from neutron.tests import base from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc @@ -94,12 +95,12 @@ class Ml2ConfFixture(PluginConfFixture): class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): _mechanism_drivers = ['logger', 'test'] + l3_plugin = ('neutron.tests.unit.extensions.test_l3.' + 'TestL3NatServicePlugin') def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" - l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 
- 'TestL3NatServicePlugin') - service_plugins = {'l3_plugin_name': l3_plugin} + service_plugins = {'l3_plugin_name': self.l3_plugin} # Ensure that the parent setup can be called without arguments # by the common configuration setUp. parent_setup = functools.partial( @@ -139,6 +140,37 @@ class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase): self.assertFalse(self._skip_native_bulk) +class TestMl2SupportedQosRuleTypes(Ml2PluginV2TestCase): + + def test_empty_driver_list(self, *mocks): + mech_drivers_mock = mock.PropertyMock(return_value=[]) + with mock.patch.object(self.driver.mechanism_manager, + 'ordered_mech_drivers', + new_callable=mech_drivers_mock): + self.assertEqual( + [], self.driver.mechanism_manager.supported_qos_rule_types) + + def test_no_rule_types_in_common(self): + self.assertEqual( + [], self.driver.mechanism_manager.supported_qos_rule_types) + + @mock.patch.object(mech_logger.LoggerMechanismDriver, + 'supported_qos_rule_types', + new_callable=mock.PropertyMock, + create=True) + @mock.patch.object(mech_test.TestMechanismDriver, + 'supported_qos_rule_types', + new_callable=mock.PropertyMock, + create=True) + def test_rule_type_in_common(self, *mocks): + # make sure both plugins have the same supported qos rule types + for mock_ in mocks: + mock_.return_value = qos_consts.VALID_RULE_TYPES + self.assertEqual( + qos_consts.VALID_RULE_TYPES, + self.driver.mechanism_manager.supported_qos_rule_types) + + class TestMl2BasicGet(test_plugin.TestBasicGet, Ml2PluginV2TestCase): pass diff --git a/neutron/tests/unit/plugins/ml2/test_rpc.py b/neutron/tests/unit/plugins/ml2/test_rpc.py index 72775b9fe80..5e79eb7619b 100644 --- a/neutron/tests/unit/plugins/ml2/test_rpc.py +++ b/neutron/tests/unit/plugins/ml2/test_rpc.py @@ -32,6 +32,7 @@ from neutron.common import topics from neutron.plugins.ml2.drivers import type_tunnel from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import rpc as plugin_rpc +from neutron.services.qos import qos_consts from 
neutron.tests import base @@ -135,6 +136,34 @@ class RpcCallbacksTestCase(base.BaseTestCase): self.callbacks.get_device_details(mock.Mock()) self.assertTrue(self.plugin.update_port_status.called) + def test_get_device_details_qos_policy_id_none(self): + port = collections.defaultdict(lambda: 'fake_port') + self.plugin.get_bound_port_context().current = port + self.plugin.get_bound_port_context().network._network = ( + {"id": "fake_network"}) + res = self.callbacks.get_device_details(mock.Mock(), host='fake') + self.assertIsNone(res['qos_policy_id']) + + def test_get_device_details_qos_policy_id_inherited_from_network(self): + port = collections.defaultdict(lambda: 'fake_port') + self.plugin.get_bound_port_context().current = port + self.plugin.get_bound_port_context().network._network = ( + {"id": "fake_network", + qos_consts.QOS_POLICY_ID: 'test-policy-id'}) + res = self.callbacks.get_device_details(mock.Mock(), host='fake') + self.assertEqual('test-policy-id', res['qos_policy_id']) + + def test_get_device_details_qos_policy_id_taken_from_port(self): + port = collections.defaultdict( + lambda: 'fake_port', + {qos_consts.QOS_POLICY_ID: 'test-port-policy-id'}) + self.plugin.get_bound_port_context().current = port + self.plugin.get_bound_port_context().network._network = ( + {"id": "fake_network", + qos_consts.QOS_POLICY_ID: 'test-net-policy-id'}) + res = self.callbacks.get_device_details(mock.Mock(), host='fake') + self.assertEqual('test-port-policy-id', res['qos_policy_id']) + def _test_get_devices_list(self, callback, side_effect, expected): devices = [1, 2, 3, 4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} diff --git a/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py b/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py index b5ca8d18e1d..55b2abc2710 100644 --- a/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py +++ b/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py @@ -30,8 +30,8 @@ from 
neutron.db import db_base_plugin_v2 from neutron.db import external_net_db from neutron.db import l3_db from neutron.db import securitygroups_db -from neutron.extensions import portbindings from neutron.extensions import securitygroup as ext_sg +from neutron.plugins.opencontrail import contrail_plugin from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin @@ -286,7 +286,7 @@ class TestContrailSecurityGroups(test_sg.TestSecurityGroups, class TestContrailPortBinding(ContrailPluginTestCase, test_bindings.PortBindingsTestCase): - VIF_TYPE = portbindings.VIF_TYPE_VROUTER + VIF_TYPE = contrail_plugin.VIF_TYPE_VROUTER HAS_PORT_FILTER = True def setUp(self): diff --git a/neutron/tests/unit/quota/test_resource.py b/neutron/tests/unit/quota/test_resource.py index 7f668539807..88a00bbc924 100644 --- a/neutron/tests/unit/quota/test_resource.py +++ b/neutron/tests/unit/quota/test_resource.py @@ -165,7 +165,8 @@ class TestTrackedResource(testlib_api.SqlTestCaseLight): res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( - self.context, self.resource, self.tenant_id, in_use=2) + self.context, self.resource, self.tenant_id, + reserved=0, in_use=2) def test_count_with_dirty_true_no_usage_info(self): res = self._create_resource() @@ -184,7 +185,8 @@ class TestTrackedResource(testlib_api.SqlTestCaseLight): self.tenant_id) res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( - self.context, self.resource, self.tenant_id, in_use=2) + self.context, self.resource, self.tenant_id, + reserved=0, in_use=2) def test_add_delete_data_triggers_event(self): res = self._create_resource() @@ -251,4 +253,5 @@ class TestTrackedResource(testlib_api.SqlTestCaseLight): # and now it should be in sync self.assertNotIn(self.tenant_id, 
res._out_of_sync_tenants) mock_set_quota_usage.assert_called_once_with( - self.context, self.resource, self.tenant_id, in_use=2) + self.context, self.resource, self.tenant_id, + reserved=0, in_use=2) diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index 4196f6ce359..71bc94c7a3c 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -333,7 +333,7 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, self.remove_networks_from_down_agents() def test_reschedule_doesnt_occur_if_no_agents(self): - agents = self._create_and_set_agents_down(['host-a'], 1) + agents = self._create_and_set_agents_down(['host-a', 'host-b'], 2) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object( self, 'remove_network_from_dhcp_agent') as rn: diff --git a/neutron/tests/unit/services/qos/__init__.py b/neutron/tests/unit/services/qos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/services/qos/base.py b/neutron/tests/unit/services/qos/base.py new file mode 100644 index 00000000000..633b35aadab --- /dev/null +++ b/neutron/tests/unit/services/qos/base.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.api.rpc.callbacks.consumer import registry as cons_registry +from neutron.api.rpc.callbacks.producer import registry as prod_registry +from neutron.api.rpc.callbacks import resource_manager +from neutron.tests.unit import testlib_api + + +class BaseQosTestCase(testlib_api.SqlTestCase): + def setUp(self): + super(BaseQosTestCase, self).setUp() + + with mock.patch.object( + resource_manager.ResourceCallbacksManager, '_singleton', + new_callable=mock.PropertyMock(return_value=False)): + + self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager() + self.prod_mgr = resource_manager.ProducerResourceCallbacksManager() + for mgr in (self.cons_mgr, self.prod_mgr): + mgr.clear() + + mock.patch.object( + cons_registry, '_get_manager', return_value=self.cons_mgr).start() + + mock.patch.object( + prod_registry, '_get_manager', return_value=self.prod_mgr).start() diff --git a/neutron/tests/unit/services/qos/notification_drivers/__init__.py b/neutron/tests/unit/services/qos/notification_drivers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/cmd/eventlet/plugins/nec_neutron_agent.py b/neutron/tests/unit/services/qos/notification_drivers/dummy.py similarity index 58% rename from neutron/cmd/eventlet/plugins/nec_neutron_agent.py rename to neutron/tests/unit/services/qos/notification_drivers/dummy.py index 7cd7503a93b..ce3de1f4875 100644 --- a/neutron/cmd/eventlet/plugins/nec_neutron_agent.py +++ b/neutron/tests/unit/services/qos/notification_drivers/dummy.py @@ -1,6 +1,3 @@ -# Copyright 2012 NEC Corporation. -# All Rights Reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,16 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. 
-import sys - -from networking_nec.plugins.openflow.agent import l2_agent - -from neutron.common import config as common_config -from neutron.plugins.nec import config as nec_config +from neutron.services.qos.notification_drivers import qos_base -def main(): - nec_config.register_agent_opts() - common_config.init(sys.argv[1:]) - common_config.setup_logging() - l2_agent.run() +class DummyQosServiceNotificationDriver( + qos_base.QosServiceNotificationDriverBase): + """Dummy service notification driver for QoS.""" + + def get_description(self): + return "Dummy" + + def create_policy(self, policy): + pass + + def update_policy(self, policy): + pass + + def delete_policy(self, policy): + pass diff --git a/neutron/tests/unit/services/qos/notification_drivers/test_manager.py b/neutron/tests/unit/services/qos/notification_drivers/test_manager.py new file mode 100644 index 00000000000..c46e99a24db --- /dev/null +++ b/neutron/tests/unit/services/qos/notification_drivers/test_manager.py @@ -0,0 +1,107 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_config import cfg + +from neutron.api.rpc.callbacks import events +from neutron import context +from neutron.objects.qos import policy as policy_object +from neutron.services.qos.notification_drivers import manager as driver_mgr +from neutron.services.qos.notification_drivers import message_queue +from neutron.tests.unit.services.qos import base + +DUMMY_DRIVER = ("neutron.tests.unit.services.qos.notification_drivers." + "dummy.DummyQosServiceNotificationDriver") + + +def _load_multiple_drivers(): + cfg.CONF.set_override( + "notification_drivers", + ["message_queue", DUMMY_DRIVER], + "qos") + + +class TestQosDriversManagerBase(base.BaseQosTestCase): + + def setUp(self): + super(TestQosDriversManagerBase, self).setUp() + self.config_parse() + self.setup_coreplugin() + config = cfg.ConfigOpts() + config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos") + self.policy_data = {'policy': { + 'id': 7777777, + 'tenant_id': 888888, + 'name': 'test-policy', + 'description': 'test policy description', + 'shared': True}} + + self.context = context.get_admin_context() + self.policy = policy_object.QosPolicy(self.context, + **self.policy_data['policy']) + ctxt = None + self.kwargs = {'context': ctxt} + + +class TestQosDriversManager(TestQosDriversManagerBase): + + def setUp(self): + super(TestQosDriversManager, self).setUp() + #TODO(Qos): Fix this unittest to test manager and not message_queue + # notification driver + rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc' + '.ResourcesPushRpcApi').start() + self.rpc_api = rpc_api_cls.return_value + self.driver_manager = driver_mgr.QosServiceNotificationDriverManager() + + def _validate_registry_params(self, event_type, policy): + self.rpc_api.push.assert_called_with(self.context, policy, + event_type) + + def test_create_policy_default_configuration(self): + #RPC driver should be loaded by default + self.driver_manager.create_policy(self.context, self.policy) + 
self.assertFalse(self.rpc_api.push.called) + + def test_update_policy_default_configuration(self): + #RPC driver should be loaded by default + self.driver_manager.update_policy(self.context, self.policy) + self._validate_registry_params(events.UPDATED, self.policy) + + def test_delete_policy_default_configuration(self): + #RPC driver should be loaded by default + self.driver_manager.delete_policy(self.context, self.policy) + self._validate_registry_params(events.DELETED, self.policy) + + +class TestQosDriversManagerMulti(TestQosDriversManagerBase): + + def _test_multi_drivers_configuration_op(self, op): + _load_multiple_drivers() + driver_manager = driver_mgr.QosServiceNotificationDriverManager() + handler = '%s_policy' % op + with mock.patch('.'.join([DUMMY_DRIVER, handler])) as dummy_mock: + rpc_driver = message_queue.RpcQosServiceNotificationDriver + with mock.patch.object(rpc_driver, handler) as rpc_mock: + getattr(driver_manager, handler)(self.context, self.policy) + for mock_ in (dummy_mock, rpc_mock): + mock_.assert_called_with(self.context, self.policy) + + def test_multi_drivers_configuration_create(self): + self._test_multi_drivers_configuration_op('create') + + def test_multi_drivers_configuration_update(self): + self._test_multi_drivers_configuration_op('update') + + def test_multi_drivers_configuration_delete(self): + self._test_multi_drivers_configuration_op('delete') diff --git a/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py b/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py new file mode 100644 index 00000000000..0a95cae4108 --- /dev/null +++ b/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.api.rpc.callbacks import events +from neutron import context +from neutron.objects.qos import policy as policy_object +from neutron.objects.qos import rule as rule_object +from neutron.services.qos.notification_drivers import message_queue +from neutron.tests.unit.services.qos import base + +DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + + +class TestQosRpcNotificationDriver(base.BaseQosTestCase): + + def setUp(self): + super(TestQosRpcNotificationDriver, self).setUp() + rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc' + '.ResourcesPushRpcApi').start() + self.rpc_api = rpc_api_cls.return_value + self.driver = message_queue.RpcQosServiceNotificationDriver() + + self.policy_data = {'policy': { + 'id': 7777777, + 'tenant_id': 888888, + 'name': 'testi-policy', + 'description': 'test policyi description', + 'shared': True}} + + self.rule_data = {'bandwidth_limit_rule': { + 'id': 7777777, + 'max_kbps': 100, + 'max_burst_kbps': 150}} + + self.context = context.get_admin_context() + self.policy = policy_object.QosPolicy(self.context, + **self.policy_data['policy']) + + self.rule = rule_object.QosBandwidthLimitRule( + self.context, + **self.rule_data['bandwidth_limit_rule']) + + def _validate_push_params(self, event_type, policy): + self.rpc_api.push.assert_called_once_with(self.context, policy, + event_type) + + def test_create_policy(self): + self.driver.create_policy(self.context, self.policy) + self.assertFalse(self.rpc_api.push.called) + + def test_update_policy(self): + 
self.driver.update_policy(self.context, self.policy) + self._validate_push_params(events.UPDATED, self.policy) + + def test_delete_policy(self): + self.driver.delete_policy(self.context, self.policy) + self._validate_push_params(events.DELETED, self.policy) diff --git a/neutron/tests/unit/services/qos/test_qos_plugin.py b/neutron/tests/unit/services/qos/test_qos_plugin.py new file mode 100644 index 00000000000..6dea3bdfa0e --- /dev/null +++ b/neutron/tests/unit/services/qos/test_qos_plugin.py @@ -0,0 +1,185 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_config import cfg + +from neutron.common import exceptions as n_exc +from neutron import context +from neutron import manager +from neutron.objects import base as base_object +from neutron.objects.qos import policy as policy_object +from neutron.objects.qos import rule as rule_object +from neutron.plugins.common import constants +from neutron.tests.unit.services.qos import base + + +DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + + +class TestQosPlugin(base.BaseQosTestCase): + + def setUp(self): + super(TestQosPlugin, self).setUp() + self.setup_coreplugin() + + mock.patch('neutron.db.api.create_object').start() + mock.patch('neutron.db.api.update_object').start() + mock.patch('neutron.db.api.delete_object').start() + mock.patch('neutron.db.api.get_object').start() + mock.patch( + 'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start() + + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + cfg.CONF.set_override("service_plugins", ["qos"]) + + mgr = manager.NeutronManager.get_instance() + self.qos_plugin = mgr.get_service_plugins().get( + constants.QOS) + + self.qos_plugin.notification_driver_manager = mock.Mock() + + self.ctxt = context.Context('fake_user', 'fake_tenant') + self.policy_data = { + 'policy': {'id': 7777777, + 'tenant_id': 888888, + 'name': 'test-policy', + 'description': 'Test policy description', + 'shared': True}} + + self.rule_data = { + 'bandwidth_limit_rule': {'id': 7777777, + 'max_kbps': 100, + 'max_burst_kbps': 150}} + + self.policy = policy_object.QosPolicy( + self.ctxt, **self.policy_data['policy']) + + self.rule = rule_object.QosBandwidthLimitRule( + self.ctxt, **self.rule_data['bandwidth_limit_rule']) + + def _validate_notif_driver_params(self, method_name): + method = getattr(self.qos_plugin.notification_driver_manager, + method_name) + self.assertTrue(method.called) + self.assertIsInstance( + method.call_args[0][1], policy_object.QosPolicy) + + def test_add_policy(self): + 
self.qos_plugin.create_policy(self.ctxt, self.policy_data) + self._validate_notif_driver_params('create_policy') + + def test_update_policy(self): + fields = base_object.get_updatable_fields( + policy_object.QosPolicy, self.policy_data['policy']) + self.qos_plugin.update_policy( + self.ctxt, self.policy.id, {'policy': fields}) + self._validate_notif_driver_params('update_policy') + + @mock.patch('neutron.db.api.get_object', return_value=None) + def test_delete_policy(self, *mocks): + self.qos_plugin.delete_policy(self.ctxt, self.policy.id) + self._validate_notif_driver_params('delete_policy') + + def test_create_policy_rule(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=self.policy): + self.qos_plugin.create_policy_bandwidth_limit_rule( + self.ctxt, self.policy.id, self.rule_data) + self._validate_notif_driver_params('update_policy') + + def test_update_policy_rule(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=self.policy): + self.qos_plugin.update_policy_bandwidth_limit_rule( + self.ctxt, self.rule.id, self.policy.id, self.rule_data) + self._validate_notif_driver_params('update_policy') + + def test_delete_policy_rule(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=self.policy): + self.qos_plugin.delete_policy_bandwidth_limit_rule( + self.ctxt, self.rule.id, self.policy.id) + self._validate_notif_driver_params('update_policy') + + def test_get_policy_bandwidth_limit_rules_for_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=self.policy): + with mock.patch('neutron.objects.qos.rule.' + 'QosBandwidthLimitRule.' 
+ 'get_objects') as get_object_mock: + self.qos_plugin.get_policy_bandwidth_limit_rules( + self.ctxt, self.policy.id) + get_object_mock.assert_called_once_with( + self.ctxt, qos_policy_id=self.policy.id) + + def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=self.policy): + with mock.patch('neutron.objects.qos.rule.' + 'QosBandwidthLimitRule.' + 'get_objects') as get_object_mock: + + filters = {'filter': 'filter_id'} + self.qos_plugin.get_policy_bandwidth_limit_rules( + self.ctxt, self.policy.id, filters=filters) + get_object_mock.assert_called_once_with( + self.ctxt, qos_policy_id=self.policy.id, + filter='filter_id') + + def test_get_policy_for_nonexistent_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.get_policy, + self.ctxt, self.policy.id) + + def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.get_policy_bandwidth_limit_rule, + self.ctxt, self.rule.id, self.policy.id) + + def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.get_policy_bandwidth_limit_rules, + self.ctxt, self.policy.id) + + def test_create_policy_rule_for_nonexistent_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.create_policy_bandwidth_limit_rule, + self.ctxt, self.policy.id, self.rule_data) + + def test_update_policy_rule_for_nonexistent_policy(self): + with 
mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.update_policy_bandwidth_limit_rule, + self.ctxt, self.rule.id, self.policy.id, self.rule_data) + + def test_delete_policy_rule_for_nonexistent_policy(self): + with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id', + return_value=None): + self.assertRaises( + n_exc.QosPolicyNotFound, + self.qos_plugin.delete_policy_bandwidth_limit_rule, + self.ctxt, self.rule.id, self.policy.id) diff --git a/neutron/tests/unit/test_wsgi.py b/neutron/tests/unit/test_wsgi.py index b64e03937aa..ebb231afa86 100644 --- a/neutron/tests/unit/test_wsgi.py +++ b/neutron/tests/unit/test_wsgi.py @@ -217,7 +217,7 @@ class SerializerTest(base.BaseTestCase): serializer = wsgi.Serializer() result = serializer.serialize(input_data, content_type) - self.assertEqual('{"servers": ["test=pass"]}', result) + self.assertEqual(b'{"servers": ["test=pass"]}', result) def test_deserialize_raise_bad_request(self): """Test serialize verifies that exception is raises.""" @@ -308,7 +308,7 @@ class ResponseSerializerTest(testtools.TestCase): class JSONSerializer(object): def serialize(self, data, action='default'): - return 'pew_json' + return b'pew_json' class HeadersSerializer(object): def serialize(self, response, data, action): @@ -342,7 +342,7 @@ class ResponseSerializerTest(testtools.TestCase): response = self.serializer.serialize({}, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) - self.assertEqual('pew_json', response.body) + self.assertEqual(b'pew_json', response.body) self.assertEqual(404, response.status_int) def test_serialize_response_None(self): @@ -350,7 +350,7 @@ class ResponseSerializerTest(testtools.TestCase): None, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) - self.assertEqual('', response.body) + self.assertEqual(b'', response.body) 
self.assertEqual(404, response.status_int) @@ -488,28 +488,28 @@ class JSONDictSerializerTest(base.BaseTestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) - expected_json = '{"servers":{"a":[2,3]}}' + expected_json = b'{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) - result = result.replace('\n', '').replace(' ', '') + result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) def test_json_with_utf8(self): input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c'))) - expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' + expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) - result = result.replace('\n', '').replace(' ', '') + result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) def test_json_with_unicode(self): input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc'))) - expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' + expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) - result = result.replace('\n', '').replace(' ', '') + result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) diff --git a/neutron/wsgi.py b/neutron/wsgi.py index dd71a9b907c..cb302d306dc 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -19,7 +19,6 @@ Utility methods for working with WSGI servers from __future__ import print_function import errno -import logging as std_logging import os import socket import ssl @@ -93,6 +92,16 @@ CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) +def encode_body(body): + """Encode unicode body. + + WebOb requires to encode unicode body used to update response body. 
+ """ + if isinstance(body, six.text_type): + return body.encode('utf-8') + return body + + class WorkerService(common_service.ServiceBase): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, service, application): @@ -240,7 +249,7 @@ class Server(object): # The API service should run in the current process. self._server = service # Dump the initial option values - cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + cfg.CONF.log_opt_values(LOG, logging.DEBUG) service.start() systemd.notify_once() else: @@ -427,7 +436,7 @@ class JSONDictSerializer(DictSerializer): def default(self, data): def sanitizer(obj): return six.text_type(obj) - return jsonutils.dumps(data, default=sanitizer) + return encode_body(jsonutils.dumps(data, default=sanitizer)) class ResponseHeaderSerializer(ActionDispatcher): diff --git a/requirements.txt b/requirements.txt index 124ec85587e..8fbf815e8ad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
-pbr<2.0,>=1.3 +pbr<2.0,>=1.6 Paste PasteDeploy>=1.5.0 @@ -16,29 +16,30 @@ requests>=2.5.2 Jinja2>=2.6 # BSD License (3 clause) keystonemiddleware>=2.0.0 netaddr>=0.7.12 -python-neutronclient<3,>=2.3.11 +python-neutronclient<3,>=2.6.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 SQLAlchemy<1.1.0,>=0.9.7 WebOb>=1.2.3 python-keystoneclient>=1.6.0 -alembic>=0.7.2 +alembic>=0.8.0 six>=1.9.0 stevedore>=1.5.0 # Apache-2.0 oslo.concurrency>=2.3.0 # Apache-2.0 -oslo.config>=1.11.0 # Apache-2.0 +oslo.config>=2.3.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 -oslo.db>=1.12.0 # Apache-2.0 +oslo.db>=2.4.1 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 -oslo.log>=1.6.0 # Apache-2.0 +oslo.log>=1.8.0 # Apache-2.0 oslo.messaging!=1.17.0,!=1.17.1,>=1.16.0 # Apache-2.0 -oslo.middleware>=2.4.0 # Apache-2.0 +oslo.middleware>=2.8.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 -oslo.service>=0.1.0 # Apache-2.0 -oslo.utils>=1.9.0 # Apache-2.0 +oslo.service>=0.7.0 # Apache-2.0 +oslo.utils>=2.0.0 # Apache-2.0 +oslo.versionedobjects>=0.6.0 -python-novaclient>=2.22.0 +python-novaclient>=2.26.0 # Windows-only requirements pywin32;sys_platform=='win32' diff --git a/setup.cfg b/setup.cfg index f1bcddaf9f8..1c39d1c3cfc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,6 @@ data_files = etc/neutron/rootwrap.d/ipset-firewall.filters etc/neutron/rootwrap.d/l3.filters etc/neutron/rootwrap.d/linuxbridge-plugin.filters - etc/neutron/rootwrap.d/nec-plugin.filters etc/neutron/rootwrap.d/openvswitch-plugin.filters etc/init.d = etc/init.d/neutron-server etc/neutron/plugins/bigswitch = @@ -57,7 +56,6 @@ data_files = etc/neutron/plugins/cisco/cisco_router_plugin.ini etc/neutron/plugins/cisco/cisco_vpn_agent.ini etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini - etc/neutron/plugins/ibm = etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini etc/neutron/plugins/midonet = etc/neutron/plugins/midonet/midonet.ini 
etc/neutron/plugins/ml2 = etc/neutron/plugins/bigswitch/restproxy.ini @@ -65,18 +63,13 @@ data_files = etc/neutron/plugins/ml2/ml2_conf.ini etc/neutron/plugins/ml2/ml2_conf_brocade.ini etc/neutron/plugins/ml2/ml2_conf_brocade_fi_ni.ini - etc/neutron/plugins/ml2/ml2_conf_cisco.ini etc/neutron/plugins/ml2/ml2_conf_ofa.ini etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini etc/neutron/plugins/ml2/ml2_conf_sriov.ini - etc/neutron/plugins/nuage/nuage_plugin.ini etc/neutron/plugins/ml2/openvswitch_agent.ini etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini - etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini etc/neutron/plugins/oneconvergence = etc/neutron/plugins/oneconvergence/nvsdplugin.ini - etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini - etc/neutron/plugins/vmware = etc/neutron/plugins/vmware/nsx.ini etc/neutron/plugins/opencontrail = etc/neutron/plugins/opencontrail/contrailplugin.ini etc/neutron/plugins/ovsvapp = etc/neutron/plugins/ovsvapp/ovsvapp_agent.ini scripts = @@ -89,19 +82,18 @@ console_scripts = neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main neutron-hyperv-agent = neutron.cmd.eventlet.plugins.hyperv_neutron_agent:main neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main - neutron-ibm-agent = neutron.plugins.ibm.agent.sdnve_neutron_agent:main neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main neutron-linuxbridge-agent = neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent:main neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main neutron-mlnx-agent = neutron.cmd.eventlet.plugins.mlnx_neutron_agent:main - neutron-nec-agent = neutron.cmd.eventlet.plugins.nec_neutron_agent:main neutron-netns-cleanup = neutron.cmd.netns_cleanup:main neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main 
neutron-ovsvapp-agent = neutron.cmd.eventlet.plugins.ovsvapp_neutron_agent:main neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main + neutron-pd-notify = neutron.cmd.pd_notify:main neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main neutron-server = neutron.cmd.eventlet.server:main_wsgi_eventlet neutron-dev-server = neutron.cmd.eventlet.server:main_wsgi_pecan @@ -116,14 +108,10 @@ neutron.core_plugins = brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2 cisco = neutron.plugins.cisco.network_plugin:PluginV2 embrane = neutron.plugins.embrane.plugins.embrane_ml2_plugin:EmbraneMl2Plugin - ibm = neutron.plugins.ibm.sdnve_neutron_plugin:SdnvePluginV2 midonet = neutron.plugins.midonet.plugin:MidonetPluginV2 ml2 = neutron.plugins.ml2.plugin:Ml2Plugin - nec = neutron.plugins.nec.nec_plugin:NECPluginV2 nuage = neutron.plugins.nuage.plugin:NuagePlugin oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2 - plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2 - vmware = neutron.plugins.vmware.plugin:NsxMhPlugin neutron.service_plugins = dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin @@ -139,6 +127,7 @@ neutron.service_plugins = neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin ibm_l3 = neutron.services.l3_router.l3_sdnve:SdnveL3ServicePlugin + qos = neutron.services.qos.qos_plugin:QoSPlugin neutron.service_providers = # These are for backwards compat with Juno firewall service provider configuration values 
neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas:IptablesFwaasDriver @@ -152,13 +141,15 @@ neutron.service_providers = # These are for backwards compat with Juno vpnaas service provider configuration values neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver = neutron_vpnaas.services.vpn.service_drivers.cisco_ipsec:CiscoCsrIPsecVPNDriver neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver = neutron_vpnaas.services.vpn.service_drivers.ipsec:IPsecVPNDriver +neutron.qos.notification_drivers = + message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver neutron.ml2.type_drivers = flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver + geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver - nexus_vxlan = neutron.plugins.ml2.drivers.cisco.nexus.type_nexus_vxlan:NexusVxlanTypeDriver neutron.ml2.mechanism_drivers = ovsvapp = neutron.plugins.ml2.drivers.ovsvapp.mech_driver:OVSvAppAgentMechanismDriver opendaylight = neutron.plugins.ml2.drivers.opendaylight.driver:OpenDaylightMechanismDriver @@ -167,32 +158,32 @@ neutron.ml2.mechanism_drivers = linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver hyperv = neutron.plugins.ml2.drivers.hyperv.mech_hyperv:HypervMechanismDriver - # Note: ncs and cisco_ncs point to the same driver entrypoint - # TODO: The old name (ncs) can be dropped when it is no longer used - ncs = neutron.plugins.ml2.drivers.cisco.ncs.driver:NCSMechanismDriver - cisco_ncs = 
neutron.plugins.ml2.drivers.cisco.ncs.driver:NCSMechanismDriver - cisco_nexus = neutron.plugins.ml2.drivers.cisco.nexus.mech_cisco_nexus:CiscoNexusMechanismDriver - cisco_ucsm = neutron.plugins.ml2.drivers.cisco.ucsm.mech_cisco_ucsm:CiscoUcsmMechanismDriver l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver - bigswitch = neutron.plugins.ml2.drivers.mech_bigswitch.driver:BigSwitchMechanismDriver ofagent = neutron.plugins.ml2.drivers.ofagent.driver:OfagentMechanismDriver mlnx = neutron.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver brocade = networking_brocade.vdx.ml2driver.mechanism_brocade:BrocadeMechanism brocade_fi_ni = neutron.plugins.ml2.drivers.brocade.fi_ni.mechanism_brocade_fi_ni:BrocadeFiNiMechanism fslsdn = neutron.plugins.ml2.drivers.freescale.mechanism_fslsdn:FslsdnMechanismDriver sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver - nuage = neutron.plugins.ml2.drivers.mech_nuage.driver:NuageMechanismDriver fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver sdnve = neutron.plugins.ml2.drivers.ibm.mechanism_sdnve:SdnveMechanismDriver neutron.ml2.extension_drivers = test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver + qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver neutron.openstack.common.cache.backends = memory = neutron.openstack.common.cache._backends.memory:MemoryBackend neutron.ipam_drivers = fake = neutron.tests.unit.ipam.fake_driver:FakeDriver internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool +neutron.agent.l2.extensions = + qos = neutron.agent.l2.extensions.qos:QosAgentExtension +neutron.qos.agent_drivers = + ovs = 
neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver + sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver +neutron.agent.linux.pd_drivers = + dibbler = neutron.agent.linux.dibbler:PDDibbler # These are for backwards compat with Icehouse notification_driver configuration values oslo.messaging.notify.drivers = neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver @@ -200,6 +191,8 @@ oslo.messaging.notify.drivers = neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify._impl_messaging:MessagingV2Driver neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify._impl_messaging:MessagingDriver neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver +neutron.db.alembic_migrations = + neutron = neutron.db.migration:alembic_migrations [build_sphinx] all_files = 1 @@ -222,3 +215,6 @@ input_file = neutron/locale/neutron.pot [wheel] universal = 1 + +[pbr] +warnerrors = true diff --git a/test-requirements.txt b/test-requirements.txt index d26812f817d..db65578e0a9 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ # process, which may cause wedges in the gate later. 
hacking<0.11,>=0.10.0 -cliff>=1.13.0 # Apache-2.0 +cliff>=1.14.0 # Apache-2.0 coverage>=3.6 fixtures>=1.3.1 mock>=1.2 @@ -15,7 +15,7 @@ testrepository>=0.0.18 testtools>=1.4.0 testscenarios>=0.4 WebTest>=2.0 -oslotest>=1.9.0 # Apache-2.0 +oslotest>=1.10.0 # Apache-2.0 os-testr>=0.1.0 tempest-lib>=0.6.1 ddt>=0.7.0 diff --git a/tox.ini b/tox.ini index f5094b0cc1f..fc5fbbf38ff 100644 --- a/tox.ini +++ b/tox.ini @@ -101,142 +101,161 @@ commands = {posargs} commands = sphinx-build -W -b html doc/source doc/build/html [testenv:py34] -commands = python -m testtools.run \ - neutron.tests.unit.test_context \ - neutron.tests.unit.services.metering.drivers.test_iptables \ - neutron.tests.unit.services.metering.agents.test_metering_agent \ - neutron.tests.unit.services.test_provider_configuration \ - neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_sriov_nic_agent \ - neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_eswitch_manager \ - neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.common.test_config \ - neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_pci_lib \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.ovs_test_base \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_phys \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_int \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_tun \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.test_agent_scheduler \ - neutron.tests.unit.plugins.brocade.test_brocade_db \ - neutron.tests.unit.plugins.brocade.test_brocade_vlan \ - neutron.tests.unit.plugins.oneconvergence.test_nvsd_agent \ - neutron.tests.unit.plugins.oneconvergence.test_plugin_helper \ - neutron.tests.unit.plugins.oneconvergence.test_nvsdlib \ - neutron.tests.unit.plugins.ibm.test_sdnve_agent \ - neutron.tests.unit.plugins.ibm.test_sdnve_api \ - neutron.tests.unit.plugins.ml2.test_db \ - 
neutron.tests.unit.plugins.ml2.test_driver_context \ - neutron.tests.unit.plugins.ml2.test_port_binding \ - neutron.tests.unit.plugins.ml2.test_extension_driver_api \ - neutron.tests.unit.plugins.ml2.test_rpc \ - neutron.tests.unit.plugins.ml2.drivers.mlnx.test_mech_mlnx \ - neutron.tests.unit.plugins.ml2.drivers.openvswitch.mech_driver.test_mech_openvswitch \ - neutron.tests.unit.plugins.ml2.drivers.linuxbridge.mech_driver.test_mech_linuxbridge \ - neutron.tests.unit.plugins.ml2.drivers.linuxbridge.agent.test_linuxbridge_neutron_agent \ - neutron.tests.unit.plugins.ml2.drivers.base_type_tunnel \ - neutron.tests.unit.plugins.ml2.drivers.opendaylight.test_driver \ - neutron.tests.unit.plugins.ml2.drivers.ext_test \ - neutron.tests.unit.plugins.ml2.drivers.mech_sriov.mech_driver.test_mech_sriov_nic_switch \ - neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent \ - neutron.tests.unit.plugins.ml2.drivers.test_type_vxlan \ - neutron.tests.unit.plugins.ml2.drivers.test_type_gre \ - neutron.tests.unit.plugins.ml2.drivers.test_helpers \ - neutron.tests.unit.plugins.ml2.drivers.test_type_local \ - neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \ - neutron.tests.unit.plugins.ml2.drivers.test_type_flat \ - neutron.tests.unit.plugins.ml2.drivers.test_type_vlan \ - neutron.tests.unit.plugins.ml2.drivers.mechanism_test \ - neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.l2population_rpc_base \ - neutron.tests.unit.plugins.ml2.extensions.fake_extension \ - neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.test_l2population_rpc \ - neutron.tests.unit.plugins.ml2.drivers.l2pop.test_mech_driver \ - neutron.tests.unit.plugins.cisco.n1kv.test_n1kv_db \ - neutron.tests.unit.plugins.cisco.n1kv.fake_client \ - neutron.tests.unit.plugins.cisco.test_network_db \ - neutron.tests.unit.scheduler.test_l3_agent_scheduler \ - neutron.tests.unit.scheduler.test_dhcp_agent_scheduler \ - neutron.tests.unit.db.test_ipam_backend_mixin \ - 
neutron.tests.unit.db.test_l3_dvr_db \ - neutron.tests.unit.db.test_l3_hamode_db \ - neutron.tests.unit.db.test_migration \ - neutron.tests.unit.db.test_agents_db \ - neutron.tests.unit.db.quota.test_driver \ - neutron.tests.unit.db.test_dvr_mac_db \ - neutron.tests.unit.db.test_securitygroups_db \ - neutron.tests.unit.debug.test_commands \ - neutron.tests.unit.tests.test_post_mortem_debug \ - neutron.tests.unit.tests.test_base \ - neutron.tests.unit.database_stubs \ - neutron.tests.unit.dummy_plugin \ - neutron.tests.unit.extension_stubs \ - neutron.tests.unit.testlib_api \ - neutron.tests.unit.api.test_api_common \ - neutron.tests.unit.api.rpc.handlers.test_dhcp_rpc \ - neutron.tests.unit.api.rpc.handlers.test_securitygroups_rpc \ - neutron.tests.unit.api.rpc.handlers.test_dvr_rpc \ - neutron.tests.unit.api.rpc.agentnotifiers.test_dhcp_rpc_agent_api \ - neutron.tests.unit.api.v2.test_attributes \ - neutron.tests.unit.agent.metadata.test_driver \ - neutron.tests.unit.agent.test_rpc \ - neutron.tests.unit.agent.test_securitygroups_rpc \ - neutron.tests.unit.agent.l3.test_link_local_allocator \ - neutron.tests.unit.agent.l3.test_dvr_local_router \ - neutron.tests.unit.agent.l3.test_ha_router \ - neutron.tests.unit.agent.l3.test_legacy_router \ - neutron.tests.unit.agent.l3.test_router_info \ - neutron.tests.unit.agent.l3.test_router_processing_queue \ - neutron.tests.unit.agent.l3.test_namespace_manager \ - neutron.tests.unit.agent.l3.test_dvr_fip_ns \ - neutron.tests.unit.agent.ovsdb.native.test_helpers \ - neutron.tests.unit.agent.common.test_config \ - neutron.tests.unit.agent.common.test_polling \ - neutron.tests.unit.agent.common.test_utils \ - neutron.tests.unit.agent.linux.test_ip_lib \ - neutron.tests.unit.agent.linux.test_keepalived \ - neutron.tests.unit.agent.linux.test_daemon \ - neutron.tests.unit.agent.linux.test_ipset_manager \ - neutron.tests.unit.agent.linux.test_iptables_firewall \ - neutron.tests.unit.agent.linux.test_ebtables_manager \ - 
neutron.tests.unit.agent.linux.test_iptables_firewall \ - neutron.tests.unit.agent.linux.test_ebtables_driver \ - neutron.tests.unit.agent.linux.test_polling \ - neutron.tests.unit.agent.linux.test_ip_lib \ - neutron.tests.unit.agent.linux.test_ip_monitor \ - neutron.tests.unit.agent.linux.test_iptables_manager \ - neutron.tests.unit.agent.linux.test_external_process \ - neutron.tests.unit.agent.linux.test_dhcp \ - neutron.tests.unit.agent.linux.test_async_process \ - neutron.tests.unit.agent.linux.test_ovsdb_monitor \ - neutron.tests.unit.agent.linux.test_bridge_lib \ - neutron.tests.unit.agent.linux.test_ip_link_support \ - neutron.tests.unit.agent.linux.test_interface \ - neutron.tests.unit.agent.dhcp.test_agent \ - neutron.tests.unit.test_manager \ - neutron.tests.unit.test_service \ - neutron.tests.unit.test_auth \ - neutron.tests.unit.test_policy \ - neutron.tests.unit.extensions.v2attributes \ - neutron.tests.unit.extensions.test_l3_ext_gw_mode \ - neutron.tests.unit.extensions.test_extra_dhcp_opt \ - neutron.tests.unit.extensions.extendedattribute \ - neutron.tests.unit.extensions.base \ - neutron.tests.unit.extensions.foxinsocks \ - neutron.tests.unit.extensions.extensionattribute \ - neutron.tests.unit.extensions.test_servicetype \ - neutron.tests.unit.extensions.test_portsecurity \ - neutron.tests.unit.extensions.test_providernet \ - neutron.tests.unit.callbacks.test_manager \ - neutron.tests.unit.hacking.test_checks \ - neutron.tests.unit.common.test_config \ - neutron.tests.unit.common.test_rpc \ - neutron.tests.unit.common.test_ipv6_utils \ - neutron.tests.unit.cmd.test_ovs_cleanup \ - neutron.tests.unit.cmd.test_netns_cleanup \ - neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_db_api \ - neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_driver \ - neutron.tests.unit.ipam.test_subnet_alloc \ - neutron.tests.unit.ipam.test_utils \ - neutron.tests.unit.ipam.test_requests \ - neutron.tests.unit.notifiers.test_nova \ - 
neutron.tests.unit.notifiers.test_batch_notifier +commands = python setup.py test --testr-args='{posargs: \ + neutron.tests.unit.test_context \ + neutron.tests.unit.services.metering.drivers.test_iptables \ + neutron.tests.unit.services.metering.agents.test_metering_agent \ + neutron.tests.unit.services.test_provider_configuration \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_sriov_nic_agent \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_eswitch_manager \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.common.test_config \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.agent.test_pci_lib \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.ovs_test_base \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_phys \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_int \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl.test_br_tun \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.test_ovs_tunnel \ + neutron.tests.unit.plugins.brocade.test_brocade_db \ + neutron.tests.unit.plugins.brocade.test_brocade_plugin \ + neutron.tests.unit.plugins.brocade.test_brocade_vlan \ + neutron.tests.unit.plugins.oneconvergence.test_nvsd_agent \ + neutron.tests.unit.plugins.oneconvergence.test_plugin_helper \ + neutron.tests.unit.plugins.oneconvergence.test_nvsdlib \ + neutron.tests.unit.plugins.ibm.test_sdnve_agent \ + neutron.tests.unit.plugins.ibm.test_sdnve_api \ + neutron.tests.unit.plugins.ml2.test_db \ + neutron.tests.unit.plugins.ml2.test_driver_context \ + neutron.tests.unit.plugins.ml2.test_port_binding \ + neutron.tests.unit.plugins.ml2.test_extension_driver_api \ + neutron.tests.unit.plugins.ml2.test_rpc \ + neutron.tests.unit.plugins.ml2.drivers.mlnx.test_mech_mlnx \ + neutron.tests.unit.plugins.ml2.drivers.openvswitch.mech_driver.test_mech_openvswitch \ + 
neutron.tests.unit.plugins.ml2.drivers.linuxbridge.mech_driver.test_mech_linuxbridge \ + neutron.tests.unit.plugins.ml2.drivers.linuxbridge.agent.test_linuxbridge_neutron_agent \ + neutron.tests.unit.plugins.ml2.drivers.base_type_tunnel \ + neutron.tests.unit.plugins.ml2.drivers.opendaylight.test_driver \ + neutron.tests.unit.plugins.ml2.drivers.ext_test \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.mech_driver.test_mech_sriov_nic_switch \ + neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent \ + neutron.tests.unit.plugins.ml2.drivers.test_type_vxlan \ + neutron.tests.unit.plugins.ml2.drivers.test_type_gre \ + neutron.tests.unit.plugins.ml2.drivers.test_helpers \ + neutron.tests.unit.plugins.ml2.drivers.test_type_local \ + neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \ + neutron.tests.unit.plugins.ml2.drivers.test_type_flat \ + neutron.tests.unit.plugins.ml2.drivers.test_type_vlan \ + neutron.tests.unit.plugins.ml2.drivers.mechanism_test \ + neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.l2population_rpc_base \ + neutron.tests.unit.plugins.ml2.extensions.fake_extension \ + neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.test_l2population_rpc \ + neutron.tests.unit.plugins.ml2.drivers.l2pop.test_mech_driver \ + neutron.tests.unit.plugins.cisco.n1kv.test_n1kv_db \ + neutron.tests.unit.plugins.cisco.n1kv.fake_client \ + neutron.tests.unit.plugins.cisco.test_network_db \ + neutron.tests.unit.quota.test_resource \ + neutron.tests.unit.quota.test_resource_registry \ + neutron.tests.unit.scheduler.test_l3_agent_scheduler \ + neutron.tests.unit.scheduler.test_dhcp_agent_scheduler \ + neutron.tests.unit.db.test_agentschedulers_db \ + neutron.tests.unit.db.test_allowedaddresspairs_db \ + neutron.tests.unit.db.test_ipam_backend_mixin \ + neutron.tests.unit.db.test_l3_dvr_db \ + neutron.tests.unit.db.test_l3_hamode_db \ + neutron.tests.unit.db.test_migration \ + neutron.tests.unit.db.test_agents_db \ + 
neutron.tests.unit.db.quota.test_api \ + neutron.tests.unit.db.quota.test_driver \ + neutron.tests.unit.db.test_dvr_mac_db \ + neutron.tests.unit.db.test_securitygroups_db \ + neutron.tests.unit.debug.test_commands \ + neutron.tests.unit.tests.test_post_mortem_debug \ + neutron.tests.unit.tests.test_base \ + neutron.tests.unit.database_stubs \ + neutron.tests.unit.dummy_plugin \ + neutron.tests.unit.extension_stubs \ + neutron.tests.unit.testlib_api \ + neutron.tests.unit.api.test_api_common \ + neutron.tests.unit.api.rpc.handlers.test_dhcp_rpc \ + neutron.tests.unit.api.rpc.handlers.test_securitygroups_rpc \ + neutron.tests.unit.api.rpc.handlers.test_dvr_rpc \ + neutron.tests.unit.api.rpc.agentnotifiers.test_dhcp_rpc_agent_api \ + neutron.tests.unit.api.v2.test_attributes \ + neutron.tests.unit.agent.metadata.test_agent \ + neutron.tests.unit.agent.metadata.test_driver \ + neutron.tests.unit.agent.metadata.test_namespace_proxy \ + neutron.tests.unit.agent.test_rpc \ + neutron.tests.unit.agent.test_securitygroups_rpc \ + neutron.tests.unit.agent.l3.test_link_local_allocator \ + neutron.tests.unit.agent.l3.test_dvr_local_router \ + neutron.tests.unit.agent.l3.test_ha_router \ + neutron.tests.unit.agent.l3.test_legacy_router \ + neutron.tests.unit.agent.l3.test_router_info \ + neutron.tests.unit.agent.l3.test_router_processing_queue \ + neutron.tests.unit.agent.l3.test_namespace_manager \ + neutron.tests.unit.agent.l3.test_dvr_fip_ns \ + neutron.tests.unit.agent.ovsdb.native.test_helpers \ + neutron.tests.unit.agent.common.test_config \ + neutron.tests.unit.agent.common.test_ovs_lib \ + neutron.tests.unit.agent.common.test_polling \ + neutron.tests.unit.agent.common.test_utils \ + neutron.tests.unit.agent.linux.test_ip_lib \ + neutron.tests.unit.agent.linux.test_keepalived \ + neutron.tests.unit.agent.linux.test_daemon \ + neutron.tests.unit.agent.linux.test_ipset_manager \ + neutron.tests.unit.agent.linux.test_iptables_firewall \ + 
neutron.tests.unit.agent.linux.test_ebtables_manager \ + neutron.tests.unit.agent.linux.test_iptables_firewall \ + neutron.tests.unit.agent.linux.test_ebtables_driver \ + neutron.tests.unit.agent.linux.test_polling \ + neutron.tests.unit.agent.linux.test_ip_lib \ + neutron.tests.unit.agent.linux.test_ip_monitor \ + neutron.tests.unit.agent.linux.test_iptables_manager \ + neutron.tests.unit.agent.linux.test_external_process \ + neutron.tests.unit.agent.linux.test_dhcp \ + neutron.tests.unit.agent.linux.test_async_process \ + neutron.tests.unit.agent.linux.test_ovsdb_monitor \ + neutron.tests.unit.agent.linux.test_bridge_lib \ + neutron.tests.unit.agent.linux.test_ip_link_support \ + neutron.tests.unit.agent.linux.test_interface \ + neutron.tests.unit.agent.linux.test_utils \ + neutron.tests.unit.agent.dhcp.test_agent \ + neutron.tests.unit.test_manager \ + neutron.tests.unit.test_service \ + neutron.tests.unit.test_auth \ + neutron.tests.unit.test_policy \ + neutron.tests.unit.extensions.v2attributes \ + neutron.tests.unit.extensions.test_address_scope \ + neutron.tests.unit.extensions.test_agent \ + neutron.tests.unit.extensions.test_external_net \ + neutron.tests.unit.extensions.test_flavors \ + neutron.tests.unit.extensions.test_l3_ext_gw_mode \ + neutron.tests.unit.extensions.test_extra_dhcp_opt \ + neutron.tests.unit.extensions.test_netmtu \ + neutron.tests.unit.extensions.test_vlantransparent \ + neutron.tests.unit.extensions.extendedattribute \ + neutron.tests.unit.extensions.base \ + neutron.tests.unit.extensions.foxinsocks \ + neutron.tests.unit.extensions.extensionattribute \ + neutron.tests.unit.extensions.test_servicetype \ + neutron.tests.unit.extensions.test_portsecurity \ + neutron.tests.unit.extensions.test_providernet \ + neutron.tests.unit.callbacks.test_manager \ + neutron.tests.unit.hacking.test_checks \ + neutron.tests.unit.common.test_utils \ + neutron.tests.unit.common.test_config \ + neutron.tests.unit.common.test_rpc \ + 
neutron.tests.unit.common.test_ipv6_utils \ + neutron.tests.unit.cmd.test_ovs_cleanup \ + neutron.tests.unit.cmd.test_netns_cleanup \ + neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_db_api \ + neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_driver \ + neutron.tests.unit.ipam.test_subnet_alloc \ + neutron.tests.unit.ipam.test_utils \ + neutron.tests.unit.ipam.test_requests \ + neutron.tests.unit.notifiers.test_nova \ + neutron.tests.unit.notifiers.test_batch_notifier \ + neutron.tests.unit.api.test_extensions \ + neutron.tests.unit.db.test_db_base_plugin_common}' [flake8] # E125 continuation line does not distinguish itself from next logical line