Prepare initial sandbox for neutron-dynamic-routing

This patch-set prepares the basic code structure with all
the tools required for running static and unit tests.

Note: All the code imported from the seed repo is being removed
      temporarily and will be added back in subsequent patch-sets,
      after the refactoring required for the new repo layout.

Co-Authored-By: Ryan Tidwell <ryan.tidwell@hpe.com>
Implements: blueprint bgp-spinout
Partial-Bug: #1560003

Change-Id: I9bff3d916279c4f335b309e7a2c2e943ac6f6cde
This commit is contained in:
vikram.choudhary 2016-04-22 20:26:18 +05:30
parent 8ff4499030
commit 4ba80f3f1c
98 changed files with 2125 additions and 8984 deletions

7
.coveragerc Normal file
View File

@ -0,0 +1,7 @@
[run]
branch = True
source = neutron_dynamic_routing
# omit = neutron_dynamic_routing/tests/*
[report]
ignore_errors = True

32
.gitignore vendored Normal file
View File

@ -0,0 +1,32 @@
AUTHORS
build/*
build-stamp
ChangeLog
cover/
covhtml/
dist/
doc/build
*.DS_Store
*.pyc
neutron.egg-info/
neutron_dynamic_routing.egg-info/
neutron/vcsversion.py
neutron/versioninfo
pbr*.egg/
run_tests.err.log
run_tests.log
setuptools*.egg/
subunit.log
*.mo
*.sw?
*~
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf
# Files created by releasenotes build
releasenotes/build

View File

@ -1,4 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/neutron-dynamic-routing.git
project=openstack/neutron-dynamic-routing.git

3
.mailmap Normal file
View File

@ -0,0 +1,3 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

133
.pylintrc Normal file
View File

@ -0,0 +1,133 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
# Note the 'openstack' below is intended to match only
# neutron.openstack.common. If we ever have another 'openstack'
# dirname, then we'll need to expand the ignore features in pylint :/
ignore=.git,tests,openstack
[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
locally-disabled,
# "E" Error for important programming issues (likely bugs)
access-member-before-definition,
bad-super-call,
maybe-no-member,
no-member,
no-method-argument,
no-self-argument,
not-callable,
no-value-for-parameter,
super-on-old-class,
too-few-format-args,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
anomalous-backslash-in-string,
anomalous-unicode-escape-in-string,
arguments-differ,
attribute-defined-outside-init,
bad-builtin,
bad-indentation,
broad-except,
dangerous-default-value,
deprecated-lambda,
duplicate-key,
expression-not-assigned,
fixme,
global-statement,
global-variable-not-assigned,
logging-not-lazy,
no-init,
non-parent-init-called,
pointless-string-statement,
protected-access,
redefined-builtin,
redefined-outer-name,
redefine-in-handler,
signature-differs,
star-args,
super-init-not-called,
unnecessary-lambda,
unnecessary-pass,
unpacking-non-sequence,
unreachable,
unused-argument,
unused-import,
unused-variable,
# TODO(dougwig) - disable nonstandard-exception while we have neutron_lib shims
nonstandard-exception,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
old-style-class,
superfluous-parens,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_
[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use openstack.common.jsonutils
json
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no

8
.testr.conf Normal file
View File

@ -0,0 +1,8 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
OS_LOG_CAPTURE=1 \
${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

4
CONTRIBUTING.rst Normal file
View File

@ -0,0 +1,4 @@
Please see the Neutron CONTRIBUTING.rst file for how to contribute to
neutron-dynamic-routing:
`Neutron CONTRIBUTING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/CONTRIBUTING.rst>`_

7
HACKING.rst Normal file
View File

@ -0,0 +1,7 @@
Neutron Dynamic Routing Style Commandments
==========================================
Please see the Neutron HACKING.rst file for style commandments for
neutron-dynamic-routing:
`Neutron HACKING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/HACKING.rst>`_

176
LICENSE Normal file
View File

@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

9
MANIFEST.in Normal file
View File

@ -0,0 +1,9 @@
include AUTHORS
include README.rst
include ChangeLog
include LICENSE
exclude .gitignore
exclude .gitreview
global-exclude *.pyc

18
README.rst Normal file
View File

@ -0,0 +1,18 @@
Welcome!
========
This package contains the code for Neutron dynamic routing. This package
requires Neutron to run.
External Resources:
===================
The homepage for Neutron is: http://launchpad.net/neutron. Use this
site for asking for help, and filing bugs. We use a single Launchpad
page for all Neutron projects.
Code is available on git.openstack.org at:
<http://git.openstack.org/cgit/openstack/neutron-dynamic-routing>.
Please refer to Neutron documentation for more information:
`Neutron README.rst <http://git.openstack.org/cgit/openstack/neutron/tree/README.rst>`_

8
TESTING.rst Normal file
View File

@ -0,0 +1,8 @@
Testing Neutron Dynamic Routing
===============================
Please see the TESTING.rst file for the Neutron project itself. It has
the up-to-date instructions for how to test Neutron, and is
applicable to neutron-dynamic-routing as well:
`Neutron TESTING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/TESTING.rst>`_

2
babel.cfg Normal file
View File

@ -0,0 +1,2 @@
[python: **.py]

View File

@ -1,29 +0,0 @@
# Register the "bgp" service plugin with the neutron-server configuration.
function configure_bgp_service_plugin {
_neutron_service_plugin_class_add "bgp"
}
# Entry point called from plugin.sh during the "install" phase; currently
# only enables the BGP service plugin.
function configure_bgp {
configure_bgp_service_plugin
}
# Create and populate the BGP dynamic-routing agent config file
# ($Q_BGP_DRAGENT_CONF_FILE) from the sample shipped in the Neutron tree.
function configure_bgp_dragent {
cp $NEUTRON_DIR/etc/bgp_dragent.ini.sample $Q_BGP_DRAGENT_CONF_FILE
iniset $Q_BGP_DRAGENT_CONF_FILE DEFAULT verbose True
iniset $Q_BGP_DRAGENT_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
# bgp_router_id is optional; only write it when the operator provided one.
if [ -n "$BGP_ROUTER_ID" ]; then
iniset $Q_BGP_DRAGENT_CONF_FILE BGP bgp_router_id $BGP_ROUTER_ID
fi
# Fall back to the Ryu driver when no speaker driver was configured
# (RYU_BGP_SPEAKER_DRIVER is defined in devstack/settings).
if [ -z "$BGP_SPEAKER_DRIVER" ]; then
BGP_SPEAKER_DRIVER=$RYU_BGP_SPEAKER_DRIVER
fi
iniset $Q_BGP_DRAGENT_CONF_FILE BGP bgp_speaker_driver $BGP_SPEAKER_DRIVER
}
# Launch the BGP dynamic-routing agent (devstack service name: q-bgp-agt)
# with the main neutron config plus the dr-agent-specific config file.
# Fix: drop the stray leading "/" before $Q_BGP_DRAGENT_CONF_FILE — the
# variable already defaults to an absolute path ($NEUTRON_CONF_DIR/...),
# so the extra slash produced a doubled "//" path and would break any
# operator-supplied relative path.
function start_bgp_dragent {
run_process q-bgp-agt "$AGENT_BGP_BINARY --config-file $NEUTRON_CONF --config-file $Q_BGP_DRAGENT_CONF_FILE"
}
# Stop the BGP dynamic-routing agent started by start_bgp_dragent.
function stop_bgp_dragent {
stop_process q-bgp-agt
}

View File

@ -1,57 +0,0 @@
# devstack plugin hook: wires the neutron advanced services into the
# devstack lifecycle. Invoked with $1 = phase group ("stack"/"unstack")
# and, for "stack", $2 = sub-phase ("install"/"post-config"/"extra").
LIBDIR=$DEST/neutron/devstack/lib
source $LIBDIR/bgp
source $LIBDIR/flavors
source $LIBDIR/l2_agent
source $LIBDIR/l2_agent_sriovnicswitch
source $LIBDIR/ml2
source $LIBDIR/qos
if [[ "$1" == "stack" ]]; then
case "$2" in
install)
# Enable optional service plugins before neutron-server starts.
if is_service_enabled q-flavors; then
configure_flavors
fi
if is_service_enabled q-qos; then
configure_qos
fi
if is_service_enabled q-bgp; then
configure_bgp
fi
;;
post-config)
# Write agent config files once the base neutron config exists.
if is_service_enabled q-agt; then
configure_l2_agent
fi
if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then
configure_bgp_dragent
fi
#Note: sriov agent should run with OVS or linux bridge agent
#because they are the mechanisms that bind the DHCP and router ports.
#Currently devstack lacks the option to run two agents on the same node.
#Therefore we create new service, q-sriov-agt, and the q-agt should be OVS
#or linux bridge.
if is_service_enabled q-sriov-agt; then
configure_$Q_PLUGIN
configure_l2_agent
configure_l2_agent_sriovnicswitch
fi
;;
extra)
# Start the agents after neutron-server is up.
if is_service_enabled q-sriov-agt; then
start_l2_agent_sriov
fi
if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then
start_bgp_dragent
fi
;;
esac
elif [[ "$1" == "unstack" ]]; then
# Tear down the agents started in the "extra" phase above.
if is_service_enabled q-sriov-agt; then
stop_l2_agent_sriov
fi
if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then
stop_bgp_dragent
fi
fi

View File

@ -1,8 +0,0 @@
# Extra extensions loaded by the Neutron L2 agent (empty by default).
L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-}
#BGP binary and config information
# Path to the neutron-bgp-dragent executable started by start_bgp_dragent.
AGENT_BGP_BINARY=${AGENT_BGP_BINARY:-"$NEUTRON_BIN_DIR/neutron-bgp-dragent"}
# Agent config file generated by configure_bgp_dragent (see lib/bgp).
Q_BGP_DRAGENT_CONF_FILE=${Q_BGP_DRAGENT_CONF_FILE:-"$NEUTRON_CONF_DIR/bgp_dragent.ini"}
# Optional operator-supplied BGP router id; left unset by default.
BGP_ROUTER_ID=${BGP_ROUTER_ID:-}
# Default speaker driver used when BGP_SPEAKER_DRIVER is not set.
RYU_BGP_SPEAKER_DRIVER="neutron.services.bgp.driver.ryu.driver.RyuBgpDriver"

75
doc/source/conf.py Executable file
View File

@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx configuration for building the neutron-dynamic-routing docs.
import os
import sys
# Make the repo root importable so sphinx.ext.autodoc can find the
# neutron_dynamic_routing package (conf.py lives in doc/source/).
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'neutron-dynamic-routing'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -0,0 +1,4 @@
============
Contributing
============
.. include:: ../../CONTRIBUTING.rst

43
doc/source/index.rst Normal file
View File

@ -0,0 +1,43 @@
..
Copyright 2016 Huawei India Pvt Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Convention for heading levels in Neutron devref:
======= Heading 0 (reserved for the title in a document)
------- Heading 1
~~~~~~~ Heading 2
+++++++ Heading 3
''''''' Heading 4
(Avoid deeper levels because they do not render well.)
Welcome to neutron-dynamic-routing's documentation!
===================================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -0,0 +1,12 @@
============
Installation
============
At the command line::
$ pip install neutron-dynamic-routing
Or, if you have virtualenvwrapper installed::
$ mkvirtualenv neutron-dynamic-routing
$ pip install neutron-dynamic-routing

1
doc/source/readme.rst Normal file
View File

@ -0,0 +1 @@
.. include:: ../../README.rst

7
doc/source/usage.rst Normal file
View File

@ -0,0 +1,7 @@
========
Usage
========
To use neutron-dynamic-routing in a project::
import neutron_dynamic_routing

View File

@ -1,7 +0,0 @@
[DEFAULT]
output_file = etc/bgp_dragent.ini.sample
wrap_width = 79
namespace = neutron.base.agent
namespace = neutron.bgp.agent
namespace = oslo.log

View File

@ -1,238 +0,0 @@
{
"context_is_admin": "role:admin",
"owner": "tenant_id:%(tenant_id)s",
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"shared_firewall_policies": "field:firewall_policies:shared=True",
"shared_subnetpools": "field:subnetpools:shared=True",
"shared_address_scopes": "field:address_scopes:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_subnetpool": "",
"create_subnetpool:shared": "rule:admin_only",
"create_subnetpool:is_default": "rule:admin_only",
"get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
"update_subnetpool": "rule:admin_or_owner",
"update_subnetpool:is_default": "rule:admin_only",
"delete_subnetpool": "rule:admin_or_owner",
"create_address_scope": "",
"create_address_scope:shared": "rule:admin_only",
"get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
"update_address_scope": "rule:admin_or_owner",
"update_address_scope:shared": "rule:admin_only",
"delete_address_scope": "rule:admin_or_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"get_network_ip_availabilities": "rule:admin_only",
"get_network_ip_availability": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:is_default": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"insert_rule": "rule:admin_or_owner",
"remove_rule": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"get_agent-loadbalancers": "rule:admin_only",
"get_loadbalancer-hosting-agent": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only",
"create_flavor": "rule:admin_only",
"update_flavor": "rule:admin_only",
"delete_flavor": "rule:admin_only",
"get_flavors": "rule:regular_user",
"get_flavor": "rule:regular_user",
"create_service_profile": "rule:admin_only",
"update_service_profile": "rule:admin_only",
"delete_service_profile": "rule:admin_only",
"get_service_profiles": "rule:admin_only",
"get_service_profile": "rule:admin_only",
"get_policy": "rule:regular_user",
"create_policy": "rule:admin_only",
"update_policy": "rule:admin_only",
"delete_policy": "rule:admin_only",
"get_policy_bandwidth_limit_rule": "rule:regular_user",
"create_policy_bandwidth_limit_rule": "rule:admin_only",
"delete_policy_bandwidth_limit_rule": "rule:admin_only",
"update_policy_bandwidth_limit_rule": "rule:admin_only",
"get_policy_dscp_marking_rule": "rule:regular_user",
"create_policy_dscp_marking_rule": "rule:admin_only",
"delete_policy_dscp_marking_rule": "rule:admin_only",
"update_policy_dscp_marking_rule": "rule:admin_only",
"get_rule_type": "rule:regular_user",
"restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
"create_rbac_policy": "",
"create_rbac_policy:target_tenant": "rule:restrict_wildcard",
"update_rbac_policy": "rule:admin_or_owner",
"update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
"get_rbac_policy": "rule:admin_or_owner",
"delete_rbac_policy": "rule:admin_or_owner",
"create_flavor_service_profile": "rule:admin_only",
"delete_flavor_service_profile": "rule:admin_only",
"get_flavor_service_profile": "rule:regular_user",
"get_auto_allocated_topology": "rule:admin_or_owner",
"get_bgp_speaker": "rule:admin_only",
"create_bgp_speaker": "rule:admin_only",
"update_bgp_speaker": "rule:admin_only",
"delete_bgp_speaker": "rule:admin_only",
"get_bgp_peer": "rule:admin_only",
"create_bgp_peer": "rule:admin_only",
"update_bgp_peer": "rule:admin_only",
"delete_bgp_peer": "rule:admin_only",
"add_bgp_peer": "rule:admin_only",
"remove_bgp_peer": "rule:admin_only",
"add_gateway_network": "rule:admin_only",
"remove_gateway_network": "rule:admin_only",
"get_advertised_routes":"rule:admin_only",
"add_bgp_speaker_to_dragent": "rule:admin_only",
"remove_bgp_speaker_from_dragent": "rule:admin_only",
"list_bgp_speaker_on_dragent": "rule:admin_only",
"list_dragent_hosting_bgp_speaker": "rule:admin_only"
}

View File

@ -1,105 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
from neutron.common import rpc as n_rpc
from neutron.services.bgp.common import constants as bgp_consts
class BgpDrAgentNotifyApi(object):
    """API for plugin to notify BGP DrAgent.

    This class implements the client side of an rpc interface. The server side
    is neutron.services.bgp_speaker.agent.bgp_dragent.BgpDrAgent. For more
    information about rpc interfaces, please see doc/source/devref/rpc_api.rst.
    """

    def __init__(self, topic=bgp_consts.BGP_DRAGENT):
        # Version 1.0 is the initial (and only) version of this interface.
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)
        self.topic = topic

    def bgp_routes_advertisement(self, context, bgp_speaker_id,
                                 routes, host):
        """Tell BgpDrAgent to begin advertising the given route.

        Invoked on FIP association, adding router port to a tenant network,
        and new DVR port-host bindings, and subnet creation(?).
        """
        self._notification_host_cast(context, 'bgp_routes_advertisement_end',
            {'advertise_routes': {'speaker_id': bgp_speaker_id,
                                  'routes': routes}}, host)

    def bgp_routes_withdrawal(self, context, bgp_speaker_id,
                              routes, host):
        """Tell BgpDrAgent to stop advertising the given route.

        Invoked on FIP disassociation, removal of a router port on a
        network, and removal of DVR port-host binding, and subnet delete(?).
        """
        self._notification_host_cast(context, 'bgp_routes_withdrawal_end',
            {'withdraw_routes': {'speaker_id': bgp_speaker_id,
                                 'routes': routes}}, host)

    def bgp_peer_disassociated(self, context, bgp_speaker_id,
                               bgp_peer_ip, host):
        """Tell BgpDrAgent about a BGP Peer disassociation.

        This effectively tells the BgpDrAgent to stop a peering session.
        """
        # NOTE(review): the original docstring summary was swapped with
        # bgp_peer_associated's; the '..._disassociation_end' RPC method
        # name shows this is the "stop session" path.
        self._notification_host_cast(context, 'bgp_peer_disassociation_end',
            {'bgp_peer': {'speaker_id': bgp_speaker_id,
                          'peer_ip': bgp_peer_ip}}, host)

    def bgp_peer_associated(self, context, bgp_speaker_id,
                            bgp_peer_id, host):
        """Tell BgpDrAgent about a new BGP Peer association.

        This effectively tells the bgp_dragent to open a peering session.
        """
        self._notification_host_cast(context, 'bgp_peer_association_end',
            {'bgp_peer': {'speaker_id': bgp_speaker_id,
                          'peer_id': bgp_peer_id}}, host)

    def bgp_speaker_created(self, context, bgp_speaker_id, host):
        """Tell BgpDrAgent about the creation of a BGP Speaker.

        Because a BGP Speaker can be created with BgpPeer binding in place,
        we need to inform the BgpDrAgent of a new BGP Speaker in case a
        peering session needs to opened immediately.
        """
        self._notification_host_cast(context, 'bgp_speaker_create_end',
            {'bgp_speaker': {'id': bgp_speaker_id}}, host)

    def bgp_speaker_removed(self, context, bgp_speaker_id, host):
        """Tell BgpDrAgent about the removal of a BGP Speaker.

        Because a BGP Speaker can be removed with BGP Peer binding in
        place, we need to inform the BgpDrAgent of the removal of a
        BGP Speaker in case peering sessions need to be stopped.
        """
        self._notification_host_cast(context, 'bgp_speaker_remove_end',
            {'bgp_speaker': {'id': bgp_speaker_id}}, host)

    def _notification_host_cast(self, context, method, payload, host):
        """Send payload to BgpDrAgent in the cast (fire-and-forget) mode"""
        cctxt = self.client.prepare(topic=self.topic, server=host)
        cctxt.cast(context, method, payload=payload)

    def _notification_host_call(self, context, method, payload, host):
        """Send payload to BgpDrAgent in the call (blocking) mode"""
        cctxt = self.client.prepare(topic=self.topic, server=host)
        cctxt.call(context, method, payload=payload)

View File

@ -1,65 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
from neutron.extensions import bgp as bgp_ext
from neutron import manager
class BgpSpeakerRpcCallback(object):
    """BgpDrAgent RPC callback in plugin implementations.

    This class implements the server side of an RPC interface.
    The client side of this interface can be found in
    neutron.services.bgp_speaker.agent.bgp_dragent.BgpDrPluginApi.
    For more information about changing RPC interfaces,
    see doc/source/devref/rpc_api.rst.
    """

    # API version history:
    #     1.0 BGPDRPluginApi BASE_RPC_API_VERSION
    target = oslo_messaging.Target(version='1.0')

    @property
    def plugin(self):
        # Resolve the BGP service plugin lazily and memoize it so the
        # registry lookup happens at most once per callback instance.
        try:
            return self._plugin
        except AttributeError:
            registry = manager.NeutronManager.get_service_plugins()
            self._plugin = registry.get(bgp_ext.BGP_EXT_ALIAS)
            return self._plugin

    def get_bgp_speaker_info(self, context, bgp_speaker_id):
        """Return BGP Speaker details such as peer list and local_as.

        Invoked by the BgpDrAgent to lookup the details of a BGP Speaker.
        """
        speaker = self.plugin.get_bgp_speaker_with_advertised_routes(
            context, bgp_speaker_id)
        return speaker

    def get_bgp_peer_info(self, context, bgp_peer_id):
        """Return BgpPeer details such as IP, remote_as, and credentials.

        Invoked by the BgpDrAgent to lookup the details of a BGP peer.
        """
        wanted_fields = ['peer_ip', 'remote_as', 'auth_type', 'password']
        return self.plugin.get_bgp_peer(context, bgp_peer_id, wanted_fields)

    def get_bgp_speakers(self, context, host=None, **kwargs):
        """Returns the list of all BgpSpeakers.

        Typically invoked by the BgpDrAgent as part of its bootstrap process.
        """
        return self.plugin.get_bgp_speakers_for_agent_host(context, host)

View File

@ -1,20 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.services.bgp.agent import entry as bgp_dragent
def main():
    # Console-script entry point; delegates to the real BGP dynamic
    # routing agent entry point.
    bgp_dragent.main()

File diff suppressed because it is too large Load Diff

View File

@ -1,215 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron._i18n import _LW
from neutron.db import agents_db
from neutron.db import agentschedulers_db as as_db
from neutron.db import model_base
from neutron.extensions import bgp_dragentscheduler as bgp_dras_ext
from neutron.services.bgp.common import constants as bgp_consts
LOG = logging.getLogger(__name__)
# Configuration knob selecting which scheduler driver assigns BGP
# speakers to BGP dynamic routing agents (default: random choice).
BGP_DRAGENT_SCHEDULER_OPTS = [
    cfg.StrOpt(
        'bgp_drscheduler_driver',
        default='neutron.services.bgp.scheduler'
                '.bgp_dragent_scheduler.ChanceScheduler',
        help=_('Driver used for scheduling BGP speakers to BGP DrAgent'))
]

cfg.CONF.register_opts(BGP_DRAGENT_SCHEDULER_OPTS)
class BgpSpeakerDrAgentBinding(model_base.BASEV2):
    """Represents a mapping between BGP speaker and BGP DRAgent"""

    __tablename__ = 'bgp_speaker_dragent_bindings'

    # Hosted speaker; the binding row is removed when the speaker is
    # deleted (ondelete CASCADE).
    bgp_speaker_id = sa.Column(sa.String(length=36),
                               sa.ForeignKey("bgp_speakers.id",
                                             ondelete='CASCADE'),
                               nullable=False)
    # Relationship to the hosting agent row.
    dragent = orm.relation(agents_db.Agent)
    # agent_id alone is the primary key, so an agent can host at most
    # one BGP speaker.
    agent_id = sa.Column(sa.String(length=36),
                         sa.ForeignKey("agents.id",
                                       ondelete='CASCADE'),
                         primary_key=True)
class BgpDrAgentSchedulerDbMixin(bgp_dras_ext.BgpDrSchedulerPluginBase,
                                 as_db.AgentSchedulerDbMixin):
    """DB mixin binding BGP speakers to BGP dynamic routing agents."""

    # Scheduler driver instance; set by the service plugin from
    # CONF.bgp_drscheduler_driver.  When None, scheduling is a no-op.
    bgp_drscheduler = None

    def schedule_unscheduled_bgp_speakers(self, context, host):
        """Schedule speakers with no agent binding onto the agent at host."""
        if self.bgp_drscheduler:
            return self.bgp_drscheduler.schedule_unscheduled_bgp_speakers(
                context, host)
        else:
            LOG.warning(_LW("Cannot schedule BgpSpeaker to DrAgent. "
                            "Reason: No scheduler registered."))

    def schedule_bgp_speaker(self, context, created_bgp_speaker):
        """Schedule a newly created speaker and notify the chosen agents."""
        if self.bgp_drscheduler:
            agents = self.bgp_drscheduler.schedule(context,
                                                   created_bgp_speaker)
            for agent in agents:
                self._bgp_rpc.bgp_speaker_created(context,
                                                  created_bgp_speaker['id'],
                                                  agent.host)
        else:
            LOG.warning(_LW("Cannot schedule BgpSpeaker to DrAgent. "
                            "Reason: No scheduler registered."))

    def add_bgp_speaker_to_dragent(self, context, agent_id, speaker_id):
        """Associate a BgpDrAgent with a BgpSpeaker.

        :raises DrAgentAssociationError: if the agent already hosts a
            speaker (unique-constraint violation on the binding table).
        """
        try:
            self._save_bgp_speaker_dragent_binding(context,
                                                   agent_id,
                                                   speaker_id)
        except db_exc.DBDuplicateEntry:
            raise bgp_dras_ext.DrAgentAssociationError(
                agent_id=agent_id)
        LOG.debug('BgpSpeaker %(bgp_speaker_id)s added to '
                  'BgpDrAgent %(agent_id)s',
                  {'bgp_speaker_id': speaker_id, 'agent_id': agent_id})

    def _save_bgp_speaker_dragent_binding(self, context,
                                          agent_id, speaker_id):
        # Validate the agent inside the transaction so the binding is only
        # written for an enabled agent of the BGP routing type.
        with context.session.begin(subtransactions=True):
            agent_db = self._get_agent(context, agent_id)
            agent_up = agent_db['admin_state_up']
            is_agent_bgp = (agent_db['agent_type'] ==
                            bgp_consts.AGENT_TYPE_BGP_ROUTING)
            if not is_agent_bgp or not agent_up:
                raise bgp_dras_ext.DrAgentInvalid(id=agent_id)
            binding = BgpSpeakerDrAgentBinding()
            binding.bgp_speaker_id = speaker_id
            binding.agent_id = agent_id
            context.session.add(binding)
        # Notify the hosting agent only after the transaction is done.
        self._bgp_rpc.bgp_speaker_created(context, speaker_id, agent_db.host)

    def remove_bgp_speaker_from_dragent(self, context, agent_id, speaker_id):
        """Unbind speaker_id from agent_id and notify the agent.

        :raises DrAgentInvalid: if the agent is not a BGP routing agent.
        :raises DrAgentNotHostingBgpSpeaker: if no such binding exists.
        """
        with context.session.begin(subtransactions=True):
            agent_db = self._get_agent(context, agent_id)
            is_agent_bgp = (agent_db['agent_type'] ==
                            bgp_consts.AGENT_TYPE_BGP_ROUTING)
            if not is_agent_bgp:
                raise bgp_dras_ext.DrAgentInvalid(id=agent_id)
            query = context.session.query(BgpSpeakerDrAgentBinding)
            query = query.filter_by(bgp_speaker_id=speaker_id,
                                    agent_id=agent_id)
            num_deleted = query.delete()
            if not num_deleted:
                raise bgp_dras_ext.DrAgentNotHostingBgpSpeaker(
                    bgp_speaker_id=speaker_id,
                    agent_id=agent_id)
            LOG.debug('BgpSpeaker %(bgp_speaker_id)s removed from '
                      'BgpDrAgent %(agent_id)s',
                      {'bgp_speaker_id': speaker_id,
                       'agent_id': agent_id})
        self._bgp_rpc.bgp_speaker_removed(context, speaker_id, agent_db.host)

    def get_dragents_hosting_bgp_speakers(self, context, bgp_speaker_ids,
                                          active=None, admin_state_up=None):
        """Return agents hosting any of bgp_speaker_ids.

        Optionally filters on agent liveness (active) and admin state.
        """
        query = context.session.query(BgpSpeakerDrAgentBinding)
        query = query.options(orm.contains_eager(
            BgpSpeakerDrAgentBinding.dragent))
        query = query.join(BgpSpeakerDrAgentBinding.dragent)
        if len(bgp_speaker_ids) == 1:
            query = query.filter(
                BgpSpeakerDrAgentBinding.bgp_speaker_id == (
                    bgp_speaker_ids[0]))
        elif bgp_speaker_ids:
            # BUG FIX: the original used the Python 'in' operator on a
            # SQLAlchemy column ('col in list'), which does NOT generate an
            # SQL IN clause; .in_() is the correct column operator.
            query = query.filter(
                BgpSpeakerDrAgentBinding.bgp_speaker_id.in_(bgp_speaker_ids))
        if admin_state_up is not None:
            query = query.filter(agents_db.Agent.admin_state_up ==
                                 admin_state_up)
        return [binding.dragent
                for binding in query
                if as_db.AgentSchedulerDbMixin.is_eligible_agent(
                    active, binding.dragent)]

    def get_dragent_bgp_speaker_bindings(self, context):
        """Return all speaker <-> agent binding rows."""
        return context.session.query(BgpSpeakerDrAgentBinding).all()

    def list_dragent_hosting_bgp_speaker(self, context, speaker_id):
        """Return {'agents': [...]} for the agents hosting speaker_id."""
        dragents = self.get_dragents_hosting_bgp_speakers(context,
                                                          [speaker_id])
        agent_ids = [dragent.id for dragent in dragents]
        if not agent_ids:
            return {'agents': []}
        return {'agents': self.get_agents(context, filters={'id': agent_ids})}

    def list_bgp_speaker_on_dragent(self, context, agent_id):
        """Return {'bgp_speakers': [...]} hosted by agent_id."""
        query = context.session.query(BgpSpeakerDrAgentBinding.bgp_speaker_id)
        query = query.filter_by(agent_id=agent_id)
        bgp_speaker_ids = [item[0] for item in query]
        if not bgp_speaker_ids:
            # Exception will be thrown if the requested agent does not exist.
            self._get_agent(context, agent_id)
            return {'bgp_speakers': []}
        return {'bgp_speakers':
                self.get_bgp_speakers(context,
                                      filters={'id': bgp_speaker_ids})}

    def get_bgp_speakers_for_agent_host(self, context, host):
        """Return the (at most one) speaker hosted by the agent on host.

        NOTE(review): returns {} when the agent is administratively down
        but [] when it hosts no speaker; the inconsistent empty-return
        types are preserved for backward compatibility with callers.
        """
        agent = self._get_agent_by_type_and_host(
            context, bgp_consts.AGENT_TYPE_BGP_ROUTING, host)
        if not agent.admin_state_up:
            return {}
        query = context.session.query(BgpSpeakerDrAgentBinding)
        query = query.filter(BgpSpeakerDrAgentBinding.agent_id == agent.id)
        try:
            binding = query.one()
        except exc.NoResultFound:
            return []
        bgp_speaker = self.get_bgp_speaker_with_advertised_routes(
            context, binding['bgp_speaker_id'])
        return [bgp_speaker]

    def get_bgp_speaker_by_speaker_id(self, context, bgp_speaker_id):
        """Return the speaker dict, or {} if it does not exist."""
        try:
            return self.get_bgp_speaker(context, bgp_speaker_id)
        except exc.NoResultFound:
            return {}

    def get_bgp_peer_by_peer_id(self, context, bgp_peer_id):
        """Return the peer dict, or {} if it does not exist."""
        try:
            return self.get_bgp_peer(context, bgp_peer_id)
        except exc.NoResultFound:
            return {}

View File

@ -1,105 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add dynamic routing model data
Revision ID: 15be73214821
Create Date: 2015-07-29 13:16:08.604175
"""
# revision identifiers, used by Alembic.
revision = '15be73214821'
down_revision = '19f26505c74f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # One row per configured BGP speaker.
    op.create_table(
        'bgp_speakers',
        sa.Column('id', sa.String(length=36),
                  nullable=False),
        sa.Column('name', sa.String(length=255),
                  nullable=False),
        sa.Column('local_as', sa.Integer, nullable=False,
                  autoincrement=False),
        sa.Column('ip_version', sa.Integer, nullable=False,
                  autoincrement=False),
        sa.Column('tenant_id',
                  sa.String(length=255),
                  nullable=True,
                  index=True),
        sa.Column('advertise_floating_ip_host_routes', sa.Boolean(),
                  nullable=False),
        sa.Column('advertise_tenant_networks', sa.Boolean(),
                  nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # One row per configured BGP peer (remote neighbor + credentials).
    op.create_table(
        'bgp_peers',
        sa.Column('id', sa.String(length=36),
                  nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('auth_type', sa.String(length=16), nullable=False),
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('peer_ip',
                  sa.String(length=64),
                  nullable=False),
        sa.Column('remote_as', sa.Integer, nullable=False,
                  autoincrement=False),
        sa.Column('tenant_id',
                  sa.String(length=255),
                  nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Association of speakers with gateway networks, per IP version;
    # rows cascade away with either the speaker or the network.
    op.create_table(
        'bgp_speaker_network_bindings',
        sa.Column('bgp_speaker_id',
                  sa.String(length=36),
                  nullable=False),
        sa.Column('network_id',
                  sa.String(length=36),
                  nullable=True),
        sa.Column('ip_version', sa.Integer, nullable=False,
                  autoincrement=False),
        sa.ForeignKeyConstraint(['bgp_speaker_id'],
                                ['bgp_speakers.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['network_id'],
                                ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id', 'bgp_speaker_id', 'ip_version')
    )
    # Many-to-many association of speakers with peers.
    op.create_table(
        'bgp_speaker_peer_bindings',
        sa.Column('bgp_speaker_id',
                  sa.String(length=36),
                  nullable=False),
        sa.Column('bgp_peer_id',
                  sa.String(length=36),
                  nullable=False),
        sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['bgp_peer_id'], ['bgp_peers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('bgp_speaker_id', 'bgp_peer_id')
    )

View File

@ -1,46 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_bgp_dragent_model_data
Revision ID: b4caf27aae4
Revises: 15be73214821
Create Date: 2015-08-20 17:05:31.038704
"""
# revision identifiers, used by Alembic.
revision = 'b4caf27aae4'
down_revision = '15be73214821'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Binding table mapping each BgpDrAgent (agent_id is the primary key,
    # so an agent hosts at most one speaker) to a BGP speaker; rows are
    # removed when either the agent or the speaker is deleted.
    op.create_table(
        'bgp_speaker_dragent_bindings',
        sa.Column('agent_id',
                  sa.String(length=36),
                  primary_key=True),
        sa.Column('bgp_speaker_id',
                  sa.String(length=36),
                  nullable=False),
        sa.ForeignKeyConstraint(['agent_id'], ['agents.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'],
                                ondelete='CASCADE'),
    )

View File

@ -1,65 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The module provides all database models at current HEAD.
Its purpose is to create comparable metadata with current database schema.
Based on this comparison database can be healed with healing migration.
"""
from neutron.db import address_scope_db # noqa
from neutron.db import agents_db # noqa
from neutron.db import agentschedulers_db # noqa
from neutron.db import allowedaddresspairs_db # noqa
from neutron.db import bgp_db # noqa
from neutron.db import bgp_dragentscheduler_db # noqa
from neutron.db import dns_db # noqa
from neutron.db import dvr_mac_db # noqa
from neutron.db import external_net_db # noqa
from neutron.db import extradhcpopt_db # noqa
from neutron.db import extraroute_db # noqa
from neutron.db import flavors_db # noqa
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db # noqa
from neutron.db import l3_db # noqa
from neutron.db import l3_dvrscheduler_db # noqa
from neutron.db import l3_gwmode_db # noqa
from neutron.db import l3_hamode_db # noqa
from neutron.db.metering import metering_db # noqa
from neutron.db import model_base
from neutron.db import models_v2 # noqa
from neutron.db import portbindings_db # noqa
from neutron.db import portsecurity_db # noqa
from neutron.db.qos import models as qos_models # noqa
from neutron.db.quota import models # noqa
from neutron.db import rbac_db_models # noqa
from neutron.db import securitygroups_db # noqa
from neutron.db import segments_db # noqa
from neutron.db import servicetype_db # noqa
from neutron.db import tag_db # noqa
from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_geneve # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.services.auto_allocate import models # noqa
def get_metadata():
    """Return the SQLAlchemy metadata for all models imported above."""
    return model_base.BASEV2.metadata

View File

@ -1,207 +0,0 @@
# Copyright 2016 Hewlett Packard Development Coompany LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper as rh
from neutron.common import exceptions
from neutron.services.bgp.common import constants as bgp_consts
# Extension alias and resource/body key names for the BGP API.
BGP_EXT_ALIAS = 'bgp'
BGP_SPEAKER_RESOURCE_NAME = 'bgp-speaker'
BGP_SPEAKER_BODY_KEY_NAME = 'bgp_speaker'
BGP_PEER_BODY_KEY_NAME = 'bgp_peer'

# Attribute maps for the bgp-speakers and bgp-peers REST resources.
RESOURCE_ATTRIBUTE_MAP = {
    BGP_SPEAKER_RESOURCE_NAME + 's': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True, 'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': attr.NAME_MAX_LEN},
                 'is_visible': True, 'default': ''},
        # Local autonomous system number; immutable after creation.
        'local_as': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:range': (bgp_consts.MIN_ASNUM,
                                                 bgp_consts.MAX_ASNUM)},
                     'is_visible': True, 'default': None,
                     'required_by_policy': False,
                     'enforce_policy': False},
        'ip_version': {'allow_post': True, 'allow_put': False,
                       'validate': {'type:values': [4, 6]},
                       'is_visible': True, 'default': None,
                       'required_by_policy': False,
                       'enforce_policy': False},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': False,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True},
        # Read-only association lists, managed via member actions.
        'peers': {'allow_post': False, 'allow_put': False,
                  'validate': {'type:uuid_list': None},
                  'is_visible': True, 'default': [],
                  'required_by_policy': False,
                  'enforce_policy': True},
        'networks': {'allow_post': False, 'allow_put': False,
                     'validate': {'type:uuid_list': None},
                     'is_visible': True, 'default': [],
                     'required_by_policy': False,
                     'enforce_policy': True},
        'advertise_floating_ip_host_routes': {
            'allow_post': True,
            'allow_put': True,
            'convert_to': attr.convert_to_boolean,
            'validate': {'type:boolean': None},
            'is_visible': True, 'default': True,
            'required_by_policy': False,
            'enforce_policy': True},
        'advertise_tenant_networks': {
            'allow_post': True,
            'allow_put': True,
            'convert_to': attr.convert_to_boolean,
            'validate': {'type:boolean': None},
            'is_visible': True, 'default': True,
            'required_by_policy': False,
            'enforce_policy': True},
    },
    'bgp-peers': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True, 'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': attr.NAME_MAX_LEN},
                 'is_visible': True, 'default': ''},
        'peer_ip': {'allow_post': True, 'allow_put': False,
                    'required_by_policy': True,
                    'validate': {'type:ip_address': None},
                    'is_visible': True},
        'remote_as': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:range': (bgp_consts.MIN_ASNUM,
                                                  bgp_consts.MAX_ASNUM)},
                      'is_visible': True, 'default': None,
                      'required_by_policy': False,
                      'enforce_policy': False},
        'auth_type': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:values':
                                   bgp_consts.SUPPORTED_AUTH_TYPES},
                      'is_visible': True},
        # Write-only credential: never echoed back in API responses.
        'password': {'allow_post': True, 'allow_put': True,
                     'required_by_policy': True,
                     'validate': {'type:string_or_none': None},
                     'is_visible': False,
                     'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': False,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True}
    }
}
# Dynamic Routing Exceptions
class BgpSpeakerNotFound(exceptions.NotFound):
    message = _("BGP speaker %(id)s could not be found.")


class BgpPeerNotFound(exceptions.NotFound):
    message = _("BGP peer %(id)s could not be found.")


class BgpPeerNotAuthenticated(exceptions.NotFound):
    message = _("BGP peer %(bgp_peer_id)s not authenticated.")


class BgpSpeakerPeerNotAssociated(exceptions.NotFound):
    message = _("BGP peer %(bgp_peer_id)s is not associated with "
                "BGP speaker %(bgp_speaker_id)s.")


class BgpSpeakerNetworkNotAssociated(exceptions.NotFound):
    message = _("Network %(network_id)s is not associated with "
                "BGP speaker %(bgp_speaker_id)s.")


class BgpSpeakerNetworkBindingError(exceptions.Conflict):
    message = _("Network %(network_id)s is already bound to BgpSpeaker "
                "%(bgp_speaker_id)s.")


class NetworkNotBound(exceptions.NotFound):
    message = _("Network %(network_id)s is not bound to a BgpSpeaker.")
class DuplicateBgpPeerIpException(exceptions.Conflict):
    # BUG FIX: the original defined '_message' instead of 'message'.
    # Neutron exceptions format the 'message' class attribute, so the
    # '_message' variant was dead and the exception rendered the base
    # class's default text instead of this one.
    message = _("BGP Speaker %(bgp_speaker_id)s is already configured to "
                "peer with a BGP Peer at %(peer_ip)s, it cannot peer with "
                "BGP Peer %(bgp_peer_id)s.")
class InvalidBgpPeerMd5Authentication(exceptions.BadRequest):
    # Raised when auth_type is md5 but no password was supplied.
    message = _("A password must be supplied when using auth_type md5.")


class NetworkNotBoundForIpVersion(NetworkNotBound):
    message = _("Network %(network_id)s is not bound to a IPv%(ip_version)s "
                "BgpSpeaker.")
class Bgp(extensions.ExtensionDescriptor):
    """API extension descriptor for BGP dynamic routing resources."""

    @classmethod
    def get_name(cls):
        return "Neutron BGP Dynamic Routing Extension"

    @classmethod
    def get_alias(cls):
        return BGP_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return("Discover and advertise routes for Neutron prefixes "
               "dynamically via BGP")

    @classmethod
    def get_updated(cls):
        return "2014-07-01T15:37:00-00:00"

    @classmethod
    def get_resources(cls):
        # Build the bgp-speakers / bgp-peers REST resources plus the
        # member actions used to manage peer and network associations.
        plural_mappings = rh.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        action_map = {BGP_SPEAKER_RESOURCE_NAME:
                      {'add_bgp_peer': 'PUT',
                       'remove_bgp_peer': 'PUT',
                       'add_gateway_network': 'PUT',
                       'remove_gateway_network': 'PUT',
                       'get_advertised_routes': 'GET'}}
        exts = rh.build_resource_info(plural_mappings,
                                      RESOURCE_ATTRIBUTE_MAP,
                                      BGP_EXT_ALIAS,
                                      action_map=action_map)
        return exts

    def get_extended_resources(self, version):
        # The attribute map only applies to the v2.0 API.
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}

    def update_attributes_map(self, attributes):
        super(Bgp, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

View File

@ -1,183 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
import webob
from oslo_log import log as logging
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import exceptions
from neutron.extensions import agent
from neutron.extensions import bgp as bgp_ext
from neutron._i18n import _, _LE
from neutron import manager
from neutron import wsgi
LOG = logging.getLogger(__name__)
# Extension alias and URL path components for the scheduler resources.
BGP_DRAGENT_SCHEDULER_EXT_ALIAS = 'bgp_dragent_scheduler'
BGP_DRINSTANCE = 'bgp-drinstance'
BGP_DRINSTANCES = BGP_DRINSTANCE + 's'
BGP_DRAGENT = 'bgp-dragent'
BGP_DRAGENTS = BGP_DRAGENT + 's'
class DrAgentInvalid(agent.AgentNotFound):
    # Raised when the target agent is not a BGP routing agent or is
    # administratively disabled.
    message = _("BgpDrAgent %(id)s is invalid or has been disabled.")


class DrAgentNotHostingBgpSpeaker(exceptions.NotFound):
    # Raised when unbinding a speaker from an agent that does not host it.
    message = _("BGP speaker %(bgp_speaker_id)s is not hosted "
                "by the BgpDrAgent %(agent_id)s.")


class DrAgentAssociationError(exceptions.Conflict):
    # Raised when binding a speaker to an agent that already hosts one.
    message = _("BgpDrAgent %(agent_id)s is already associated "
                "to a BGP speaker.")
class BgpDrSchedulerController(wsgi.Controller):
    """Schedule BgpSpeaker for a BgpDrAgent"""

    def get_plugin(self):
        # Resolve the BGP service plugin; 404 when BGP routing is not
        # enabled in this deployment.
        plugin = manager.NeutronManager.get_service_plugins().get(
            bgp_ext.BGP_EXT_ALIAS)
        if not plugin:
            LOG.error(_LE('No plugin for BGP routing registered'))
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        # GET /agents/{agent_id}/bgp-drinstances
        plugin = self.get_plugin()
        return plugin.list_bgp_speaker_on_dragent(
            request.context, kwargs['agent_id'])

    def create(self, request, body, **kwargs):
        # POST /agents/{agent_id}/bgp-drinstances
        plugin = self.get_plugin()
        return plugin.add_bgp_speaker_to_dragent(
            request.context,
            kwargs['agent_id'],
            body['bgp_speaker_id'])

    def delete(self, request, id, **kwargs):
        # DELETE /agents/{agent_id}/bgp-drinstances/{id}
        plugin = self.get_plugin()
        return plugin.remove_bgp_speaker_from_dragent(
            request.context, kwargs['agent_id'], id)
class BgpDrAgentController(wsgi.Controller):
    """List the BgpDrAgents hosting a given BgpSpeaker."""

    def get_plugin(self):
        # Resolve the BGP service plugin; 404 when BGP routing is not
        # enabled in this deployment.
        plugin = manager.NeutronManager.get_service_plugins().get(
            bgp_ext.BGP_EXT_ALIAS)
        if not plugin:
            LOG.error(_LE('No plugin for BGP routing registered'))
            # BUG FIX: the user-facing message must use the translation
            # helper _() — not the _LE() log-marker helper — matching
            # BgpDrSchedulerController.get_plugin().
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        # GET /bgp-speakers/{bgp_speaker_id}/bgp-dragents
        # BUG FIX: go through get_plugin() so a missing BGP plugin yields
        # a clean HTTP 404 instead of an AttributeError on None.
        plugin = self.get_plugin()
        return plugin.list_dragent_hosting_bgp_speaker(
            request.context, kwargs['bgp_speaker_id'])
class Bgp_dragentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting Dynamic Routing scheduler.
    """

    @classmethod
    def get_name(cls):
        return "BGP Dynamic Routing Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return BGP_DRAGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedules BgpSpeakers on BgpDrAgent"

    @classmethod
    def get_updated(cls):
        return "2015-07-30T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        # /agents/{agent_id}/bgp-drinstances -> speakers on an agent.
        parent = dict(member_name="agent",
                      collection_name="agents")
        controller = resource.Resource(BgpDrSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(BGP_DRINSTANCES,
                                                 controller, parent))
        # /bgp-speakers/{bgp_speaker_id}/bgp-dragents -> hosting agents.
        parent = dict(member_name="bgp_speaker",
                      collection_name="bgp-speakers")
        controller = resource.Resource(BgpDrAgentController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(BGP_DRAGENTS,
                                                 controller, parent))
        return exts

    def get_extended_resources(self, version):
        # This extension adds sub-resources only; no attribute extensions.
        return {}
@six.add_metaclass(abc.ABCMeta)
class BgpDrSchedulerPluginBase(object):
    """REST API to operate BGP dynamic routing agent scheduler.

    All the methods must be executed in admin context.
    """

    def get_plugin_description(self):
        return "Neutron BGP dynamic routing scheduler Plugin"

    def get_plugin_type(self):
        return bgp_ext.BGP_EXT_ALIAS

    @abc.abstractmethod
    def add_bgp_speaker_to_dragent(self, context, agent_id, speaker_id):
        pass

    @abc.abstractmethod
    def remove_bgp_speaker_from_dragent(self, context, agent_id, speaker_id):
        pass

    @abc.abstractmethod
    def list_dragent_hosting_bgp_speaker(self, context, speaker_id):
        pass

    @abc.abstractmethod
    def list_bgp_speaker_on_dragent(self, context, agent_id):
        pass

    @abc.abstractmethod
    def get_bgp_speakers_for_agent_host(self, context, host):
        pass

    @abc.abstractmethod
    def get_bgp_speaker_by_speaker_id(self, context, speaker_id):
        pass

    @abc.abstractmethod
    def get_bgp_peer_by_peer_id(self, context, bgp_peer_id):
        pass

View File

@ -1,707 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import importutils
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.extensions import bgp as bgp_ext
from neutron._i18n import _, _LE, _LI, _LW
from neutron import manager
from neutron.services.bgp.common import constants as bgp_consts
from neutron.services.bgp.driver import exceptions as driver_exc
LOG = logging.getLogger(__name__)
class BgpDrAgent(manager.Manager):
    """BGP Dynamic Routing agent service manager.

    Note that the public methods of this class are exposed as the server side
    of an rpc interface. The neutron server uses
    neutron.api.rpc.agentnotifiers.bgp_dr_rpc_agent_api.
    BgpDrAgentNotifyApi as the client side to execute the methods
    here. For more information about changing rpc interfaces, see
    doc/source/devref/rpc_api.rst.

    API version history:
        1.0 initial Version
    """
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host, conf=None):
        super(BgpDrAgent, self).__init__()
        self.initialize_driver(conf)
        # speaker_id -> list of reasons a per-speaker resync is needed.
        self.needs_resync_reasons = collections.defaultdict(list)
        # Non-None value triggers a full resync of all speakers.
        self.needs_full_sync_reason = None
        self.cache = BgpSpeakerCache()
        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = BgpDrPluginApi(bgp_consts.BGP_PLUGIN,
                                         self.context, host)

    def initialize_driver(self, conf):
        """Load the configured BGP speaker driver; exit on import failure."""
        self.conf = conf or cfg.CONF.BGP
        try:
            self.dr_driver_cls = (
                importutils.import_object(self.conf.bgp_speaker_driver,
                                          self.conf))
        except ImportError:
            LOG.exception(_LE("Error while importing BGP speaker driver %s"),
                          self.conf.bgp_speaker_driver)
            # Agent is useless without a driver — abort startup.
            raise SystemExit(1)

    def _handle_driver_failure(self, bgp_speaker_id, method, driver_exec):
        # On any driver error, log it and schedule a resync of the speaker
        # so agent state converges with the server on the next cycle.
        self.schedule_resync(reason=driver_exec,
                             speaker_id=bgp_speaker_id)
        LOG.error(_LE('Call to driver for BGP Speaker %(bgp_speaker)s '
                      '%(method)s has failed with exception '
                      '%(driver_exec)s.'),
                  {'bgp_speaker': bgp_speaker_id,
                   'method': method,
                   'driver_exec': driver_exec})

    def after_start(self):
        self.run()
        LOG.info(_LI("BGP Dynamic Routing agent started"))

    def run(self):
        """Activate BGP Dynamic Routing agent."""
        self.sync_state(self.context)
        self.periodic_resync(self.context)

    @utils.synchronized('bgp-dragent')
    def sync_state(self, context, full_sync=None, bgp_speakers=None):
        """Reconcile cached speaker state with what the server reports.

        :param full_sync: truthy to force resync of every hosted speaker.
        :param bgp_speakers: iterable of speaker ids to limit the sync to;
            when empty/None all hosted speakers are synced.
        """
        try:
            hosted_bgp_speakers = self.plugin_rpc.get_bgp_speakers(context)
            hosted_bgp_speaker_ids = [bgp_speaker['id']
                                      for bgp_speaker in hosted_bgp_speakers]
            cached_bgp_speakers = self.cache.get_bgp_speaker_ids()
            # Drop cached speakers the server no longer schedules here.
            for bgp_speaker_id in cached_bgp_speakers:
                if bgp_speaker_id not in hosted_bgp_speaker_ids:
                    self.remove_bgp_speaker_from_dragent(bgp_speaker_id)

            resync_all = not bgp_speakers or full_sync
            only_bs = set() if resync_all else set(bgp_speakers)
            for hosted_bgp_speaker in hosted_bgp_speakers:
                hosted_bs_id = hosted_bgp_speaker['id']
                if resync_all or hosted_bs_id in only_bs:
                    if not self.cache.is_bgp_speaker_added(hosted_bs_id):
                        # Not in cache yet: configure it from scratch.
                        self.safe_configure_dragent_for_bgp_speaker(
                            hosted_bgp_speaker)
                        continue
                    self.sync_bgp_speaker(hosted_bgp_speaker)
                    # Re-queue the speaker so its route cache is refreshed
                    # on the next periodic cycle as well.
                    resync_reason = "Periodic route cache refresh"
                    self.schedule_resync(speaker_id=hosted_bs_id,
                                         reason=resync_reason)
        except Exception as e:
            self.schedule_full_resync(reason=e)
            LOG.error(_LE('Unable to sync BGP speaker state.'))

    def sync_bgp_speaker(self, bgp_speaker):
        """Bring one speaker's peers and advertised routes up to date."""
        # sync BGP Speakers
        bgp_peer_ips = set(
            [bgp_peer['peer_ip'] for bgp_peer in bgp_speaker['peers']])
        cached_bgp_peer_ips = set(
            self.cache.get_bgp_peer_ips(bgp_speaker['id']))
        removed_bgp_peer_ips = cached_bgp_peer_ips - bgp_peer_ips
        # Remove peers that are cached locally but gone on the server.
        for bgp_peer_ip in removed_bgp_peer_ips:
            self.remove_bgp_peer_from_bgp_speaker(bgp_speaker['id'],
                                                  bgp_peer_ip)
        if bgp_peer_ips:
            self.add_bgp_peers_to_bgp_speaker(bgp_speaker)

        # sync advertise routes
        cached_adv_routes = self.cache.get_adv_routes(bgp_speaker['id'])
        adv_routes = bgp_speaker['advertised_routes']
        if cached_adv_routes == adv_routes:
            # Cache already matches the server; nothing to do.
            return

        # Withdraw stale routes, then (re-)advertise the current set.
        for cached_route in cached_adv_routes:
            if cached_route not in adv_routes:
                self.withdraw_route_via_bgp_speaker(bgp_speaker['id'],
                                                    bgp_speaker['local_as'],
                                                    cached_route)
        self.advertise_routes_via_bgp_speaker(bgp_speaker)

    @utils.exception_logger()
    def _periodic_resync_helper(self, context):
        """Resync the BgpDrAgent state at the configured interval."""
        if self.needs_resync_reasons or self.needs_full_sync_reason:
            full_sync = self.needs_full_sync_reason
            reasons = self.needs_resync_reasons
            # Reset old reasons
            self.needs_full_sync_reason = None
            self.needs_resync_reasons = collections.defaultdict(list)
            if full_sync:
                LOG.debug("resync all: %(reason)s", {"reason": full_sync})
            for bgp_speaker, reason in reasons.items():
                LOG.debug("resync (%(bgp_speaker)s): %(reason)s",
                          {"reason": reason, "bgp_speaker": bgp_speaker})
            self.sync_state(
                context, full_sync=full_sync, bgp_speakers=reasons.keys())

    # NOTE: spacing is set 1 sec. The actual interval is controlled
    # by neutron/service.py which defaults to CONF.periodic_interval
    @periodic_task.periodic_task(spacing=1)
    def periodic_resync(self, context):
        LOG.debug("Started periodic resync.")
        self._periodic_resync_helper(context)

    @utils.synchronized('bgp-dr-agent')
    def bgp_speaker_create_end(self, context, payload):
        """Handle bgp_speaker_create_end notification event."""
        bgp_speaker_id = payload['bgp_speaker']['id']
        LOG.debug('Received BGP speaker create notification for '
                  'speaker_id=%(speaker_id)s from the neutron server.',
                  {'speaker_id': bgp_speaker_id})
        self.add_bgp_speaker_helper(bgp_speaker_id)

    @utils.synchronized('bgp-dr-agent')
    def bgp_speaker_remove_end(self, context, payload):
        """Handle bgp_speaker_remove_end notification event."""
        bgp_speaker_id = payload['bgp_speaker']['id']
        LOG.debug('Received BGP speaker remove notification for '
                  'speaker_id=%(speaker_id)s from the neutron server.',
                  {'speaker_id': bgp_speaker_id})
        self.remove_bgp_speaker_from_dragent(bgp_speaker_id)

    @utils.synchronized('bgp-dr-agent')
    def bgp_peer_association_end(self, context, payload):
        """Handle bgp_peer_association_end notification event."""
        bgp_peer_id = payload['bgp_peer']['peer_id']
        bgp_speaker_id = payload['bgp_peer']['speaker_id']
        LOG.debug('Received BGP peer associate notification for '
                  'speaker_id=%(speaker_id)s peer_id=%(peer_id)s '
                  'from the neutron server.',
                  {'speaker_id': bgp_speaker_id,
                   'peer_id': bgp_peer_id})
        self.add_bgp_peer_helper(bgp_speaker_id, bgp_peer_id)

    @utils.synchronized('bgp-dr-agent')
    def bgp_peer_disassociation_end(self, context, payload):
        """Handle bgp_peer_disassociation_end notification event."""
        bgp_peer_ip = payload['bgp_peer']['peer_ip']
        bgp_speaker_id = payload['bgp_peer']['speaker_id']
        LOG.debug('Received BGP peer disassociate notification for '
                  'speaker_id=%(speaker_id)s peer_ip=%(peer_ip)s '
                  'from the neutron server.',
                  {'speaker_id': bgp_speaker_id,
                   'peer_ip': bgp_peer_ip})
        self.remove_bgp_peer_from_bgp_speaker(bgp_speaker_id, bgp_peer_ip)

    @utils.synchronized('bgp-dr-agent')
    def bgp_routes_advertisement_end(self, context, payload):
        """Handle bgp_routes_advertisement_end notification event."""
        bgp_speaker_id = payload['advertise_routes']['speaker_id']
        LOG.debug('Received routes advertisement end notification '
                  'for speaker_id=%(speaker_id)s from the neutron server.',
                  {'speaker_id': bgp_speaker_id})
        routes = payload['advertise_routes']['routes']
        self.add_routes_helper(bgp_speaker_id, routes)

    @utils.synchronized('bgp-dr-agent')
    def bgp_routes_withdrawal_end(self, context, payload):
        """Handle bgp_routes_withdrawal_end notification event."""
        bgp_speaker_id = payload['withdraw_routes']['speaker_id']
        LOG.debug('Received route withdrawal notification for '
                  'speaker_id=%(speaker_id)s from the neutron server.',
                  {'speaker_id': bgp_speaker_id})
        routes = payload['withdraw_routes']['routes']
        self.withdraw_routes_helper(bgp_speaker_id, routes)

    def add_bgp_speaker_helper(self, bgp_speaker_id):
        """Add BGP speaker."""
        bgp_speaker = self.safe_get_bgp_speaker_info(bgp_speaker_id)
        if bgp_speaker:
            self.add_bgp_speaker_on_dragent(bgp_speaker)

    def add_bgp_peer_helper(self, bgp_speaker_id, bgp_peer_id):
        """Add BGP peer."""
        # Ideally BGP Speaker must be added by now, If not then let's
        # re-sync.
        if not self.cache.is_bgp_speaker_added(bgp_speaker_id):
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason="BGP Speaker Out-of-sync")
            return

        bgp_peer = self.safe_get_bgp_peer_info(bgp_speaker_id,
                                               bgp_peer_id)
        if bgp_peer:
            bgp_speaker_as = self.cache.get_bgp_speaker_local_as(
                bgp_speaker_id)
            self.add_bgp_peer_to_bgp_speaker(bgp_speaker_id,
                                             bgp_speaker_as,
                                             bgp_peer)

    def add_routes_helper(self, bgp_speaker_id, routes):
        """Advertise routes to BGP speaker."""
        # Ideally BGP Speaker must be added by now, If not then let's
        # re-sync.
        if not self.cache.is_bgp_speaker_added(bgp_speaker_id):
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason="BGP Speaker Out-of-sync")
            return

        bgp_speaker_as = self.cache.get_bgp_speaker_local_as(bgp_speaker_id)
        for route in routes:
            self.advertise_route_via_bgp_speaker(bgp_speaker_id,
                                                 bgp_speaker_as,
                                                 route)
            # Stop early: a scheduled resync will redo the remaining work.
            if self.is_resync_scheduled(bgp_speaker_id):
                break

    def withdraw_routes_helper(self, bgp_speaker_id, routes):
        """Withdraw routes advertised by BGP speaker."""
        # Ideally BGP Speaker must be added by now, If not then let's
        # re-sync.
        if not self.cache.is_bgp_speaker_added(bgp_speaker_id):
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason="BGP Speaker Out-of-sync")
            return

        bgp_speaker_as = self.cache.get_bgp_speaker_local_as(bgp_speaker_id)
        for route in routes:
            self.withdraw_route_via_bgp_speaker(bgp_speaker_id,
                                                bgp_speaker_as,
                                                route)
            # Stop early: a scheduled resync will redo the remaining work.
            if self.is_resync_scheduled(bgp_speaker_id):
                break

    def safe_get_bgp_speaker_info(self, bgp_speaker_id):
        """Fetch speaker info via RPC; returns None (and schedules a
        resync) on failure."""
        try:
            bgp_speaker = self.plugin_rpc.get_bgp_speaker_info(self.context,
                                                               bgp_speaker_id)
            if not bgp_speaker:
                LOG.warning(_LW('BGP Speaker %s has been deleted.'),
                            bgp_speaker_id)
            return bgp_speaker
        except Exception as e:
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason=e)
            LOG.error(_LE('BGP Speaker %(bgp_speaker)s info call '
                          'failed with reason=%(e)s.'),
                      {'bgp_speaker': bgp_speaker_id, 'e': e})

    def safe_get_bgp_peer_info(self, bgp_speaker_id, bgp_peer_id):
        """Fetch peer info via RPC; returns None (and schedules a
        resync) on failure."""
        try:
            bgp_peer = self.plugin_rpc.get_bgp_peer_info(self.context,
                                                         bgp_peer_id)
            if not bgp_peer:
                # NOTE(review): logs the (falsy) peer value rather than
                # bgp_peer_id — presumably meant to log the id; confirm.
                LOG.warning(_LW('BGP Peer %s has been deleted.'), bgp_peer)
            return bgp_peer
        except Exception as e:
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason=e)
            LOG.error(_LE('BGP peer %(bgp_peer)s info call '
                          'failed with reason=%(e)s.'),
                      {'bgp_peer': bgp_peer_id, 'e': e})

    @utils.exception_logger()
    def safe_configure_dragent_for_bgp_speaker(self, bgp_speaker):
        """Configure a speaker, tolerating concurrent deletion."""
        try:
            self.add_bgp_speaker_on_dragent(bgp_speaker)
        except (bgp_ext.BgpSpeakerNotFound, RuntimeError):
            LOG.warning(_LW('BGP speaker %s may have been deleted and its '
                            'resources may have already been disposed.'),
                        bgp_speaker['id'])

    def add_bgp_speaker_on_dragent(self, bgp_speaker):
        """Cache a speaker and push it, its peers and routes to the driver."""
        # Caching BGP speaker details in BGPSpeakerCache. Will be used
        # during smooth.
        self.cache.put_bgp_speaker(bgp_speaker)

        LOG.debug('Calling driver for adding BGP speaker %(speaker_id)s,'
                  ' speaking for local_as %(local_as)s',
                  {'speaker_id': bgp_speaker['id'],
                   'local_as': bgp_speaker['local_as']})
        try:
            self.dr_driver_cls.add_bgp_speaker(bgp_speaker['local_as'])
        except driver_exc.BgpSpeakerAlreadyScheduled:
            # Driver already knows this speaker; nothing more to do.
            return
        except Exception as e:
            self._handle_driver_failure(bgp_speaker['id'],
                                        'add_bgp_speaker', e)

        # Add peer and route information to the driver.
        self.add_bgp_peers_to_bgp_speaker(bgp_speaker)
        self.advertise_routes_via_bgp_speaker(bgp_speaker)
        self.schedule_resync(speaker_id=bgp_speaker['id'],
                             reason="Periodic route cache refresh")

    def remove_bgp_speaker_from_dragent(self, bgp_speaker_id):
        """Remove a speaker from the cache and the driver."""
        if self.cache.is_bgp_speaker_added(bgp_speaker_id):
            bgp_speaker_as = self.cache.get_bgp_speaker_local_as(
                bgp_speaker_id)
            self.cache.remove_bgp_speaker_by_id(bgp_speaker_id)

            LOG.debug('Calling driver for removing BGP speaker %(speaker_as)s',
                      {'speaker_as': bgp_speaker_as})
            try:
                self.dr_driver_cls.delete_bgp_speaker(bgp_speaker_as)
            except Exception as e:
                self._handle_driver_failure(bgp_speaker_id,
                                            'remove_bgp_speaker', e)
            return

        # Ideally, only the added speakers can be removed by the neutron
        # server. Looks like there might be some synchronization
        # issue between the server and the agent. Let's initiate a re-sync
        # to resolve the issue.
        self.schedule_resync(speaker_id=bgp_speaker_id,
                             reason="BGP Speaker Out-of-sync")

    def add_bgp_peers_to_bgp_speaker(self, bgp_speaker):
        """Add all of a speaker's peers; stop early if a resync is queued."""
        for bgp_peer in bgp_speaker['peers']:
            self.add_bgp_peer_to_bgp_speaker(bgp_speaker['id'],
                                             bgp_speaker['local_as'],
                                             bgp_peer)
            if self.is_resync_scheduled(bgp_speaker['id']):
                break

    def add_bgp_peer_to_bgp_speaker(self, bgp_speaker_id,
                                    bgp_speaker_as, bgp_peer):
        """Cache one peer and register it with the driver (idempotent)."""
        if self.cache.get_bgp_peer_by_ip(bgp_speaker_id, bgp_peer['peer_ip']):
            # Peer already cached; assume the driver has it too.
            return

        self.cache.put_bgp_peer(bgp_speaker_id, bgp_peer)

        LOG.debug('Calling driver interface for adding BGP peer %(peer_ip)s '
                  'remote_as=%(remote_as)s to BGP Speaker running for '
                  'local_as=%(local_as)d',
                  {'peer_ip': bgp_peer['peer_ip'],
                   'remote_as': bgp_peer['remote_as'],
                   'local_as': bgp_speaker_as})
        try:
            self.dr_driver_cls.add_bgp_peer(bgp_speaker_as,
                                            bgp_peer['peer_ip'],
                                            bgp_peer['remote_as'],
                                            bgp_peer['auth_type'],
                                            bgp_peer['password'])
        except Exception as e:
            self._handle_driver_failure(bgp_speaker_id,
                                        'add_bgp_peer', e)

    def remove_bgp_peer_from_bgp_speaker(self, bgp_speaker_id, bgp_peer_ip):
        """Remove one peer from the cache and the driver."""
        # Ideally BGP Speaker must be added by now, If not then let's
        # re-sync.
        if not self.cache.is_bgp_speaker_added(bgp_speaker_id):
            self.schedule_resync(speaker_id=bgp_speaker_id,
                                 reason="BGP Speaker Out-of-sync")
            return

        if self.cache.is_bgp_peer_added(bgp_speaker_id, bgp_peer_ip):
            self.cache.remove_bgp_peer_by_ip(bgp_speaker_id, bgp_peer_ip)

            bgp_speaker_as = self.cache.get_bgp_speaker_local_as(
                bgp_speaker_id)

            LOG.debug('Calling driver interface to remove BGP peer '
                      '%(peer_ip)s from BGP Speaker running for '
                      'local_as=%(local_as)d',
                      {'peer_ip': bgp_peer_ip, 'local_as': bgp_speaker_as})
            try:
                self.dr_driver_cls.delete_bgp_peer(bgp_speaker_as,
                                                   bgp_peer_ip)
            except Exception as e:
                self._handle_driver_failure(bgp_speaker_id,
                                            'remove_bgp_peer', e)
            return

        # Ideally, only the added peers can be removed by the neutron
        # server. Looks like there might be some synchronization
        # issue between the server and the agent. Let's initiate a re-sync
        # to resolve the issue.
        self.schedule_resync(speaker_id=bgp_speaker_id,
                             reason="BGP Peer Out-of-sync")

    def advertise_routes_via_bgp_speaker(self, bgp_speaker):
        """Advertise all of a speaker's routes; stop early on queued resync."""
        for route in bgp_speaker['advertised_routes']:
            self.advertise_route_via_bgp_speaker(bgp_speaker['id'],
                                                 bgp_speaker['local_as'],
                                                 route)
            if self.is_resync_scheduled(bgp_speaker['id']):
                break

    def advertise_route_via_bgp_speaker(self, bgp_speaker_id,
                                        bgp_speaker_as, route):
        """Cache one route and advertise it via the driver (idempotent)."""
        if self.cache.is_route_advertised(bgp_speaker_id, route):
            # Requested route already advertised. Hence, Nothing to be done.
            return

        self.cache.put_adv_route(bgp_speaker_id, route)

        LOG.debug('Calling driver for advertising prefix: %(cidr)s, '
                  'next_hop: %(nexthop)s',
                  {'cidr': route['destination'],
                   'nexthop': route['next_hop']})
        try:
            self.dr_driver_cls.advertise_route(bgp_speaker_as,
                                               route['destination'],
                                               route['next_hop'])
        except Exception as e:
            self._handle_driver_failure(bgp_speaker_id,
                                        'advertise_route', e)

    def withdraw_route_via_bgp_speaker(self, bgp_speaker_id,
                                       bgp_speaker_as, route):
        """Withdraw one route from the cache and the driver."""
        if self.cache.is_route_advertised(bgp_speaker_id, route):
            self.cache.remove_adv_route(bgp_speaker_id, route)
            LOG.debug('Calling driver for withdrawing prefix: %(cidr)s, '
                      'next_hop: %(nexthop)s',
                      {'cidr': route['destination'],
                       'nexthop': route['next_hop']})
            try:
                self.dr_driver_cls.withdraw_route(bgp_speaker_as,
                                                  route['destination'],
                                                  route['next_hop'])
            except Exception as e:
                self._handle_driver_failure(bgp_speaker_id,
                                            'withdraw_route', e)
            return

        # Ideally, only the advertised routes can be withdrawn by the
        # neutron server. Looks like there might be some synchronization
        # issue between the server and the agent. Let's initiate a re-sync
        # to resolve the issue.
        self.schedule_resync(speaker_id=bgp_speaker_id,
                             reason="Advertised routes Out-of-sync")

    def schedule_full_resync(self, reason):
        """Request a resync of every hosted BGP speaker."""
        LOG.debug('Recording full resync request for all BGP Speakers '
                  'with reason=%s', reason)
        self.needs_full_sync_reason = reason

    def schedule_resync(self, reason, speaker_id):
        """Schedule a resync for the given BGP Speaker."""
        LOG.debug('Recording resync request for BGP Speaker %s '
                  'with reason=%s', speaker_id, reason)
        self.needs_resync_reasons[speaker_id].append(reason)

    def is_resync_scheduled(self, bgp_speaker_id):
        """Return True if a resync is already queued for this speaker."""
        if bgp_speaker_id not in self.needs_resync_reasons:
            return False

        reason = self.needs_resync_reasons[bgp_speaker_id]
        # Re-sync scheduled for the queried BGP speaker. No point
        # continuing further. Let's stop processing and wait for
        # re-sync to happen.
        LOG.debug('Re-sync already scheduled for BGP Speaker %s '
                  'with reason=%s', bgp_speaker_id, reason)
        return True
class BgpDrPluginApi(object):
    """Agent side of BgpDrAgent RPC API.

    This class implements the client side of an rpc interface.
    The server side of this interface can be found in
    neutron.api.rpc.handlers.bgp_speaker_rpc.BgpSpeakerRpcCallback.
    For more information about changing rpc interfaces, see
    doc/source/devref/rpc_api.rst.

    API version history:
        1.0 - Initial version.
    """

    def __init__(self, topic, context, host):
        self.context = context
        self.host = host
        rpc_target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(rpc_target)

    def get_bgp_speakers(self, context):
        """Make a remote process call to retrieve all BGP speakers info."""
        prepared = self.client.prepare()
        return prepared.call(context, 'get_bgp_speakers', host=self.host)

    def get_bgp_speaker_info(self, context, bgp_speaker_id):
        """Make a remote process call to retrieve a BGP speaker info."""
        prepared = self.client.prepare()
        return prepared.call(context, 'get_bgp_speaker_info',
                             bgp_speaker_id=bgp_speaker_id)

    def get_bgp_peer_info(self, context, bgp_peer_id):
        """Make a remote process call to retrieve a BGP peer info."""
        prepared = self.client.prepare()
        return prepared.call(context, 'get_bgp_peer_info',
                             bgp_peer_id=bgp_peer_id)
class BgpSpeakerCache(object):
    """Agent cache of the current BGP speaker state.

    This class is designed to support the advertisement for
    multiple BGP speaker via a single driver interface.

    Internal layout::

        self.cache = {speaker_id: {'bgp_speaker': <speaker dict>,
                                   'peers': {peer_ip: <peer dict>},
                                   'advertised_routes': [<route dict>]}}

    Version history:
        1.0 - Initial version for caching the state of BGP speaker.
    """
    def __init__(self):
        self.cache = {}

    def get_bgp_speaker_ids(self):
        """Return the ids of all cached speakers (dict view)."""
        return self.cache.keys()

    def put_bgp_speaker(self, bgp_speaker):
        """Insert/replace a speaker entry, resetting peers and routes.

        :param bgp_speaker: speaker dict; must contain an 'id' key.
        """
        # BUG FIX: previously the cached entry dict (not the id) was passed
        # to remove_bgp_speaker_by_id, which raised
        # "TypeError: unhashable type: 'dict'" whenever an already-cached
        # speaker was re-added.
        if bgp_speaker['id'] in self.cache:
            self.remove_bgp_speaker_by_id(bgp_speaker['id'])
        self.cache[bgp_speaker['id']] = {'bgp_speaker': bgp_speaker,
                                         'peers': {},
                                         'advertised_routes': []}

    def get_bgp_speaker_by_id(self, bgp_speaker_id):
        """Return the cached speaker dict, or None if not cached."""
        if bgp_speaker_id in self.cache:
            return self.cache[bgp_speaker_id]['bgp_speaker']

    def get_bgp_speaker_local_as(self, bgp_speaker_id):
        """Return the speaker's local AS number, or None if not cached."""
        bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id)
        if bgp_speaker:
            return bgp_speaker['local_as']

    def is_bgp_speaker_added(self, bgp_speaker_id):
        # Truthy (the speaker dict) when cached, None otherwise.
        return self.get_bgp_speaker_by_id(bgp_speaker_id)

    def remove_bgp_speaker_by_id(self, bgp_speaker_id):
        """Drop a speaker entry (and its peers/routes); no-op if absent."""
        if bgp_speaker_id in self.cache:
            del self.cache[bgp_speaker_id]

    def put_bgp_peer(self, bgp_speaker_id, bgp_peer):
        """Insert/replace a peer, keyed by its peer_ip.

        The speaker entry must already exist in the cache.
        """
        if bgp_peer['peer_ip'] in self.get_bgp_peer_ips(bgp_speaker_id):
            del self.cache[bgp_speaker_id]['peers'][bgp_peer['peer_ip']]

        self.cache[bgp_speaker_id]['peers'][bgp_peer['peer_ip']] = bgp_peer

    def is_bgp_peer_added(self, bgp_speaker_id, bgp_peer_ip):
        # Truthy (the peer dict) when cached, None otherwise.
        return self.get_bgp_peer_by_ip(bgp_speaker_id, bgp_peer_ip)

    def get_bgp_peer_ips(self, bgp_speaker_id):
        """Return the cached peer IPs for a speaker, or None if the
        speaker is not cached."""
        bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id)
        if bgp_speaker:
            return self.cache[bgp_speaker_id]['peers'].keys()

    def get_bgp_peer_by_ip(self, bgp_speaker_id, bgp_peer_ip):
        """Return a cached peer dict by IP, or None."""
        bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id)
        if bgp_speaker:
            return self.cache[bgp_speaker_id]['peers'].get(bgp_peer_ip)

    def remove_bgp_peer_by_ip(self, bgp_speaker_id, bgp_peer_ip):
        """Drop a cached peer by IP; no-op if absent."""
        if bgp_peer_ip in self.get_bgp_peer_ips(bgp_speaker_id):
            del self.cache[bgp_speaker_id]['peers'][bgp_peer_ip]

    def put_adv_route(self, bgp_speaker_id, route):
        """Record a route as advertised by the given speaker."""
        self.cache[bgp_speaker_id]['advertised_routes'].append(route)

    def is_route_advertised(self, bgp_speaker_id, route):
        """True if a route with the same destination AND next_hop is
        recorded for the speaker."""
        routes = self.cache[bgp_speaker_id]['advertised_routes']
        for r in routes:
            if r['destination'] == route['destination'] and (
                    r['next_hop'] == route['next_hop']):
                return True
        return False

    def remove_adv_route(self, bgp_speaker_id, route):
        """Drop recorded routes matching the given destination.

        NOTE(review): matches on destination only — next_hop is ignored
        here even though is_route_advertised compares both fields.
        """
        routes = self.cache[bgp_speaker_id]['advertised_routes']
        updated_routes = [r for r in routes if (
            r['destination'] != route['destination'])]
        self.cache[bgp_speaker_id]['advertised_routes'] = updated_routes

    def get_adv_routes(self, bgp_speaker_id):
        """Return the list of routes recorded for a speaker."""
        return self.cache[bgp_speaker_id]['advertised_routes']

    def get_state(self):
        """Return aggregate counters for agent state reporting.

        NOTE(review): peer/route counts are taken from the stored speaker
        dicts ('peers'/'advertised_routes' lists as supplied to
        put_bgp_speaker), not from the cache's own peer/route sub-state.
        """
        bgp_speaker_ids = self.get_bgp_speaker_ids()
        num_bgp_speakers = len(bgp_speaker_ids)
        num_bgp_peers = 0
        num_advertised_routes = 0
        for bgp_speaker_id in bgp_speaker_ids:
            bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id)
            num_bgp_peers += len(bgp_speaker['peers'])
            num_advertised_routes += len(bgp_speaker['advertised_routes'])
        return {'bgp_speakers': num_bgp_speakers,
                'bgp_peers': num_bgp_peers,
                'advertise_routes': num_advertised_routes}
class BgpDrAgentWithStateReport(BgpDrAgent):
    """BgpDrAgent variant that periodically reports state to the server."""

    def __init__(self, host, conf=None):
        super(BgpDrAgentWithStateReport,
              self).__init__(host, conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # Payload sent on each report_state call; 'configurations' is
        # refreshed from the speaker cache, 'start_flag' is popped after
        # the first successful report.
        self.agent_state = {
            'agent_type': bgp_consts.AGENT_TYPE_BGP_ROUTING,
            'binary': 'neutron-bgp-dragent',
            'configurations': {},
            'host': host,
            'topic': bgp_consts.BGP_DRAGENT,
            'start_flag': True}
        report_interval = cfg.CONF.AGENT.report_interval
        # A zero/None interval disables heartbeat reporting entirely.
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Heartbeat callback: push agent state to the neutron server."""
        LOG.debug("Report state task started")
        try:
            self.agent_state.get('configurations').update(
                self.cache.get_state())
            ctx = context.get_admin_context_without_session()
            agent_status = self.state_rpc.report_state(ctx, self.agent_state,
                                                       True)
            if agent_status == constants.AGENT_REVIVED:
                LOG.info(_LI("Agent has just been revived. "
                             "Scheduling full sync"))
                self.schedule_full_resync(
                    reason=_("Agent has just been revived"))
        except AttributeError:
            # This means the server does not support report_state
            LOG.warning(_LW("Neutron server does not support state report. "
                            "State report for this agent will be disabled."))
            # Stop the heartbeat, start serving anyway, and bail out.
            self.heartbeat.stop()
            self.run()
            return
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))
            return
        # First successful report: kick off the agent's main loop once.
        if self.agent_state.pop('start_flag', None):
            self.run()

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.schedule_full_resync(
            reason=_("BgpDrAgent updated: %s") % payload)
        LOG.info(_LI("agent_updated by server side %s!"), payload)

    def after_start(self):
        # Overrides BgpDrAgent.after_start: does NOT call run() here —
        # run() is triggered from _report_state on the first report.
        LOG.info(_LI("BGP dynamic routing agent started"))

View File

@ -1,47 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_config import cfg
from oslo_service import service
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.common import config as common_config
from neutron import service as neutron_service
from neutron.services.bgp.agent import config as bgp_dragent_config
from neutron.services.bgp.common import constants as bgp_consts
def register_options():
    """Register all config options the BGP dragent service consumes.

    Side effect only: mutates the global cfg.CONF registry.
    """
    config.register_agent_state_opts_helper(cfg.CONF)
    config.register_root_helper(cfg.CONF)
    # BGP driver and protocol options live under the [BGP] section.
    cfg.CONF.register_opts(bgp_dragent_config.BGP_DRIVER_OPTS, 'BGP')
    cfg.CONF.register_opts(bgp_dragent_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
    cfg.CONF.register_opts(external_process.OPTS)
def main():
    """Entry point for the neutron-bgp-dragent service.

    Registers options, parses CLI/config, sets up logging, then launches
    the service whose manager is BgpDrAgentWithStateReport and blocks
    until it exits.
    """
    register_options()
    common_config.init(sys.argv[1:])
    config.setup_logging()
    server = neutron_service.Service.create(
        binary='neutron-bgp-dragent',
        topic=bgp_consts.BGP_DRAGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.services.bgp.agent.bgp_dragent.'
                'BgpDrAgentWithStateReport')
    service.launch(cfg.CONF, server).wait()

View File

@ -1,289 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPAddress
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.api.rpc.agentnotifiers import bgp_dr_rpc_agent_api
from neutron.api.rpc.handlers import bgp_speaker_rpc as bs_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db
from neutron.extensions import bgp as bgp_ext
from neutron.extensions import bgp_dragentscheduler as dras_ext
from neutron.services.bgp.common import constants as bgp_consts
from neutron.services import service_base
# Service plugin name, derived from the BGP extension alias.
PLUGIN_NAME = bgp_ext.BGP_EXT_ALIAS + '_svc_plugin'
LOG = logging.getLogger(__name__)
class BgpPlugin(service_base.ServicePluginBase,
bgp_db.BgpDbMixin,
bgp_dragentscheduler_db.BgpDrAgentSchedulerDbMixin):
supported_extension_aliases = [bgp_ext.BGP_EXT_ALIAS,
dras_ext.BGP_DRAGENT_SCHEDULER_EXT_ALIAS]
def __init__(self):
super(BgpPlugin, self).__init__()
self.bgp_drscheduler = importutils.import_object(
cfg.CONF.bgp_drscheduler_driver)
self._setup_rpc()
self._register_callbacks()
def get_plugin_name(self):
return PLUGIN_NAME
def get_plugin_type(self):
return bgp_ext.BGP_EXT_ALIAS
def get_plugin_description(self):
"""returns string description of the plugin."""
return ("BGP dynamic routing service for announcement of next-hops "
"for tenant networks, floating IP's, and DVR host routes.")
def _setup_rpc(self):
self.topic = bgp_consts.BGP_PLUGIN
self.conn = n_rpc.create_connection()
self.agent_notifiers[bgp_consts.AGENT_TYPE_BGP_ROUTING] = (
bgp_dr_rpc_agent_api.BgpDrAgentNotifyApi()
)
self._bgp_rpc = self.agent_notifiers[bgp_consts.AGENT_TYPE_BGP_ROUTING]
self.endpoints = [bs_rpc.BgpSpeakerRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
self.conn.consume_in_threads()
def _register_callbacks(self):
registry.subscribe(self.floatingip_update_callback,
resources.FLOATING_IP,
events.AFTER_UPDATE)
registry.subscribe(self.router_interface_callback,
resources.ROUTER_INTERFACE,
events.AFTER_CREATE)
registry.subscribe(self.router_interface_callback,
resources.ROUTER_INTERFACE,
events.BEFORE_CREATE)
registry.subscribe(self.router_interface_callback,
resources.ROUTER_INTERFACE,
events.AFTER_DELETE)
registry.subscribe(self.router_gateway_callback,
resources.ROUTER_GATEWAY,
events.AFTER_CREATE)
registry.subscribe(self.router_gateway_callback,
resources.ROUTER_GATEWAY,
events.AFTER_DELETE)
def create_bgp_speaker(self, context, bgp_speaker):
bgp_speaker = super(BgpPlugin, self).create_bgp_speaker(context,
bgp_speaker)
return bgp_speaker
def delete_bgp_speaker(self, context, bgp_speaker_id):
hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers(
context,
[bgp_speaker_id])
super(BgpPlugin, self).delete_bgp_speaker(context, bgp_speaker_id)
for agent in hosted_bgp_dragents:
self._bgp_rpc.bgp_speaker_removed(context,
bgp_speaker_id,
agent.host)
def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info):
ret_value = super(BgpPlugin, self).add_bgp_peer(context,
bgp_speaker_id,
bgp_peer_info)
hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers(
context,
[bgp_speaker_id])
for agent in hosted_bgp_dragents:
self._bgp_rpc.bgp_peer_associated(context, bgp_speaker_id,
ret_value['bgp_peer_id'],
agent.host)
return ret_value
def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info):
hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers(
context, [bgp_speaker_id])
ret_value = super(BgpPlugin, self).remove_bgp_peer(context,
bgp_speaker_id,
bgp_peer_info)
for agent in hosted_bgp_dragents:
self._bgp_rpc.bgp_peer_disassociated(context,
bgp_speaker_id,
ret_value['bgp_peer_id'],
agent.host)
def floatingip_update_callback(self, resource, event, trigger, **kwargs):
if event != events.AFTER_UPDATE:
return
ctx = context.get_admin_context()
new_router_id = kwargs['router_id']
last_router_id = kwargs['last_known_router_id']
next_hop = kwargs['next_hop']
dest = kwargs['floating_ip_address'] + '/32'
bgp_speakers = self._bgp_speakers_for_gw_network_by_family(
ctx,
kwargs['floating_network_id'],
n_const.IP_VERSION_4)
if last_router_id and new_router_id != last_router_id:
for bgp_speaker in bgp_speakers:
self.stop_route_advertisements(ctx, self._bgp_rpc,
bgp_speaker.id, [dest])
if next_hop and new_router_id != last_router_id:
new_host_route = {'destination': dest, 'next_hop': next_hop}
for bgp_speaker in bgp_speakers:
self.start_route_advertisements(ctx, self._bgp_rpc,
bgp_speaker.id,
[new_host_route])
def router_interface_callback(self, resource, event, trigger, **kwargs):
if event == events.AFTER_CREATE:
self._handle_router_interface_after_create(**kwargs)
if event == events.AFTER_DELETE:
gw_network = kwargs['network_id']
next_hops = self._next_hops_from_gateway_ips(
kwargs['gateway_ips'])
ctx = context.get_admin_context()
speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network)
for speaker in speakers:
routes = self._route_list_from_prefixes_and_next_hop(
kwargs['cidrs'],
next_hops[speaker.ip_version])
self._handle_router_interface_after_delete(gw_network, routes)
def _handle_router_interface_after_create(self, **kwargs):
gw_network = kwargs['network_id']
if not gw_network:
return
ctx = context.get_admin_context()
with ctx.session.begin(subtransactions=True):
speakers = self._bgp_speakers_for_gateway_network(ctx,
gw_network)
next_hops = self._next_hops_from_gateway_ips(
kwargs['gateway_ips'])
for speaker in speakers:
prefixes = self._tenant_prefixes_by_router(
ctx,
kwargs['router_id'],
speaker.id)
next_hop = next_hops.get(speaker.ip_version)
if next_hop:
rl = self._route_list_from_prefixes_and_next_hop(prefixes,
next_hop)
self.start_route_advertisements(ctx,
self._bgp_rpc,
speaker.id,
rl)
def router_gateway_callback(self, resource, event, trigger, **kwargs):
if event == events.AFTER_CREATE:
self._handle_router_gateway_after_create(**kwargs)
if event == events.AFTER_DELETE:
gw_network = kwargs['network_id']
router_id = kwargs['router_id']
next_hops = self._next_hops_from_gateway_ips(
kwargs['gateway_ips'])
ctx = context.get_admin_context()
speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network)
for speaker in speakers:
if speaker.ip_version in next_hops:
next_hop = next_hops[speaker.ip_version]
prefixes = self._tenant_prefixes_by_router(ctx,
router_id,
speaker.id)
routes = self._route_list_from_prefixes_and_next_hop(
prefixes,
next_hop)
self._handle_router_interface_after_delete(gw_network, routes)
    def _handle_router_gateway_after_create(self, **kwargs):
        """Advertise tenant prefixes once a router gains a gateway port.

        NOTE(review): this event payload supplies the gateway IPs under
        the ``gw_ips`` key, while the AFTER_DELETE payload uses
        ``gateway_ips`` -- presumably set by the notifier; verify against
        the emitting code before unifying.
        """
        ctx = context.get_admin_context()
        gw_network = kwargs['network_id']
        router_id = kwargs['router_id']
        with ctx.session.begin(subtransactions=True):
            speakers = self._bgp_speakers_for_gateway_network(ctx,
                                                              gw_network)
            next_hops = self._next_hops_from_gateway_ips(kwargs['gw_ips'])
            for speaker in speakers:
                # Skip address families the gateway has no IP for.
                if speaker.ip_version in next_hops:
                    next_hop = next_hops[speaker.ip_version]
                    prefixes = self._tenant_prefixes_by_router(ctx,
                                                               router_id,
                                                               speaker.id)
                    routes = self._route_list_from_prefixes_and_next_hop(
                        prefixes,
                        next_hop)
                    self.start_route_advertisements(ctx, self._bgp_rpc,
                                                    speaker.id, routes)
def _handle_router_interface_after_delete(self, gw_network, routes):
if gw_network and routes:
ctx = context.get_admin_context()
speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network)
for speaker in speakers:
self.stop_route_advertisements(ctx, self._bgp_rpc,
speaker.id, routes)
def _next_hops_from_gateway_ips(self, gw_ips):
if gw_ips:
return {IPAddress(ip).version: ip for ip in gw_ips}
return {}
def start_route_advertisements(self, ctx, bgp_rpc,
bgp_speaker_id, routes):
agents = self.list_dragent_hosting_bgp_speaker(ctx, bgp_speaker_id)
for agent in agents['agents']:
bgp_rpc.bgp_routes_advertisement(ctx,
bgp_speaker_id,
routes,
agent['host'])
msg = "Starting route advertisements for %s on BgpSpeaker %s"
self._debug_log_for_routes(msg, routes, bgp_speaker_id)
def stop_route_advertisements(self, ctx, bgp_rpc,
bgp_speaker_id, routes):
agents = self.list_dragent_hosting_bgp_speaker(ctx, bgp_speaker_id)
for agent in agents['agents']:
bgp_rpc.bgp_routes_withdrawal(ctx,
bgp_speaker_id,
routes,
agent['host'])
msg = "Stopping route advertisements for %s on BgpSpeaker %s"
self._debug_log_for_routes(msg, routes, bgp_speaker_id)
def _debug_log_for_routes(self, msg, routes, bgp_speaker_id):
# Could have a large number of routes passed, check log level first
if LOG.isEnabledFor(logging.DEBUG):
for route in routes:
LOG.debug(msg, route, bgp_speaker_id)

View File

@ -1,27 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Agent type string the BGP dynamic routing agent reports.
AGENT_TYPE_BGP_ROUTING = 'BGP dynamic routing agent'
# Short name of the agent -- presumably its binary/process name; verify
# against the agent entry point.
BGP_DRAGENT = 'bgp_dragent'
# Presumably the RPC topic shared between plugin and agent -- verify
# against the RPC setup code.
BGP_PLUGIN = 'q-bgp-plugin'
# List of supported authentication types.
SUPPORTED_AUTH_TYPES = ['none', 'md5']
# Supported AS number range
# NOTE(review): limited to 2-byte AS numbers; 4-byte ASNs (RFC 6793)
# would require MAX_ASNUM = 4294967295 -- confirm before extending.
MIN_ASNUM = 1
MAX_ASNUM = 65535

View File

@ -1,28 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import neutron.services.bgp.agent.config
def list_bgp_agent_opts():
    """Return (group, options) pairs describing BGP agent config options."""
    agent_cfg = neutron.services.bgp.agent.config
    bgp_opts = itertools.chain(agent_cfg.BGP_DRIVER_OPTS,
                               agent_cfg.BGP_PROTO_CONFIG_OPTS)
    return [('BGP', bgp_opts)]

View File

@ -1,142 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BgpDriverBase(object):
    """Base class for BGP Speaking drivers.

    Any class which provides BGP functionality should extend this
    defined base class.  All methods are abstract; concrete drivers
    must implement every one of them.
    """

    @abc.abstractmethod
    def add_bgp_speaker(self, speaker_as):
        """Add a BGP speaker.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :raises: BgpSpeakerAlreadyScheduled, BgpSpeakerMaxScheduled,
                 InvalidParamType, InvalidParamRange
        """

    @abc.abstractmethod
    def delete_bgp_speaker(self, speaker_as):
        """Deletes BGP speaker.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :raises: BgpSpeakerNotAdded
        """

    @abc.abstractmethod
    def add_bgp_peer(self, speaker_as, peer_ip, peer_as,
                     auth_type='none', password=None):
        """Add a new BGP peer.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :param peer_ip: Specifies the IP address of the peer.
        :type peer_ip: string
        :param peer_as: Specifies Autonomous Number of the peer.
                        Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type peer_as: integer
        :param auth_type: Specifies authentication type.
                          By default, authentication will be disabled.
        :type auth_type: value in SUPPORTED_AUTH_TYPES
        :param password: Authentication password.By default, authentication
                         will be disabled.
        :type password: string
        :raises: BgpSpeakerNotAdded, InvalidParamType, InvalidParamRange,
                 InvaildAuthType, PasswordNotSpecified
        """

    @abc.abstractmethod
    def delete_bgp_peer(self, speaker_as, peer_ip):
        """Delete a BGP peer associated with the given peer IP

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :param peer_ip: Specifies the IP address of the peer. Must be the
                        string representation of an IP address.
        :type peer_ip: string
        :raises: BgpSpeakerNotAdded, BgpPeerNotAdded
        """

    @abc.abstractmethod
    def advertise_route(self, speaker_as, cidr, nexthop):
        """Add a new prefix to advertise.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :param cidr: CIDR of the network to advertise. Must be the string
                     representation of an IP network (e.g., 10.1.1.0/24)
        :type cidr: string
        :param nexthop: Specifies the next hop address for the above
                        prefix.
        :type nexthop: string
        :raises: BgpSpeakerNotAdded, InvalidParamType
        """

    @abc.abstractmethod
    def withdraw_route(self, speaker_as, cidr, nexthop=None):
        """Withdraw an advertised prefix.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :param cidr: CIDR of the network to withdraw. Must be the string
                     representation of an IP network (e.g., 10.1.1.0/24)
        :type cidr: string
        :param nexthop: Specifies the next hop address for the above
                        prefix.
        :type nexthop: string
        :raises: BgpSpeakerNotAdded, RouteNotAdvertised, InvalidParamType
        """

    @abc.abstractmethod
    def get_bgp_speaker_statistics(self, speaker_as):
        """Collect BGP Speaker statistics.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :raises: BgpSpeakerNotAdded
        :returns: bgp_speaker_stats: string
        """

    @abc.abstractmethod
    def get_bgp_peer_statistics(self, speaker_as, peer_ip, peer_as):
        """Collect BGP Peer statistics.

        :param speaker_as: Specifies BGP Speaker autonomous system number.
                           Must be an integer between MIN_ASNUM and MAX_ASNUM.
        :type speaker_as: integer
        :param peer_ip: Specifies the IP address of the peer.
        :type peer_ip: string
        :param peer_as: Specifies the AS number of the peer. Must be an
                        integer between MIN_ASNUM and MAX_ASNUM.
        :type peer_as: integer .
        :raises: BgpSpeakerNotAdded, BgpPeerNotAdded
        :returns: bgp_peer_stats: string
        """

View File

@ -1,61 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron._i18n import _
from neutron.common import exceptions as n_exc
# BGP Driver Exceptions
class BgpSpeakerNotAdded(n_exc.BadRequest):
    # Raised when an operation targets a speaker this agent has not added.
    message = _("BGP Speaker for local_as=%(local_as)s with "
                "router_id=%(rtid)s not added yet.")
class BgpSpeakerMaxScheduled(n_exc.BadRequest):
    # Raised when the driver already hosts its maximum speaker count.
    message = _("Already hosting maximum number of BGP Speakers. "
                "Allowed scheduled count=%(count)d")
class BgpSpeakerAlreadyScheduled(n_exc.Conflict):
    # Raised when a speaker with the same local AS is already hosted.
    message = _("Already hosting BGP Speaker for local_as=%(current_as)d with "
                "router_id=%(rtid)s.")
class BgpPeerNotAdded(n_exc.BadRequest):
    # Raised when an operation targets a peer not added to the speaker.
    message = _("BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running "
                "for BGP Speaker %(speaker_as)d not added yet.")
class RouteNotAdvertised(n_exc.BadRequest):
    # Raised when withdrawing a route that was never advertised.
    message = _("Route %(cidr)s not advertised for BGP Speaker "
                "%(speaker_as)d.")
class InvalidParamType(n_exc.NeutronException):
    # Raised by the driver validators on a type mismatch.
    message = _("Parameter %(param)s must be of %(param_type)s type.")
class InvalidParamRange(n_exc.NeutronException):
    # Raised by the driver validators on an out-of-range value.
    message = _("%(param)s must be in %(range)s range.")
class InvaildAuthType(n_exc.BadRequest):
    # NOTE(review): class name misspells "Invalid"; kept as-is because
    # driver code raises it by this name -- renaming would break callers.
    message = _("Authentication type not supported. Requested "
                "type=%(auth_type)s.")
class PasswordNotSpecified(n_exc.BadRequest):
    # Raised when an auth type other than 'none' is given with no password.
    message = _("Password not specified for authentication "
                "type=%(auth_type)s.")

View File

@ -1,202 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.driver import base
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver import utils
from neutron._i18n import _LE, _LI
LOG = logging.getLogger(__name__)
# Function for logging BGP peer and path changes.
def bgp_peer_down_cb(remote_ip, remote_as):
    """Log that a BGP peer session went DOWN (Ryu callback)."""
    peer_info = {'peer_ip': remote_ip, 'peer_as': remote_as}
    LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d went DOWN.'),
             peer_info)
def bgp_peer_up_cb(remote_ip, remote_as):
    """Log that a BGP peer session came UP (Ryu callback)."""
    peer_info = {'peer_ip': remote_ip, 'peer_as': remote_as}
    LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d is UP.'),
             peer_info)
def best_path_change_cb(event):
    """Log a best-path change reported by the Ryu BGP speaker."""
    details = {'prefix': event.prefix,
               'nexthop': event.nexthop,
               'remote_as': event.remote_as,
               'is_withdraw': event.is_withdraw}
    LOG.info(_LI("Best path change observed. cidr=%(prefix)s, "
                 "nexthop=%(nexthop)s, remote_as=%(remote_as)d, "
                 "is_withdraw=%(is_withdraw)s"), details)
class RyuBgpDriver(base.BgpDriverBase):
    """BGP speaker implementation via Ryu.

    Thin adapter between the BgpDriverBase interface and
    ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.  Created speakers
    are tracked in a BgpMultiSpeakerCache keyed by local AS number,
    although Ryu itself can host only one speaker per process.
    """

    def __init__(self, cfg):
        LOG.info(_LI('Initializing Ryu driver for BGP Speaker functionality.'))
        self._read_config(cfg)

        # Note: Even though Ryu can only support one BGP speaker as of now,
        # we have tried making the framework generic for the future purposes.
        self.cache = utils.BgpMultiSpeakerCache()

    def _read_config(self, cfg):
        """Cache the configured BGP router-id from *cfg*.

        NOTE(review): when cfg or bgp_router_id is missing, only an error
        is logged and ``self.routerid`` is never set, so later calls fail
        with AttributeError -- confirm whether failing fast is preferred.
        """
        if cfg is None or cfg.bgp_router_id is None:
            # If either cfg or router_id is not specified, raise voice
            LOG.error(_LE('BGP router-id MUST be specified for the correct '
                          'functional working.'))
        else:
            self.routerid = cfg.bgp_router_id
            LOG.info(_LI('Initialized Ryu BGP Speaker driver interface with '
                         'bgp_router_id=%s'), self.routerid)

    def add_bgp_speaker(self, speaker_as):
        """Create a Ryu BGPSpeaker for *speaker_as* and cache it."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if curr_speaker is not None:
            raise bgp_driver_exc.BgpSpeakerAlreadyScheduled(
                current_as=speaker_as,
                rtid=self.routerid)

        # Ryu can only support One speaker
        if self.cache.get_hosted_bgp_speakers_count() == 1:
            raise bgp_driver_exc.BgpSpeakerMaxScheduled(count=1)

        # Validate input parameters.
        # speaker_as must be an integer in the allowed range.
        utils.validate_as_num('local_as', speaker_as)

        # Notify Ryu about BGP Speaker addition.
        # Please note: Since, only the route-advertisement support is
        # implemented we are explicitly setting the bgp_server_port
        # attribute to 0 which disables listening on port 179.
        curr_speaker = bgpspeaker.BGPSpeaker(
            as_number=speaker_as,
            router_id=self.routerid, bgp_server_port=0,
            best_path_change_handler=best_path_change_cb,
            peer_down_handler=bgp_peer_down_cb,
            peer_up_handler=bgp_peer_up_cb)
        LOG.info(_LI('Added BGP Speaker for local_as=%(as)d with '
                     'router_id= %(rtid)s.'),
                 {'as': speaker_as, 'rtid': self.routerid})
        self.cache.put_bgp_speaker(speaker_as, curr_speaker)

    def delete_bgp_speaker(self, speaker_as):
        """Shut down and drop the cached speaker for *speaker_as*."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)
        # Notify Ryu about BGP Speaker deletion
        curr_speaker.shutdown()
        LOG.info(_LI('Removed BGP Speaker for local_as=%(as)d with '
                     'router_id=%(rtid)s.'),
                 {'as': speaker_as, 'rtid': self.routerid})
        self.cache.remove_bgp_speaker(speaker_as)

    def add_bgp_peer(self, speaker_as, peer_ip, peer_as,
                     auth_type='none', password=None):
        """Add a neighbor to the speaker after validating the inputs."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)

        # Validate peer_ip and peer_as.
        utils.validate_as_num('remote_as', peer_as)
        utils.validate_string(peer_ip)
        utils.validate_auth(auth_type, password)

        # Notify Ryu about BGP Peer addition
        curr_speaker.neighbor_add(address=peer_ip,
                                  remote_as=peer_as,
                                  password=password,
                                  connect_mode=CONNECT_MODE_ACTIVE)
        LOG.info(_LI('Added BGP Peer %(peer)s for remote_as=%(as)d to '
                     'BGP Speaker running for local_as=%(local_as)d.'),
                 {'peer': peer_ip, 'as': peer_as, 'local_as': speaker_as})

    def delete_bgp_peer(self, speaker_as, peer_ip):
        """Remove the neighbor identified by *peer_ip* from the speaker."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)
        # Validate peer_ip. It must be a string.
        utils.validate_string(peer_ip)

        # Notify Ryu about BGP Peer removal
        curr_speaker.neighbor_del(address=peer_ip)
        LOG.info(_LI('Removed BGP Peer %(peer)s from BGP Speaker '
                     'running for local_as=%(local_as)d.'),
                 {'peer': peer_ip, 'local_as': speaker_as})

    def advertise_route(self, speaker_as, cidr, nexthop):
        """Advertise *cidr* with next-hop *nexthop* via the speaker."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)

        # Validate cidr and nexthop. Both must be strings.
        utils.validate_string(cidr)
        utils.validate_string(nexthop)

        # Notify Ryu about route advertisement
        curr_speaker.prefix_add(prefix=cidr, next_hop=nexthop)
        LOG.info(_LI('Route cidr=%(prefix)s, nexthop=%(nexthop)s is '
                     'advertised for BGP Speaker running for '
                     'local_as=%(local_as)d.'),
                 {'prefix': cidr, 'nexthop': nexthop, 'local_as': speaker_as})

    def withdraw_route(self, speaker_as, cidr, nexthop=None):
        """Withdraw the advertised prefix *cidr* from the speaker."""
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)
        # Validate cidr. It must be a string.
        utils.validate_string(cidr)

        # Notify Ryu about route withdrawal
        curr_speaker.prefix_del(prefix=cidr)
        LOG.info(_LI('Route cidr=%(prefix)s is withdrawn from BGP Speaker '
                     'running for local_as=%(local_as)d.'),
                 {'prefix': cidr, 'local_as': speaker_as})

    def get_bgp_speaker_statistics(self, speaker_as):
        """Return Ryu neighbor state for every peer of the speaker."""
        LOG.info(_LI('Collecting BGP Speaker statistics for local_as=%d.'),
                 speaker_as)
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)

        # TODO(vikram): Filter and return the necessary information.
        # Will be done as part of new RFE requirement
        # https://bugs.launchpad.net/neutron/+bug/1527993
        return curr_speaker.neighbor_state_get()

    def get_bgp_peer_statistics(self, speaker_as, peer_ip, peer_as=None):
        """Return Ryu neighbor state for the peer at *peer_ip*.

        ``peer_as`` is accepted (with a default, so existing two-argument
        callers keep working) to match the BgpDriverBase signature; Ryu
        identifies a neighbor by IP address only, so the value is unused.
        """
        LOG.info(_LI('Collecting BGP Peer statistics for peer_ip=%(peer)s, '
                     'running in speaker_as=%(speaker_as)d '),
                 {'peer': peer_ip, 'speaker_as': speaker_as})
        curr_speaker = self.cache.get_bgp_speaker(speaker_as)
        if not curr_speaker:
            raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                    rtid=self.routerid)

        # TODO(vikram): Filter and return the necessary information.
        # Will be done as part of new RFE requirement
        # https://bugs.launchpad.net/neutron/+bug/1527993
        return curr_speaker.neighbor_state_get(address=peer_ip)

View File

@ -1,75 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from neutron.services.bgp.common import constants as bgp_consts
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
# Parameter validation functions provided are provided by the base.
def validate_as_num(param, as_num):
    """Ensure *as_num* is an integer within [MIN_ASNUM, MAX_ASNUM].

    :param param: name used in the raised exception message.
    :raises: InvalidParamType, InvalidParamRange
    """
    if not isinstance(as_num, six.integer_types):
        raise bgp_driver_exc.InvalidParamType(param=param,
                                              param_type='integer')
    if not (bgp_consts.MIN_ASNUM <= as_num <= bgp_consts.MAX_ASNUM):
        # Must be in [AS_NUM_MIN, AS_NUM_MAX] range.
        allowed_range = '[%s-%s]' % (bgp_consts.MIN_ASNUM,
                                     bgp_consts.MAX_ASNUM)
        raise bgp_driver_exc.InvalidParamRange(param=param,
                                               range=allowed_range)
def validate_auth(auth_type, password):
    """Check that *auth_type* and *password* form a valid combination.

    'none' must come without a password; every other supported type
    requires one.  Unsupported types are rejected outright.

    :raises: InvaildAuthType, PasswordNotSpecified, InvalidParamType
    """
    validate_string(password)
    if auth_type not in bgp_consts.SUPPORTED_AUTH_TYPES:
        raise bgp_driver_exc.InvaildAuthType(auth_type=auth_type)

    has_password = password is not None
    if auth_type == 'none':
        if has_password:
            # A password makes no sense when authentication is disabled.
            raise bgp_driver_exc.InvaildAuthType(auth_type=auth_type)
    elif not has_password:
        raise bgp_driver_exc.PasswordNotSpecified(auth_type=auth_type)
def validate_string(param):
    """Ensure *param* is a string when it is not None.

    :raises: InvalidParamType
    """
    if param is None:
        return
    if not isinstance(param, six.string_types):
        raise bgp_driver_exc.InvalidParamType(param=param,
                                              param_type='string')
class BgpMultiSpeakerCache(object):
    """In-memory registry of hosted BGP speakers, keyed by local AS.

    Version history:
        1.0 - Initial version for caching multiple BGP speaker information.
    """

    def __init__(self):
        # Maps local AS number -> speaker object.
        self.cache = {}

    def get_hosted_bgp_speakers_count(self):
        """Return how many speakers are currently cached."""
        return len(self.cache)

    def put_bgp_speaker(self, local_as, speaker):
        """Register (or silently replace) the speaker for *local_as*."""
        self.cache[local_as] = speaker

    def get_bgp_speaker(self, local_as):
        """Return the speaker for *local_as*, or None when absent."""
        return self.cache.get(local_as)

    def remove_bgp_speaker(self, local_as):
        """Drop the speaker for *local_as*; no-op when absent."""
        self.cache.pop(local_as, None)

View File

@ -1,191 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy.orm import exc
from sqlalchemy import sql
from neutron.db import agents_db
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron._i18n import _LI, _LW
from neutron.scheduler import base_resource_filter
from neutron.scheduler import base_scheduler
from neutron.services.bgp.common import constants as bgp_consts
LOG = logging.getLogger(__name__)
BGP_SPEAKER_PER_DRAGENT = 1
class BgpDrAgentFilter(base_resource_filter.BaseResourceFilter):
    """Filter/binder deciding which BgpDrAgents may host a BgpSpeaker."""

    def bind(self, context, agents, bgp_speaker_id):
        """Bind the BgpSpeaker to a BgpDrAgent."""
        bound_agents = agents[:]
        for agent in agents:
            # saving agent_id to use it after rollback to avoid
            # DetachedInstanceError
            agent_id = agent.id
            binding = bgp_dras_db.BgpSpeakerDrAgentBinding()
            binding.agent_id = agent_id
            binding.bgp_speaker_id = bgp_speaker_id
            try:
                with context.session.begin(subtransactions=True):
                    context.session.add(binding)
            except db_exc.DBDuplicateEntry:
                # it's totally ok, someone just did our job!
                bound_agents.remove(agent)
                LOG.info(_LI('BgpDrAgent %s already present'), agent_id)
            LOG.debug('BgpSpeaker %(bgp_speaker_id)s is scheduled to be '
                      'hosted by BgpDrAgent %(agent_id)s',
                      {'bgp_speaker_id': bgp_speaker_id,
                       'agent_id': agent_id})
        super(BgpDrAgentFilter, self).bind(context, bound_agents,
                                           bgp_speaker_id)

    def filter_agents(self, plugin, context, bgp_speaker):
        """Return the agents that can host the BgpSpeaker."""
        agents_dict = self._get_bgp_speaker_hostable_dragents(
            plugin, context, bgp_speaker)
        if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0:
            return {'n_agents': 0,
                    'hostable_agents': [],
                    'hosted_agents': []}
        return agents_dict

    def _get_active_dragents(self, plugin, context):
        """Return a list of active BgpDrAgents."""
        with context.session.begin(subtransactions=True):
            active_dragents = plugin.get_agents_db(
                context, filters={
                    'agent_type': [bgp_consts.AGENT_TYPE_BGP_ROUTING],
                    'admin_state_up': [True]})
            if not active_dragents:
                return []
        return active_dragents

    def _get_num_dragents_hosting_bgp_speaker(self, bgp_speaker_id,
                                              dragent_bindings):
        # Count bindings that reference the given speaker.
        return sum(1 if dragent_binding.bgp_speaker_id == bgp_speaker_id else 0
                   for dragent_binding in dragent_bindings)

    def _get_bgp_speaker_hostable_dragents(self, plugin, context, bgp_speaker):
        """Return number of additional BgpDrAgents which will actually host
           the given BgpSpeaker and a list of BgpDrAgents which can host the
           given BgpSpeaker
        """
        # only one BgpSpeaker can be hosted by a BgpDrAgent for now.
        dragents_per_bgp_speaker = BGP_SPEAKER_PER_DRAGENT
        dragent_bindings = plugin.get_dragent_bgp_speaker_bindings(context)
        agents_hosting = [dragent_binding.agent_id
                          for dragent_binding in dragent_bindings]

        num_dragents_hosting_bgp_speaker = (
            self._get_num_dragents_hosting_bgp_speaker(bgp_speaker['id'],
                                                       dragent_bindings))
        n_agents = dragents_per_bgp_speaker - num_dragents_hosting_bgp_speaker
        if n_agents <= 0:
            # Speaker already fully scheduled.
            return {'n_agents': 0,
                    'hostable_agents': [],
                    'hosted_agents': []}

        active_dragents = self._get_active_dragents(plugin, context)
        hostable_dragents = [
            agent for agent in set(active_dragents)
            if agent.id not in agents_hosting and plugin.is_eligible_agent(
                active=True, agent=agent)
        ]
        if not hostable_dragents:
            return {'n_agents': 0,
                    'hostable_agents': [],
                    'hosted_agents': []}

        n_agents = min(len(hostable_dragents), n_agents)
        return {'n_agents': n_agents,
                'hostable_agents': hostable_dragents,
                'hosted_agents': num_dragents_hosting_bgp_speaker}
class BgpDrAgentSchedulerBase(BgpDrAgentFilter):
    """Shared auto-scheduling logic for BgpSpeaker-to-agent schedulers."""

    def schedule_unscheduled_bgp_speakers(self, context, host):
        """Schedule unscheduled BgpSpeaker to a BgpDrAgent.

        Returns True when a speaker was bound to the agent on *host*,
        False otherwise.
        """

        LOG.debug('Started auto-scheduling on host %s', host)
        with context.session.begin(subtransactions=True):
            query = context.session.query(agents_db.Agent)
            query = query.filter_by(
                agent_type=bgp_consts.AGENT_TYPE_BGP_ROUTING,
                host=host,
                admin_state_up=sql.true())
            try:
                bgp_dragent = query.one()
            except (exc.NoResultFound):
                LOG.debug('No enabled BgpDrAgent on host %s', host)
                return False

            if agents_db.AgentDbMixin.is_agent_down(
                    bgp_dragent.heartbeat_timestamp):
                LOG.warning(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
                return False

            if self._is_bgp_speaker_hosted(context, bgp_dragent['id']):
                # One BgpDrAgent can only host one BGP speaker
                LOG.debug('BgpDrAgent already hosting a speaker on host %s. '
                          'Cannot schedule an another one', host)
                return False

            unscheduled_speakers = self._get_unscheduled_bgp_speakers(context)
            if not unscheduled_speakers:
                LOG.debug('Nothing to auto-schedule on host %s', host)
                return False

            self.bind(context, [bgp_dragent], unscheduled_speakers[0])
        return True

    def _is_bgp_speaker_hosted(self, context, agent_id):
        # True when the agent already has any speaker binding.
        speaker_binding_model = bgp_dras_db.BgpSpeakerDrAgentBinding

        query = context.session.query(speaker_binding_model)
        query = query.filter(speaker_binding_model.agent_id == agent_id)

        return query.count() > 0

    def _get_unscheduled_bgp_speakers(self, context):
        """BGP speakers that needs to be scheduled.

        Returns a list of speaker ids with no agent binding yet.
        """

        no_agent_binding = ~sql.exists().where(
            bgp_db.BgpSpeaker.id ==
            bgp_dras_db.BgpSpeakerDrAgentBinding.bgp_speaker_id)
        query = context.session.query(bgp_db.BgpSpeaker.id).filter(
            no_agent_binding)
        return [bgp_speaker_id_[0] for bgp_speaker_id_ in query]
class ChanceScheduler(base_scheduler.BaseChanceScheduler,
                      BgpDrAgentSchedulerBase):
    """Randomly pick a hostable BgpDrAgent for each BgpSpeaker."""

    def __init__(self):
        # The scheduler is its own resource filter (via
        # BgpDrAgentSchedulerBase), hence passing ``self``.
        super(ChanceScheduler, self).__init__(self)
class WeightScheduler(base_scheduler.BaseWeightScheduler,
                      BgpDrAgentSchedulerBase):
    """Pick the least-loaded hostable BgpDrAgent for each BgpSpeaker."""

    def __init__(self):
        # The scheduler is its own resource filter (via
        # BgpDrAgentSchedulerBase), hence passing ``self``.
        super(WeightScheduler, self).__init__(self)

View File

@ -1,286 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
import testtools
from neutron.tests.api import base
from neutron.tests.tempest.common import tempest_fixtures as fixtures
CONF = config.CONF
class BgpSpeakerTestJSONBase(base.BaseAdminNetworkTest):
    """Shared fixtures and helpers for BGP speaker API tests.

    Wraps the admin client's bgp-speaker/bgp-peer calls and tracks
    created routers, router ports and floating IPs for cleanup.
    """

    # Default request bodies used by the tests; note local_as/remote_as
    # are passed as strings here.
    default_bgp_speaker_args = {'local_as': '1234',
                                'ip_version': 4,
                                'name': 'my-bgp-speaker',
                                'advertise_floating_ip_host_routes': True,
                                'advertise_tenant_networks': True}
    default_bgp_peer_args = {'remote_as': '4321',
                             'name': 'my-bgp-peer',
                             'peer_ip': '192.168.1.1',
                             'auth_type': 'md5', 'password': 'my-secret'}

    @classmethod
    @test.requires_ext(extension="bgp_speaker", service="network")
    def resource_setup(cls):
        """Initialize the per-class cleanup lists and external net id."""
        super(BgpSpeakerTestJSONBase, cls).resource_setup()

        cls.admin_routerports = []
        cls.admin_floatingips = []
        cls.admin_routers = []
        cls.ext_net_id = CONF.network.public_network_id

    @classmethod
    def resource_cleanup(cls):
        """Delete tracked resources: floating IPs, then ports, then routers."""
        for floatingip in cls.admin_floatingips:
            cls._try_delete_resource(cls.admin_client.delete_floatingip,
                                     floatingip['id'])
        for routerport in cls.admin_routerports:
            cls._try_delete_resource(
                cls.admin_client.remove_router_interface_with_subnet_id,
                routerport['router_id'], routerport['subnet_id'])
        for router in cls.admin_routers:
            cls._try_delete_resource(cls.admin_client.delete_router,
                                     router['id'])
        super(BgpSpeakerTestJSONBase, cls).resource_cleanup()

    def create_bgp_speaker(self, auto_delete=True, **args):
        """Create a bgp-speaker; auto-deleted on test cleanup by default."""
        data = {'bgp_speaker': args}
        bgp_speaker = self.admin_client.create_bgp_speaker(data)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        if auto_delete:
            self.addCleanup(self.delete_bgp_speaker, bgp_speaker_id)
        return bgp_speaker

    def create_bgp_peer(self, **args):
        """Create a bgp-peer; always auto-deleted on test cleanup."""
        bgp_peer = self.admin_client.create_bgp_peer({'bgp_peer': args})
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        self.addCleanup(self.delete_bgp_peer, bgp_peer_id)
        return bgp_peer

    def update_bgp_speaker(self, id, **args):
        data = {'bgp_speaker': args}
        return self.admin_client.update_bgp_speaker(id, data)

    def delete_bgp_speaker(self, id):
        return self.admin_client.delete_bgp_speaker(id)

    def get_bgp_speaker(self, id):
        return self.admin_client.get_bgp_speaker(id)

    def create_bgp_speaker_and_peer(self):
        """Create one speaker and one peer with the default arguments."""
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        return (bgp_speaker, bgp_peer)

    def delete_bgp_peer(self, id):
        return self.admin_client.delete_bgp_peer(id)

    def add_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        return self.admin_client.add_bgp_peer_with_id(bgp_speaker_id,
                                                      bgp_peer_id)

    def remove_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        return self.admin_client.remove_bgp_peer_with_id(bgp_speaker_id,
                                                         bgp_peer_id)

    def delete_address_scope(self, id):
        return self.admin_client.delete_address_scope(id)
class BgpSpeakerTestJSON(BgpSpeakerTestJSONBase):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        Create bgp-speaker
        Delete bgp-speaker
        Create bgp-peer
        Update bgp-peer
        Delete bgp-peer
    """

    @test.idempotent_id('df259771-7104-4ffa-b77f-bd183600d7f9')
    def test_delete_bgp_speaker(self):
        # auto_delete=False: this test deletes the speaker itself and then
        # verifies the API reports it gone with 404.
        bgp_speaker = self.create_bgp_speaker(auto_delete=False,
                                              **self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.delete_bgp_speaker(bgp_speaker_id)
        self.assertRaises(lib_exc.NotFound,
                          self.get_bgp_speaker,
                          bgp_speaker_id)

    @test.idempotent_id('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')
    def test_create_bgp_peer(self):
        # Successful creation (no exception raised) is the assertion here.
        self.create_bgp_peer(**self.default_bgp_peer_args)

    @test.idempotent_id('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')
    def test_add_bgp_peer(self):
        # Associate a peer with a speaker, then verify the peer id appears
        # in the speaker's 'peers' list.
        bgp_speaker, bgp_peer = self.create_bgp_speaker_and_peer()
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertEqual(1, len(bgp_peers_list))
        self.assertTrue(bgp_peer_id in bgp_peers_list)

    @test.idempotent_id('f9737708-1d79-440b-8350-779f97d882ee')
    def test_remove_bgp_peer(self):
        # Associate then dissociate a peer; the speaker's peer list must
        # end up empty again.
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertTrue(bgp_peer_id in bgp_peers_list)
        bgp_speaker = self.remove_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertTrue(not bgp_peers_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')
    def test_add_gateway_network(self):
        # The lock serializes tests that bind gateway networks to speakers.
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertEqual(1, len(network_list))
        self.assertTrue(self.ext_net_id in network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')
    def test_remove_gateway_network(self):
        # Bind then unbind the external network; the speaker's network
        # list must end up empty.
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        networks = bgp_speaker['bgp-speaker']['networks']
        self.assertTrue(self.ext_net_id in networks)
        self.admin_client.remove_bgp_gateway_network(bgp_speaker_id,
                                                     self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertTrue(not network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('5bef22ad-5e70-4f7b-937a-dc1944642996')
    def test_get_advertised_routes_null_address_scope(self):
        # With only the gateway network bound and no tenant prefixes in a
        # shared address scope, no routes should be advertised.
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(0, len(routes['advertised_routes']))

    @testtools.skip('bug/1553374')
    @test.idempotent_id('cae9cdb1-ad65-423c-9604-d4cd0073616e')
    def test_get_advertised_routes_floating_ips(self):
        # A floating IP associated with a tenant port must be advertised
        # as a /32 host route.
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        tenant_net = self.create_network()
        tenant_subnet = self.create_subnet(tenant_net)
        ext_gw_info = {'network_id': self.ext_net_id}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            admin_state_up=True,
            distributed=False)
        # Track admin-created resources for teardown.
        self.admin_routers.append(router['router'])
        self.admin_client.add_router_interface_with_subnet_id(
            router['router']['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['router']['id'],
                                       'subnet_id': tenant_subnet['id']})
        tenant_port = self.create_port(tenant_net)
        floatingip = self.create_floatingip(self.ext_net_id)
        self.admin_floatingips.append(floatingip)
        self.client.update_floatingip(floatingip['id'],
                                      port_id=tenant_port['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(floatingip['floating_ip_address'] + '/32',
                         routes['advertised_routes'][0]['destination'])

    @testtools.skip('bug/1553374')
    @test.idempotent_id('c9ad566e-fe8f-4559-8303-bbad9062a30c')
    def test_get_advertised_routes_tenant_networks(self):
        # A tenant subnet whose subnetpool shares an address scope with
        # the external network must be advertised, with the router's
        # external gateway IP as the next hop.
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        addr_scope = self.create_address_scope('my-scope', ip_version=4)
        ext_net = self.create_shared_network(**{'router:external': True})
        tenant_net = self.create_network()
        ext_subnetpool = self.create_subnetpool(
            'test-pool-ext',
            is_admin=True,
            default_prefixlen=24,
            address_scope_id=addr_scope['id'],
            prefixes=['8.0.0.0/8'])
        tenant_subnetpool = self.create_subnetpool(
            'tenant-test-pool',
            default_prefixlen=25,
            address_scope_id=addr_scope['id'],
            prefixes=['10.10.0.0/16'])
        self.create_subnet({'id': ext_net['id']},
                           cidr=netaddr.IPNetwork('8.0.0.0/24'),
                           ip_version=4,
                           client=self.admin_client,
                           subnetpool_id=ext_subnetpool['id'])
        tenant_subnet = self.create_subnet(
            {'id': tenant_net['id']},
            cidr=netaddr.IPNetwork('10.10.0.0/24'),
            ip_version=4,
            subnetpool_id=tenant_subnetpool['id'])
        ext_gw_info = {'network_id': ext_net['id']}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            distributed=False)['router']
        self.admin_routers.append(router)
        self.admin_client.add_router_interface_with_subnet_id(
            router['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['id'],
                                       'subnet_id': tenant_subnet['id']})
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  ext_net['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(tenant_subnet['cidr'],
                         routes['advertised_routes'][0]['destination'])
        fixed_ip = router['external_gateway_info']['external_fixed_ips'][0]
        self.assertEqual(fixed_ip['ip_address'],
                         routes['advertised_routes'][0]['next_hop'])

View File

@ -1,120 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.lib import exceptions as lib_exc
from neutron.tests.api import test_bgp_speaker_extensions as test_base
from tempest import test
class BgpSpeakerTestJSONNegative(test_base.BgpSpeakerTestJSONBase):

    """Negative test cases asserting proper behavior of BGP API extension"""

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('75e9ee2f-6efd-4320-bff7-ae24741c8b06')
    def test_create_bgp_speaker_illegal_local_asn(self):
        # 65537 is outside the 2-byte AS number range accepted by the API;
        # creation must fail with 400.
        self.assertRaises(lib_exc.BadRequest,
                          self.create_bgp_speaker,
                          local_as='65537')

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('6742ec2e-382a-4453-8791-13a19b47cd13')
    def test_create_bgp_speaker_non_admin(self):
        # BGP speaker creation is admin-only; the non-admin client must be
        # rejected with 403.
        self.assertRaises(lib_exc.Forbidden,
                          self.client.create_bgp_speaker,
                          {'bgp_speaker': self.default_bgp_speaker_args})

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('33f7aaf0-9786-478b-b2d1-a51086a50eb4')
    def test_create_bgp_peer_non_admin(self):
        # BGP peer creation is likewise admin-only.
        self.assertRaises(lib_exc.Forbidden,
                          self.client.create_bgp_peer,
                          {'bgp_peer': self.default_bgp_peer_args})

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('39435932-0266-4358-899b-0e9b1e53c3e9')
    def test_update_bgp_speaker_local_asn(self):
        # local_as cannot be changed once the speaker exists; the update
        # must fail with 400.
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.assertRaises(lib_exc.BadRequest, self.update_bgp_speaker,
                          bgp_speaker_id, local_as='4321')

    @test.idempotent_id('9cc33701-51e5-421f-a5d5-fd7b330e550f')
    def test_get_advertised_routes_tenant_networks(self):
        # Two tenant subnets in different address scopes; only the subnet
        # sharing the external network's address scope may be advertised.
        addr_scope1 = self.create_address_scope('my-scope1', ip_version=4)
        addr_scope2 = self.create_address_scope('my-scope2', ip_version=4)
        ext_net = self.create_shared_network(**{'router:external': True})
        tenant_net1 = self.create_network()
        tenant_net2 = self.create_network()
        ext_subnetpool = self.create_subnetpool(
            'test-pool-ext',
            is_admin=True,
            default_prefixlen=24,
            address_scope_id=addr_scope1['id'],
            prefixes=['8.0.0.0/8'])
        tenant_subnetpool1 = self.create_subnetpool(
            'tenant-test-pool',
            default_prefixlen=25,
            address_scope_id=addr_scope1['id'],
            prefixes=['10.10.0.0/16'])
        tenant_subnetpool2 = self.create_subnetpool(
            'tenant-test-pool',
            default_prefixlen=25,
            address_scope_id=addr_scope2['id'],
            prefixes=['11.10.0.0/16'])
        self.create_subnet({'id': ext_net['id']},
                           cidr=netaddr.IPNetwork('8.0.0.0/24'),
                           ip_version=4,
                           client=self.admin_client,
                           subnetpool_id=ext_subnetpool['id'])
        tenant_subnet1 = self.create_subnet(
            {'id': tenant_net1['id']},
            cidr=netaddr.IPNetwork('10.10.0.0/24'),
            ip_version=4,
            subnetpool_id=tenant_subnetpool1['id'])
        tenant_subnet2 = self.create_subnet(
            {'id': tenant_net2['id']},
            cidr=netaddr.IPNetwork('11.10.0.0/24'),
            ip_version=4,
            subnetpool_id=tenant_subnetpool2['id'])
        ext_gw_info = {'network_id': ext_net['id']}
        router = self.admin_client.create_router(
            'my-router',
            distributed=False,
            external_gateway_info=ext_gw_info)['router']
        self.admin_routers.append(router)
        self.admin_client.add_router_interface_with_subnet_id(
            router['id'],
            tenant_subnet1['id'])
        self.admin_routerports.append({'router_id': router['id'],
                                       'subnet_id': tenant_subnet1['id']})
        self.admin_client.add_router_interface_with_subnet_id(
            router['id'],
            tenant_subnet2['id'])
        self.admin_routerports.append({'router_id': router['id'],
                                       'subnet_id': tenant_subnet2['id']})
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  ext_net['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        # Only tenant_subnet1 (same scope as ext_net) is advertised.
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(tenant_subnet1['cidr'],
                         routes['advertised_routes'][0]['destination'])
        fixed_ip = router['external_gateway_info']['external_fixed_ips'][0]
        self.assertEqual(fixed_ip['ip_address'],
                         routes['advertised_routes'][0]['next_hop'])

View File

@ -1,197 +0,0 @@
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
from oslo_utils import timeutils
import six
import testtools
import neutron
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.services.bgp.common import constants as bgp_const
# Default host name and availability zone used when registering fake
# agents in the helpers below.
HOST = 'localhost'
DEFAULT_AZ = 'nova'
def find_file(filename, path):
    """Return the absolute path of ``filename`` found under ``path``.

    Walks ``path`` recursively and returns the first match in os.walk
    order; returns None when no file with that name exists.
    """
    for directory, _subdirs, names in os.walk(path):
        if filename not in names:
            continue
        return os.path.abspath(os.path.join(directory, filename))
    return None
def find_sample_file(filename):
    """Return the absolute path of ``filename`` inside the sample 'etc'
    directory shipped alongside the neutron package (None when absent).
    """
    sample_root = os.path.join(neutron.__path__[0], '..', 'etc')
    return find_file(filename, path=sample_root)
class FakePlugin(common_db_mixin.CommonDbMixin,
                 agents_db.AgentDbMixin):
    # Minimal plugin composed only of the common DB and agent DB mixins;
    # used by the helpers below to register/update agents directly in the
    # test database without loading a full plugin.
    pass
def _get_l3_agent_dict(host, agent_mode, internal_only=True,
                       ext_net_id='', ext_bridge='', router_id=None,
                       az=DEFAULT_AZ):
    """Build the agent-report dict for an L3 agent with the given config."""
    configurations = {
        'agent_mode': agent_mode,
        'handle_internal_only_routers': internal_only,
        'external_network_bridge': ext_bridge,
        'gateway_external_network_id': ext_net_id,
        'router_id': router_id,
    }
    return {
        'agent_type': constants.AGENT_TYPE_L3,
        'binary': 'neutron-l3-agent',
        'host': host,
        'topic': topics.L3_AGENT,
        'availability_zone': az,
        'configurations': configurations,
    }
def _register_agent(agent):
    """Persist ``agent`` through FakePlugin and return the stored DB row."""
    ctx = context.get_admin_context()
    plugin = FakePlugin()
    plugin.create_or_update_agent(ctx, agent)
    # Re-read so callers get the row as stored (including generated id).
    return plugin._get_agent_by_type_and_host(
        ctx, agent['agent_type'], agent['host'])
def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY,
                      internal_only=True, ext_net_id='', ext_bridge='',
                      router_id=None, az=DEFAULT_AZ):
    """Register an L3 agent report and return the stored agent row."""
    agent_dict = _get_l3_agent_dict(host, agent_mode, internal_only,
                                    ext_net_id, ext_bridge, router_id, az)
    return _register_agent(agent_dict)
def _get_dhcp_agent_dict(host, networks=0, az=DEFAULT_AZ):
    """Build the agent-report dict for a DHCP agent."""
    return {
        'binary': 'neutron-dhcp-agent',
        'host': host,
        'topic': topics.DHCP_AGENT,
        'agent_type': constants.AGENT_TYPE_DHCP,
        'availability_zone': az,
        'configurations': {'dhcp_driver': 'dhcp_driver',
                           'networks': networks},
    }
def register_dhcp_agent(host=HOST, networks=0, admin_state_up=True,
                        alive=True, az=DEFAULT_AZ):
    """Register a DHCP agent, optionally marking it disabled or dead.

    Returns the agent row re-read from the DB so it reflects any state
    changes applied here.
    """
    registered = _register_agent(_get_dhcp_agent_dict(host, networks, az=az))
    if not admin_state_up:
        set_agent_admin_state(registered['id'])
    if not alive:
        kill_agent(registered['id'])
    return FakePlugin()._get_agent_by_type_and_host(
        context.get_admin_context(), registered['agent_type'],
        registered['host'])
def _get_bgp_dragent_dict(host):
    """Build the agent-report dict for a BGP dynamic-routing agent."""
    return {
        'binary': 'neutron-bgp-dragent',
        'host': host,
        'topic': 'q-bgp_dragent',
        'agent_type': bgp_const.AGENT_TYPE_BGP_ROUTING,
        'configurations': {'bgp_speakers': 1},
    }
def register_bgp_dragent(host=HOST, admin_state_up=True,
                         alive=True):
    """Register a BGP DRAgent, optionally marking it disabled or dead.

    Returns the agent row re-read from the DB so it reflects any state
    changes applied here.
    """
    registered = _register_agent(_get_bgp_dragent_dict(host))
    if not admin_state_up:
        set_agent_admin_state(registered['id'])
    if not alive:
        kill_agent(registered['id'])
    return FakePlugin()._get_agent_by_type_and_host(
        context.get_admin_context(), registered['agent_type'],
        registered['host'])
def kill_agent(agent_id):
    """Mark an agent dead by back-dating its timestamps by one hour."""
    stale = timeutils.utcnow() - datetime.timedelta(hours=1)
    FakePlugin().update_agent(
        context.get_admin_context(), agent_id,
        {'agent': {'started_at': stale,
                   'heartbeat_timestamp': stale}})
def revive_agent(agent_id):
    """Mark an agent alive by stamping its timestamps with 'now'."""
    ts = timeutils.utcnow()
    FakePlugin().update_agent(
        context.get_admin_context(), agent_id,
        {'agent': {'started_at': ts, 'heartbeat_timestamp': ts}})
def set_agent_admin_state(agent_id, admin_state_up=False):
    """Set an agent's admin_state_up flag (defaults to disabling it)."""
    update = {'agent': {'admin_state_up': admin_state_up}}
    FakePlugin().update_agent(context.get_admin_context(), agent_id, update)
def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
                        tunneling_ip='20.0.0.1', interface_mappings=None,
                        bridge_mappings=None, l2pop_network_types=None):
    """Build the agent-report dict for an OVS (L2) agent.

    Mapping/l2pop keys are only added to 'configurations' when they were
    explicitly supplied (non-None).
    """
    configurations = {'tunneling_ip': tunneling_ip,
                      'tunnel_types': tunnel_types}
    if bridge_mappings is not None:
        configurations['bridge_mappings'] = bridge_mappings
    if interface_mappings is not None:
        configurations['interface_mappings'] = interface_mappings
    if l2pop_network_types is not None:
        configurations['l2pop_network_types'] = l2pop_network_types
    return {
        'binary': binary,
        'host': host,
        'topic': constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': agent_type,
        # NOTE(review): key is 'tunnel_type' (singular) and always an
        # empty list in the original helper -- preserved as-is; confirm
        # whether it is intentional or a leftover.
        'tunnel_type': [],
        'start_flag': True,
    }
def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS,
                       binary='neutron-openvswitch-agent',
                       tunnel_types=None, tunneling_ip='20.0.0.1',
                       interface_mappings=None, bridge_mappings=None,
                       l2pop_network_types=None):
    """Register an OVS L2 agent report and return the stored agent row.

    ``tunnel_types`` defaults to ['vxlan']. A None sentinel is used
    instead of a literal list default so the same list object is not
    shared (and potentially mutated) across calls -- the classic Python
    mutable-default-argument pitfall. Behavior for callers passing their
    own value is unchanged.
    """
    if tunnel_types is None:
        tunnel_types = ['vxlan']
    agent = _get_ovs_agent_dict(host, agent_type, binary, tunnel_types,
                                tunneling_ip, interface_mappings,
                                bridge_mappings, l2pop_network_types)
    return _register_agent(agent)
def requires_py2(testcase):
    """Skip ``testcase`` unless running under Python 2.x."""
    decorator = testtools.skipUnless(six.PY2, "requires python 2.x")
    return decorator(testcase)
def requires_py3(testcase):
    """Skip ``testcase`` unless running under Python 3.x."""
    decorator = testtools.skipUnless(six.PY3, "requires python 3.x")
    return decorator(testcase)

View File

@ -1,238 +0,0 @@
{
"context_is_admin": "role:admin",
"owner": "tenant_id:%(tenant_id)s",
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"shared_firewall_policies": "field:firewall_policies:shared=True",
"shared_subnetpools": "field:subnetpools:shared=True",
"shared_address_scopes": "field:address_scopes:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_subnetpool": "",
"create_subnetpool:shared": "rule:admin_only",
"create_subnetpool:is_default": "rule:admin_only",
"get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
"update_subnetpool": "rule:admin_or_owner",
"update_subnetpool:is_default": "rule:admin_only",
"delete_subnetpool": "rule:admin_or_owner",
"create_address_scope": "",
"create_address_scope:shared": "rule:admin_only",
"get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
"update_address_scope": "rule:admin_or_owner",
"update_address_scope:shared": "rule:admin_only",
"delete_address_scope": "rule:admin_or_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"get_network_ip_availabilities": "rule:admin_only",
"get_network_ip_availability": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:is_default": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"insert_rule": "rule:admin_or_owner",
"remove_rule": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"get_agent-loadbalancers": "rule:admin_only",
"get_loadbalancer-hosting-agent": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only",
"create_flavor": "rule:admin_only",
"update_flavor": "rule:admin_only",
"delete_flavor": "rule:admin_only",
"get_flavors": "rule:regular_user",
"get_flavor": "rule:regular_user",
"create_service_profile": "rule:admin_only",
"update_service_profile": "rule:admin_only",
"delete_service_profile": "rule:admin_only",
"get_service_profiles": "rule:admin_only",
"get_service_profile": "rule:admin_only",
"get_policy": "rule:regular_user",
"create_policy": "rule:admin_only",
"update_policy": "rule:admin_only",
"delete_policy": "rule:admin_only",
"get_policy_bandwidth_limit_rule": "rule:regular_user",
"create_policy_bandwidth_limit_rule": "rule:admin_only",
"delete_policy_bandwidth_limit_rule": "rule:admin_only",
"update_policy_bandwidth_limit_rule": "rule:admin_only",
"get_policy_dscp_marking_rule": "rule:regular_user",
"create_policy_dscp_marking_rule": "rule:admin_only",
"delete_policy_dscp_marking_rule": "rule:admin_only",
"update_policy_dscp_marking_rule": "rule:admin_only",
"get_rule_type": "rule:regular_user",
"restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
"create_rbac_policy": "",
"create_rbac_policy:target_tenant": "rule:restrict_wildcard",
"update_rbac_policy": "rule:admin_or_owner",
"update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
"get_rbac_policy": "rule:admin_or_owner",
"delete_rbac_policy": "rule:admin_or_owner",
"create_flavor_service_profile": "rule:admin_only",
"delete_flavor_service_profile": "rule:admin_only",
"get_flavor_service_profile": "rule:regular_user",
"get_auto_allocated_topology": "rule:admin_or_owner",
"get_bgp_speaker": "rule:admin_only",
"create_bgp_speaker": "rule:admin_only",
"update_bgp_speaker": "rule:admin_only",
"delete_bgp_speaker": "rule:admin_only",
"get_bgp_peer": "rule:admin_only",
"create_bgp_peer": "rule:admin_only",
"update_bgp_peer": "rule:admin_only",
"delete_bgp_peer": "rule:admin_only",
"add_bgp_peer": "rule:admin_only",
"remove_bgp_peer": "rule:admin_only",
"add_gateway_network": "rule:admin_only",
"remove_gateway_network": "rule:admin_only",
"get_advertised_routes":"rule:admin_only",
"add_bgp_speaker_to_dragent": "rule:admin_only",
"remove_bgp_speaker_from_dragent": "rule:admin_only",
"list_bgp_speaker_on_dragent": "rule:admin_only",
"list_dragent_hosting_bgp_speaker": "rule:admin_only"
}

View File

@ -1,208 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from neutron import context
from neutron.db import agents_db
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.db import common_db_mixin
from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class TestAutoSchedule(testlib_api.SqlTestCase,
                       bgp_dras_db.BgpDrAgentSchedulerDbMixin,
                       agents_db.AgentDbMixin,
                       common_db_mixin.CommonDbMixin):
    """Test various scenarios for schedule_unscheduled_bgp_speakers.

    Below is the brief description of the scenario variables
    --------------------------------------------------------
    host_count
        number of hosts.
    agent_count
        number of BGP dynamic routing agents.
    down_agent_count
        number of DRAgents which are inactive.
    bgp_speaker_count
        Number of bgp_speakers.
    hosted_bgp_speakers
        A mapping of agent id to the ids of the bgp_speakers that they
        should be initially hosting.
    expected_schedule_return_value
        Expected return value of 'schedule_unscheduled_bgp_speakers'.
    expected_hosted_bgp_speakers
        This stores the expected bgp_speakers that should have been
        scheduled (or that could have already been scheduled) for each
        agent after the 'schedule_unscheduled_bgp_speakers' function is
        called.
    """

    # Each tuple is (scenario name, scenario variables); testscenarios
    # runs test_auto_schedule once per entry.
    scenarios = [
        ('No BgpDrAgent scheduled, if no DRAgent is present',
         dict(host_count=1,
              agent_count=0,
              down_agent_count=0,
              bgp_speaker_count=1,
              hosted_bgp_speakers={},
              expected_schedule_return_value=False)),

        ('No BgpDrAgent scheduled, if no BGP speaker are present',
         dict(host_count=1,
              agent_count=1,
              down_agent_count=0,
              bgp_speaker_count=0,
              hosted_bgp_speakers={},
              expected_schedule_return_value=False,
              expected_hosted_bgp_speakers={'agent-0': []})),

        ('No BgpDrAgent scheduled, if BGP speaker already hosted',
         dict(host_count=1,
              agent_count=1,
              down_agent_count=0,
              bgp_speaker_count=1,
              hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']},
              expected_schedule_return_value=False,
              expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})),

        ('BgpDrAgent scheduled to the speaker, if the speaker is not hosted',
         dict(host_count=1,
              agent_count=1,
              down_agent_count=0,
              bgp_speaker_count=1,
              hosted_bgp_speakers={},
              expected_schedule_return_value=True,
              expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})),

        ('No BgpDrAgent scheduled, if all the agents are down',
         dict(host_count=2,
              agent_count=2,
              down_agent_count=2,
              bgp_speaker_count=1,
              hosted_bgp_speakers={},
              expected_schedule_return_value=False,
              expected_hosted_bgp_speakers={'agent-0': [],
                                            'agent-1': [], })),
    ]

    def _strip_host_index(self, name):
        """Strips the host index.

        Eg. if name = '2-agent-3', then 'agent-3' is returned.
        """
        return name[name.find('-') + 1:]

    def _extract_index(self, name):
        """Extracts the index number and returns.

        Eg. if name = '2-agent-3', then 3 is returned
        """
        return int(name.split('-')[-1])

    def _get_hosted_bgp_speakers_on_dragent(self, agent_id):
        # Query the speaker/agent binding table directly for the speaker
        # ids currently bound to the given agent.
        query = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding.bgp_speaker_id)
        query = query.filter(
            bgp_dras_db.BgpSpeakerDrAgentBinding.agent_id ==
            agent_id)
        return [item[0] for item in query]

    def _create_and_set_agents_down(self, hosts, agent_count=0,
                                    down_agent_count=0, admin_state_up=True):
        # Register one BGP DRAgent per host; the first `down_agent_count`
        # of them are registered as dead.
        agents = []
        if agent_count:
            for i, host in enumerate(hosts):
                is_alive = i >= down_agent_count
                agents.append(helpers.register_bgp_dragent(
                    host,
                    admin_state_up=admin_state_up,
                    alive=is_alive))
        return agents

    def _save_bgp_speakers(self, bgp_speakers):
        # Persist one speaker row per id in `bgp_speakers`, each with a
        # distinct local_as (1, 2, ...).
        cls = bgp_db.BgpDbMixin()
        bgp_speaker_body = {
            'bgp_speaker': {'name': 'fake_bgp_speaker',
                            'ip_version': '4',
                            'local_as': '123',
                            'advertise_floating_ip_host_routes': '0',
                            'advertise_tenant_networks': '0',
                            'peers': [],
                            'networks': []}}
        i = 1
        for bgp_speaker_id in bgp_speakers:
            bgp_speaker_body['bgp_speaker']['local_as'] = i
            cls._save_bgp_speaker(self.ctx, bgp_speaker_body,
                                  uuid=bgp_speaker_id)
            i = i + 1

    def _test_auto_schedule(self, host_index):
        # Drives one scenario: set up hosts/agents/speakers per the
        # scenario variables, run the scheduler for hosts[host_index],
        # then verify the return value and resulting bindings.
        scheduler = bgp_dras.ChanceScheduler()
        self.ctx = context.get_admin_context()
        msg = 'host_index = %s' % host_index

        # create hosts
        hosts = ['%s-agent-%s' % (host_index, i)
                 for i in range(self.host_count)]
        bgp_dragents = self._create_and_set_agents_down(hosts,
                                                        self.agent_count,
                                                        self.down_agent_count)

        # create bgp_speakers
        self._bgp_speakers = ['%s-bgp-speaker-%s' % (host_index, i)
                              for i in range(self.bgp_speaker_count)]
        self._save_bgp_speakers(self._bgp_speakers)

        # pre schedule the bgp_speakers to the agents defined in
        # self.hosted_bgp_speakers before calling auto_schedule_bgp_speaker
        for agent, bgp_speakers in self.hosted_bgp_speakers.items():
            agent_index = self._extract_index(agent)
            for bgp_speaker in bgp_speakers:
                bs_index = self._extract_index(bgp_speaker)
                scheduler.bind(self.ctx, [bgp_dragents[agent_index]],
                               self._bgp_speakers[bs_index])

        retval = scheduler.schedule_unscheduled_bgp_speakers(self.ctx,
                                                             hosts[host_index])
        self.assertEqual(self.expected_schedule_return_value, retval,
                         message=msg)

        if self.agent_count:
            agent_id = bgp_dragents[host_index].id
            hosted_bgp_speakers = self._get_hosted_bgp_speakers_on_dragent(
                agent_id)
            # Strip the '<host_index>-' prefix before comparing against
            # the scenario's expected ids.
            hosted_bs_ids = [self._strip_host_index(net)
                             for net in hosted_bgp_speakers]
            expected_hosted_bgp_speakers = self.expected_hosted_bgp_speakers[
                'agent-%s' % host_index]
            self.assertItemsEqual(hosted_bs_ids, expected_hosted_bgp_speakers,
                                  msg)

    def test_auto_schedule(self):
        # Run the current scenario once per host index.
        for i in range(self.host_count):
            self._test_auto_schedule(i)

View File

@ -1,758 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urlparse
from tempest.lib.common import rest_client as service_client
from tempest.lib import exceptions as lib_exc
from neutron.tests.tempest import exceptions
class NetworkClientJSON(service_client.RestClient):
"""
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
V1 API has been removed from the code base.
Implements create, delete, update, list and show for the basic Neutron
abstractions (networks, sub-networks, routers, ports and floating IP):
Implements add/remove interface to router using subnet ID / port ID
It also implements list, show, update and reset for OpenStack Networking
quotas
"""
version = '2.0'
uri_prefix = "v2.0"
def get_uri(self, plural_name):
# get service prefix from resource name
# The following list represents resource names that do not require
# changing underscore to a hyphen
hyphen_exceptions = ["service_profiles"]
# the following map is used to construct proper URI
# for the given neutron resource
service_resource_prefix_map = {
'bgp-peers': '',
'bgp-speakers': '',
'networks': '',
'subnets': '',
'subnetpools': '',
'ports': '',
'metering_labels': 'metering',
'metering_label_rules': 'metering',
'policies': 'qos',
'bandwidth_limit_rules': 'qos',
'rule_types': 'qos',
'rbac-policies': '',
}
service_prefix = service_resource_prefix_map.get(
plural_name)
if plural_name not in hyphen_exceptions:
plural_name = plural_name.replace("_", "-")
if service_prefix:
uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
plural_name)
else:
uri = '%s/%s' % (self.uri_prefix, plural_name)
return uri
def pluralize(self, resource_name):
# get plural from map or just add 's'
# map from resource name to a plural name
# needed only for those which can't be constructed as name + 's'
resource_plural_map = {
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
'quotas': 'quotas',
'qos_policy': 'policies',
'rbac_policy': 'rbac_policies',
}
return resource_plural_map.get(resource_name, resource_name + 's')
def _lister(self, plural_name):
def _list(**filters):
uri = self.get_uri(plural_name)
if filters:
uri += '?' + urlparse.urlencode(filters, doseq=1)
resp, body = self.get(uri)
result = {plural_name: self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, result)
return _list
def _deleter(self, resource_name):
def _delete(resource_id):
plural = self.pluralize(resource_name)
uri = '%s/%s' % (self.get_uri(plural), resource_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
return _delete
def _shower(self, resource_name):
def _show(resource_id, **fields):
# fields is a dict which key is 'fields' and value is a
# list of field's name. An example:
# {'fields': ['id', 'name']}
plural = self.pluralize(resource_name)
uri = '%s/%s' % (self.get_uri(plural), resource_id)
if fields:
uri += '?' + urlparse.urlencode(fields, doseq=1)
resp, body = self.get(uri)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
return _show
def _creater(self, resource_name):
def _create(**kwargs):
plural = self.pluralize(resource_name)
uri = self.get_uri(plural)
post_data = self.serialize({resource_name: kwargs})
resp, body = self.post(uri, post_data)
body = self.deserialize_single(body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
return _create
def _updater(self, resource_name):
def _update(res_id, **kwargs):
plural = self.pluralize(resource_name)
uri = '%s/%s' % (self.get_uri(plural), res_id)
post_data = self.serialize({resource_name: kwargs})
resp, body = self.put(uri, post_data)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
return _update
def __getattr__(self, name):
method_prefixes = ["list_", "delete_", "show_", "create_", "update_"]
method_functors = [self._lister,
self._deleter,
self._shower,
self._creater,
self._updater]
for index, prefix in enumerate(method_prefixes):
prefix_len = len(prefix)
if name[:prefix_len] == prefix:
return method_functors[index](name[prefix_len:])
raise AttributeError(name)
    # Subnetpool methods
    def create_subnetpool(self, name, **kwargs):
        """Create a subnetpool named ``name``; extra attributes via kwargs."""
        subnetpool_data = {'name': name}
        # Fold any additional subnetpool attributes into the request body.
        for arg in kwargs:
            subnetpool_data[arg] = kwargs[arg]
        post_data = {'subnetpool': subnetpool_data}
        body = self.serialize_list(post_data, "subnetpools", "subnetpool")
        uri = self.get_uri("subnetpools")
        resp, body = self.post(uri, body)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)
    def get_subnetpool(self, id):
        """Show a single subnetpool by id (expects 200)."""
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.get(subnetpool_uri)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
    def delete_subnetpool(self, id):
        """Delete a subnetpool by id (expects 204)."""
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.delete(subnetpool_uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
    def list_subnetpools(self, **filters):
        """List subnetpools, optionally filtered by query parameters."""
        uri = self.get_uri("subnetpools")
        if filters:
            uri = '?'.join([uri, urlparse.urlencode(filters)])
        resp, body = self.get(uri)
        body = {'subnetpools': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
    def update_subnetpool(self, id, **kwargs):
        """Update subnetpool ``id`` with the attributes given in kwargs."""
        subnetpool_data = {}
        # Copy caller-supplied attributes into the request body.
        for arg in kwargs:
            subnetpool_data[arg] = kwargs[arg]
        post_data = {'subnetpool': subnetpool_data}
        body = self.serialize_list(post_data, "subnetpools", "subnetpool")
        uri = self.get_uri("subnetpools")
        subnetpool_uri = '%s/%s' % (uri, id)
        resp, body = self.put(subnetpool_uri, body)
        body = {'subnetpool': self.deserialize_list(body)}
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)
# BGP speaker methods
def create_bgp_speaker(self, post_data):
body = self.serialize_list(post_data, "bgp-speakers", "bgp-speaker")
uri = self.get_uri("bgp-speakers")
resp, body = self.post(uri, body)
body = {'bgp-speaker': self.deserialize_list(body)}
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def get_bgp_speaker(self, id):
uri = self.get_uri("bgp-speakers")
bgp_speaker_uri = '%s/%s' % (uri, id)
resp, body = self.get(bgp_speaker_uri)
body = {'bgp-speaker': self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def get_bgp_speakers(self):
uri = self.get_uri("bgp-speakers")
resp, body = self.get(uri)
body = {'bgp-speakers': self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def update_bgp_speaker(self, id, put_data):
body = self.serialize_list(put_data, "bgp-speakers", "bgp-speaker")
uri = self.get_uri("bgp-speakers")
bgp_speaker_uri = '%s/%s' % (uri, id)
resp, body = self.put(bgp_speaker_uri, body)
body = {'bgp-speaker': self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def delete_bgp_speaker(self, id):
uri = self.get_uri("bgp-speakers")
bgp_speaker_uri = '%s/%s' % (uri, id)
resp, body = self.delete(bgp_speaker_uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def create_bgp_peer(self, post_data):
body = self.serialize_list(post_data, "bgp-peers", "bgp-peer")
uri = self.get_uri("bgp-peers")
resp, body = self.post(uri, body)
body = {'bgp-peer': self.deserialize_list(body)}
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def get_bgp_peer(self, id):
uri = self.get_uri("bgp-peers")
bgp_speaker_uri = '%s/%s' % (uri, id)
resp, body = self.get(bgp_speaker_uri)
body = {'bgp-peer': self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def delete_bgp_peer(self, id):
uri = self.get_uri("bgp-peers")
bgp_speaker_uri = '%s/%s' % (uri, id)
resp, body = self.delete(bgp_speaker_uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def add_bgp_peer_with_id(self, bgp_speaker_id, bgp_peer_id):
uri = '%s/bgp-speakers/%s/add_bgp_peer' % (self.uri_prefix,
bgp_speaker_id)
update_body = {"bgp_peer_id": bgp_peer_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_bgp_peer_with_id(self, bgp_speaker_id, bgp_peer_id):
uri = '%s/bgp-speakers/%s/remove_bgp_peer' % (self.uri_prefix,
bgp_speaker_id)
update_body = {"bgp_peer_id": bgp_peer_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_bgp_gateway_network(self, bgp_speaker_id, network_id):
uri = '%s/bgp-speakers/%s/add_gateway_network' % (self.uri_prefix,
bgp_speaker_id)
update_body = {"network_id": network_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_bgp_gateway_network(self, bgp_speaker_id, network_id):
uri = '%s/bgp-speakers/%s/remove_gateway_network'
uri = uri % (self.uri_prefix, bgp_speaker_id)
update_body = {"network_id": network_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_bgp_advertised_routes(self, bgp_speaker_id):
base_uri = '%s/bgp-speakers/%s/get_advertised_routes'
uri = base_uri % (self.uri_prefix, bgp_speaker_id)
resp, body = self.get(uri)
body = {'advertised_routes': self.deserialize_list(body)}
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def get_bgp_router_routes(self, router_id):
base_uri = '%s/router-routes/%s'
uri = base_uri % (self.uri_prefix, router_id)
resp, body = self.get(uri)
body = self.deserialize_list(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
# Common methods that are hard to automate
def create_bulk_network(self, names, shared=False):
network_list = [{'name': name, 'shared': shared} for name in names]
post_data = {'networks': network_list}
body = self.serialize_list(post_data, "networks", "network")
uri = self.get_uri("networks")
resp, body = self.post(uri, body)
body = {'networks': self.deserialize_list(body)}
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def create_bulk_subnet(self, subnet_list):
post_data = {'subnets': subnet_list}
body = self.serialize_list(post_data, 'subnets', 'subnet')
uri = self.get_uri('subnets')
resp, body = self.post(uri, body)
body = {'subnets': self.deserialize_list(body)}
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def create_bulk_port(self, port_list):
post_data = {'ports': port_list}
body = self.serialize_list(post_data, 'ports', 'port')
uri = self.get_uri('ports')
resp, body = self.post(uri, body)
body = {'ports': self.deserialize_list(body)}
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def wait_for_resource_deletion(self, resource_type, id):
"""Waits for a resource to be deleted."""
start_time = int(time.time())
while True:
if self.is_resource_deleted(resource_type, id):
return
if int(time.time()) - start_time >= self.build_timeout:
raise exceptions.TimeoutException
time.sleep(self.build_interval)
    def is_resource_deleted(self, resource_type, id):
        """Return True once ``show_<resource_type>(id)`` yields NotFound.

        Returns False while the show call still succeeds.
        """
        method = 'show_' + resource_type
        try:
            getattr(self, method)(id)
        except AttributeError:
            # The dynamic 'show_' attribute failed to resolve.
            # NOTE(review): a bare Exception is hard to catch selectively;
            # consider a more specific type when this client is refactored.
            raise Exception("Unknown resource type %s " % resource_type)
        except lib_exc.NotFound:
            return True
        return False
def deserialize_single(self, body):
return json.loads(body)
    def deserialize_list(self, body):
        """Deserialize a list response and return the resource payload.

        Returns the value of the first top-level key that does not end in
        '_links' (pagination link entries are skipped).
        """
        res = json.loads(body)
        # expecting response in form
        # {'resources': [ res1, res2] } => when pagination disabled
        # {'resources': [..], 'resources_links': {}} => if pagination enabled
        for k in res.keys():
            if k.endswith("_links"):
                continue
            # NOTE(review): relies on the payload key being reached first;
            # with multiple non-links keys the result is dict-order dependent.
            return res[k]
def serialize(self, data):
return json.dumps(data)
def serialize_list(self, data, root=None, item=None):
return self.serialize(data)
def update_quotas(self, tenant_id, **kwargs):
put_body = {'quota': kwargs}
body = json.dumps(put_body)
uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['quota'])
def reset_quotas(self, tenant_id):
uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def create_router(self, name, admin_state_up=True, **kwargs):
post_body = {'router': kwargs}
post_body['router']['name'] = name
post_body['router']['admin_state_up'] = admin_state_up
body = json.dumps(post_body)
uri = '%s/routers' % (self.uri_prefix)
resp, body = self.post(uri, body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def _update_router(self, router_id, set_enable_snat, **kwargs):
uri = '%s/routers/%s' % (self.uri_prefix, router_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
update_body = {}
update_body['name'] = kwargs.get('name', body['router']['name'])
update_body['admin_state_up'] = kwargs.get(
'admin_state_up', body['router']['admin_state_up'])
if 'description' in kwargs:
update_body['description'] = kwargs['description']
cur_gw_info = body['router']['external_gateway_info']
if cur_gw_info:
# TODO(kevinbenton): setting the external gateway info is not
# allowed for a regular tenant. If the ability to update is also
# merged, a test case for this will need to be added similar to
# the SNAT case.
cur_gw_info.pop('external_fixed_ips', None)
if not set_enable_snat:
cur_gw_info.pop('enable_snat', None)
update_body['external_gateway_info'] = kwargs.get(
'external_gateway_info', body['router']['external_gateway_info'])
if 'distributed' in kwargs:
update_body['distributed'] = kwargs['distributed']
update_body = dict(router=update_body)
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_router(self, router_id, **kwargs):
"""Update a router leaving enable_snat to its default value."""
# If external_gateway_info contains enable_snat the request will fail
# with 404 unless executed with admin client, and therefore we instruct
# _update_router to not set this attribute
# NOTE(salv-orlando): The above applies as long as Neutron's default
# policy is to restrict enable_snat usage to admins only.
return self._update_router(router_id, set_enable_snat=False, **kwargs)
def update_router_with_snat_gw_info(self, router_id, **kwargs):
"""Update a router passing also the enable_snat attribute.
This method must be execute with admin credentials, otherwise the API
call will return a 404 error.
"""
return self._update_router(router_id, set_enable_snat=True, **kwargs)
def add_router_interface_with_subnet_id(self, router_id, subnet_id):
uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
router_id)
update_body = {"subnet_id": subnet_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_router_interface_with_port_id(self, router_id, port_id):
uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
router_id)
update_body = {"port_id": port_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
router_id)
update_body = {"subnet_id": subnet_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_router_interface_with_port_id(self, router_id, port_id):
uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
router_id)
update_body = {"port_id": port_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_agent(self, agent_id, agent_info):
"""
:param agent_info: Agent update information.
E.g {"admin_state_up": True}
"""
uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
agent = {"agent": agent_info}
body = json.dumps(agent)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_routers_on_l3_agent(self, agent_id):
uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_l3_agents_hosting_router(self, router_id):
uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_router_to_l3_agent(self, agent_id, router_id):
uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
post_body = {"router_id": router_id}
body = json.dumps(post_body)
resp, body = self.post(uri, body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_router_from_l3_agent(self, agent_id, router_id):
uri = '%s/agents/%s/l3-routers/%s' % (
self.uri_prefix, agent_id, router_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def list_dhcp_agent_hosting_network(self, network_id):
uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def remove_network_from_dhcp_agent(self, agent_id, network_id):
uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
network_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def update_extra_routes(self, router_id, nexthop, destination):
uri = '%s/routers/%s' % (self.uri_prefix, router_id)
put_body = {
'router': {
'routes': [{'nexthop': nexthop,
"destination": destination}]
}
}
body = json.dumps(put_body)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_extra_routes(self, router_id):
uri = '%s/routers/%s' % (self.uri_prefix, router_id)
null_routes = None
put_body = {
'router': {
'routes': null_routes
}
}
body = json.dumps(put_body)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_dhcp_agent_to_network(self, agent_id, network_id):
post_body = {'network_id': network_id}
body = json.dumps(post_body)
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
resp, body = self.post(uri, body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_qos_policies(self, **filters):
if filters:
uri = '%s/qos/policies?%s' % (self.uri_prefix,
urlparse.urlencode(filters))
else:
uri = '%s/qos/policies' % self.uri_prefix
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def create_qos_policy(self, name, description, shared, tenant_id=None):
uri = '%s/qos/policies' % self.uri_prefix
post_data = {'policy': {
'name': name,
'description': description,
'shared': shared
}}
if tenant_id is not None:
post_data['policy']['tenant_id'] = tenant_id
resp, body = self.post(uri, self.serialize(post_data))
body = self.deserialize_single(body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def update_qos_policy(self, policy_id, **kwargs):
uri = '%s/qos/policies/%s' % (self.uri_prefix, policy_id)
post_data = self.serialize({'policy': kwargs})
resp, body = self.put(uri, post_data)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def create_bandwidth_limit_rule(self, policy_id, max_kbps, max_burst_kbps):
uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
self.uri_prefix, policy_id)
post_data = self.serialize(
{'bandwidth_limit_rule': {
'max_kbps': max_kbps,
'max_burst_kbps': max_burst_kbps}
})
resp, body = self.post(uri, post_data)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_bandwidth_limit_rules(self, policy_id):
uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
self.uri_prefix, policy_id)
resp, body = self.get(uri)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def show_bandwidth_limit_rule(self, policy_id, rule_id):
uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
resp, body = self.get(uri)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def update_bandwidth_limit_rule(self, policy_id, rule_id, **kwargs):
uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
post_data = {'bandwidth_limit_rule': kwargs}
resp, body = self.put(uri, json.dumps(post_data))
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def delete_bandwidth_limit_rule(self, policy_id, rule_id):
uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def create_dscp_marking_rule(self, policy_id, dscp_mark):
uri = '%s/qos/policies/%s/dscp_marking_rules' % (
self.uri_prefix, policy_id)
post_data = self.serialize(
{'dscp_marking_rule': {
'dscp_mark': dscp_mark}
})
resp, body = self.post(uri, post_data)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_dscp_marking_rules(self, policy_id):
uri = '%s/qos/policies/%s/dscp_marking_rules' % (
self.uri_prefix, policy_id)
resp, body = self.get(uri)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def show_dscp_marking_rule(self, policy_id, rule_id):
uri = '%s/qos/policies/%s/dscp_marking_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
resp, body = self.get(uri)
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def update_dscp_marking_rule(self, policy_id, rule_id, **kwargs):
uri = '%s/qos/policies/%s/dscp_marking_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
post_data = {'dscp_marking_rule': kwargs}
resp, body = self.put(uri, json.dumps(post_data))
body = self.deserialize_single(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def delete_dscp_marking_rule(self, policy_id, rule_id):
uri = '%s/qos/policies/%s/dscp_marking_rules/%s' % (
self.uri_prefix, policy_id, rule_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def list_qos_rule_types(self):
uri = '%s/qos/rule-types' % self.uri_prefix
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_auto_allocated_topology(self, tenant_id=None):
uri = '%s/auto-allocated-topology/%s' % (self.uri_prefix, tenant_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)

View File

@ -1,83 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.api.rpc.agentnotifiers import bgp_dr_rpc_agent_api
from neutron import context
from neutron.tests import base
class TestBgpDrAgentNotifyApi(base.BaseTestCase):
    """Verify each BgpDrAgentNotifyApi notification goes out as exactly one
    host-directed cast and never as a blocking call.
    """
    def setUp(self):
        super(TestBgpDrAgentNotifyApi, self).setUp()
        self.notifier = (
            bgp_dr_rpc_agent_api.BgpDrAgentNotifyApi())
        # Stub out both transport paths so the tests only count invocations.
        mock_cast_p = mock.patch.object(self.notifier,
                                        '_notification_host_cast')
        self.mock_cast = mock_cast_p.start()
        mock_call_p = mock.patch.object(self.notifier,
                                        '_notification_host_call')
        self.mock_call = mock_call_p.start()
        self.context = context.get_admin_context()
        self.host = 'host-1'
    def test_notify_dragent_bgp_routes_advertisement(self):
        bgp_speaker_id = 'bgp-speaker-1'
        routes = [{'destination': '1.1.1.1', 'next_hop': '2.2.2.2'}]
        self.notifier.bgp_routes_advertisement(self.context, bgp_speaker_id,
                                               routes, self.host)
        # Exactly one cast, no blocking call.
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)
    def test_notify_dragent_bgp_routes_withdrawal(self):
        bgp_speaker_id = 'bgp-speaker-1'
        routes = [{'destination': '1.1.1.1'}]
        self.notifier.bgp_routes_withdrawal(self.context, bgp_speaker_id,
                                            routes, self.host)
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)
    def test_notify_bgp_peer_disassociated(self):
        bgp_speaker_id = 'bgp-speaker-1'
        bgp_peer_ip = '1.1.1.1'
        self.notifier.bgp_peer_disassociated(self.context, bgp_speaker_id,
                                             bgp_peer_ip, self.host)
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)
    def test_notify_bgp_peer_associated(self):
        bgp_speaker_id = 'bgp-speaker-1'
        bgp_peer_id = 'bgp-peer-1'
        self.notifier.bgp_peer_associated(self.context, bgp_speaker_id,
                                          bgp_peer_id, self.host)
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)
    def test_notify_bgp_speaker_created(self):
        bgp_speaker_id = 'bgp-speaker-1'
        self.notifier.bgp_speaker_created(self.context, bgp_speaker_id,
                                          self.host)
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)
    def test_notify_bgp_speaker_removed(self):
        bgp_speaker_id = 'bgp-speaker-1'
        self.notifier.bgp_speaker_removed(self.context, bgp_speaker_id,
                                          self.host)
        self.assertEqual(1, self.mock_cast.call_count)
        self.assertEqual(0, self.mock_call.call_count)

View File

@ -1,44 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.api.rpc.handlers import bgp_speaker_rpc
from neutron.tests import base
class TestBgpSpeakerRpcCallback(base.BaseTestCase):
    """Verify BgpSpeakerRpcCallback delegates its RPCs to the service plugin."""
    def setUp(self):
        # Patch the service-plugin lookup so the callback talks to a mock.
        self.plugin_p = mock.patch('neutron.manager.NeutronManager.'
                                   'get_service_plugins')
        self.plugin = self.plugin_p.start()
        self.callback = bgp_speaker_rpc.BgpSpeakerRpcCallback()
        super(TestBgpSpeakerRpcCallback, self).setUp()
    def test_get_bgp_speaker_info(self):
        self.callback.get_bgp_speaker_info(mock.Mock(),
                                           bgp_speaker_id='id1')
        # The original assertIsNotNone(len(...)) could never fail, because
        # len() never returns None.  Assert the plugin mock was actually
        # touched instead.
        self.assertTrue(self.plugin.mock_calls)
    def test_get_bgp_peer_info(self):
        self.callback.get_bgp_peer_info(mock.Mock(),
                                        bgp_peer_id='id1')
        self.assertTrue(self.plugin.mock_calls)
    def test_get_bgp_speakers(self):
        self.callback.get_bgp_speakers(mock.Mock(),
                                       host='host')
        self.assertTrue(self.plugin.mock_calls)

File diff suppressed because it is too large Load Diff

View File

@ -1,203 +0,0 @@
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
from neutron.api.v2 import attributes
from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.extensions import agent
from neutron.extensions import bgp
from neutron.extensions import bgp_dragentscheduler as bgp_dras_ext
from neutron import manager
from neutron.tests.unit.db import test_bgp_db
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base_plugin
from neutron.tests.unit.extensions import test_agent
from webob import exc
class BgpDrSchedulerTestExtensionManager(object):
    """Extension manager stub exposing agent and BGP dragent scheduler
    resources for the scheduling tests. No actions or request extensions
    are provided.
    """

    def get_resources(self):
        # Merge the agent attribute map first so the agent extension
        # resources validate correctly, then expose both resource sets.
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        res = agent.Agent.get_resources()
        res.extend(bgp_dras_ext.Bgp_dragentscheduler.get_resources())
        return res

    def get_actions(self):
        # This test extension manager defines no extra actions.
        return []

    def get_request_extensions(self):
        # Nor any request extensions.
        return []
class TestBgpDrSchedulerPlugin(bgp_db.BgpDbMixin,
                               bgp_dras_db.BgpDrAgentSchedulerDbMixin):
    # Minimal service-plugin stand-in combining the BGP DB mixin with the
    # dragent scheduler mixin, used as the test plugin for scheduling tests.
    # NOTE: import_object runs at class-definition time, so the
    # bgp_drscheduler_driver option must already be registered when this
    # module is imported.
    bgp_drscheduler = importutils.import_object(
        cfg.CONF.bgp_drscheduler_driver)
    supported_extension_aliases = ["bgp_dragent_scheduler"]
    def get_plugin_description(self):
        # Human-readable description required by the plugin interface.
        return ("BGP dynamic routing service Plugin test class that test "
                "BGP speaker functionality, with scheduler.")
class BgpDrSchedulingTestCase(test_agent.AgentDBTestMixIn,
                              test_bgp_db.BgpEntityCreationMixin):
    """API-level tests for scheduling BGP speakers onto dragents.

    Exercises the 'bgp-drinstances' sub-resource of the agents API:
    create (schedule), show (list hosted speakers) and delete (unschedule),
    including the error paths for invalid agents and double scheduling.
    """

    def test_schedule_bgp_speaker(self):
        """Test happy path over full scheduling cycle."""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_bgp_dragent(host='host1')
            agent = self._list('agents')['agents'][0]
            agent_id = agent['id']
            data = {'bgp_speaker_id': bgp_speaker_id}
            req = self.new_create_request('agents', data, self.fmt,
                                          agent_id, 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)
            req_show = self.new_show_request('agents', agent_id, self.fmt,
                                             'bgp-drinstances')
            res = req_show.get_response(self.ext_api)
            self.assertEqual(exc.HTTPOk.code, res.status_int)
            res = self.deserialize(self.fmt, res)
            self.assertIn('bgp_speakers', res)
            # BUG FIX: the original used assertTrue(a, b), where the second
            # argument is only the failure *message*, so the id was never
            # compared. Compare the scheduled speaker id explicitly.
            self.assertEqual(bgp_speaker_id,
                             res['bgp_speakers'][0]['id'])
            req = self.new_delete_request('agents',
                                          agent_id,
                                          self.fmt,
                                          'bgp-drinstances',
                                          bgp_speaker_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPNoContent.code, res.status_int)
            # After unscheduling, the agent must host no speakers.
            res = req_show.get_response(self.ext_api)
            self.assertEqual(exc.HTTPOk.code, res.status_int)
            res = self.deserialize(self.fmt, res)
            self.assertIn('bgp_speakers', res)
            self.assertEqual([], res['bgp_speakers'])

    def test_schedule_bgp_speaker_on_invalid_agent(self):
        """Test error while scheduling BGP speaker on an invalid agent."""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_l3_agent(host='host1')  # Register wrong agent
            agent = self._list('agents')['agents'][0]
            data = {'bgp_speaker_id': bgp_speaker_id}
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            # Raises an AgentNotFound exception if the agent is invalid
            self.assertEqual(exc.HTTPNotFound.code, res.status_int)

    def test_schedule_bgp_speaker_twice_on_same_agent(self):
        """Test error if a BGP speaker is scheduled twice on same agent"""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_bgp_dragent(host='host1')
            agent = self._list('agents')['agents'][0]
            data = {'bgp_speaker_id': bgp_speaker_id}
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)
            # Try second time, should raise conflict
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPConflict.code, res.status_int)

    def test_schedule_bgp_speaker_on_two_different_agents(self):
        """Test that a BGP speaker can be associated to two agents."""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_bgp_dragent(host='host1')
            self._register_bgp_dragent(host='host2')
            data = {'bgp_speaker_id': bgp_speaker_id}
            agent1 = self._list('agents')['agents'][0]
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent1['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)
            agent2 = self._list('agents')['agents'][1]
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent2['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)

    def test_schedule_multi_bgp_speaker_on_one_dragent(self):
        """Test only one BGP speaker can be associated to one dragent."""
        with self.bgp_speaker(4, 1) as ri1, self.bgp_speaker(4, 2) as ri2:
            self._register_bgp_dragent(host='host1')
            agent = self._list('agents')['agents'][0]
            data = {'bgp_speaker_id': ri1['id']}
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)
            # Scheduling a second speaker on the same dragent must conflict.
            data = {'bgp_speaker_id': ri2['id']}
            req = self.new_create_request(
                'agents', data, self.fmt,
                agent['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPConflict.code, res.status_int)

    def test_non_scheduled_bgp_speaker_binding_removal(self):
        """Test exception while removing an invalid binding."""
        with self.bgp_speaker(4, 1234) as ri1:
            self._register_bgp_dragent(host='host1')
            agent = self._list('agents')['agents'][0]
            agent_id = agent['id']
            self.assertRaises(bgp_dras_ext.DrAgentNotHostingBgpSpeaker,
                              self.bgp_plugin.remove_bgp_speaker_from_dragent,
                              self.context, agent_id, ri1['id'])
class BgpDrPluginSchedulerTests(test_db_base_plugin.NeutronDbPluginV2TestCase,
                                BgpDrSchedulingTestCase):
    """Wires BgpDrSchedulingTestCase up with a concrete plugin/extension
    manager and runs it against the DB plugin test harness.
    """

    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
        # Fall back to the test plugin / BGP service plugin / extension
        # manager defined in this module when the caller supplies none.
        plugin = plugin or ('neutron.tests.unit.db.'
                            'test_bgp_dragentscheduler_db.TestBgpDrSchedulerPlugin')
        service_plugins = service_plugins or {
            bgp.BGP_EXT_ALIAS: 'neutron.services.bgp.bgp_plugin.BgpPlugin'}
        ext_mgr = ext_mgr or BgpDrSchedulerTestExtensionManager()
        super(BgpDrPluginSchedulerTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
        # Cache the loaded BGP service plugin and an admin context for the
        # mixed-in test cases.
        self.bgp_plugin = manager.NeutronManager.get_service_plugins().get(
            bgp.BGP_EXT_ALIAS)
        self.context = context.get_admin_context()

View File

@ -1,224 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from oslo_utils import importutils
from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios. Not compatible with nose.
# testscenarios expands every TestCase carrying a 'scenarios' attribute into
# one concrete test per scenario via the standard load_tests protocol.
load_tests = testscenarios.load_tests_apply_scenarios
class TestBgpDrAgentSchedulerBaseTestCase(testlib_api.SqlTestCase):
    """Shared fixture: one persisted BGP speaker plus helpers to register
    dragents (optionally dead) and to bind speakers to agents.
    """

    def setUp(self):
        super(TestBgpDrAgentSchedulerBaseTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.bgp_speaker = {'id': 'foo_bgp_speaker_id'}
        self.bgp_speaker_id = 'foo_bgp_speaker_id'
        self._save_bgp_speaker(self.bgp_speaker_id)

    def _create_and_set_agents_down(self, hosts, down_agent_count=0,
                                    admin_state_up=True):
        # The first `down_agent_count` hosts are registered as dead agents;
        # the remainder are alive.
        return [helpers.register_bgp_dragent(
                    hostname,
                    admin_state_up=admin_state_up,
                    alive=(idx >= down_agent_count))
                for idx, hostname in enumerate(hosts)]

    def _save_bgp_speaker(self, bgp_speaker_id):
        # Persist a minimal IPv4 speaker record directly through the mixin.
        mixin = bgp_db.BgpDbMixin()
        body = {'bgp_speaker': {'ip_version': '4',
                                'name': 'test-speaker',
                                'local_as': '123',
                                'advertise_floating_ip_host_routes': '0',
                                'advertise_tenant_networks': '0',
                                'peers': [],
                                'networks': []}}
        mixin._save_bgp_speaker(self.ctx, body, uuid=bgp_speaker_id)

    def _test_schedule_bind_bgp_speaker(self, agents, bgp_speaker_id):
        # Bind the speaker to the given agents, then verify each persisted
        # binding row references that speaker.
        scheduler = bgp_dras.ChanceScheduler()
        scheduler.resource_filter.bind(self.ctx, agents, bgp_speaker_id)
        bindings = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).filter_by(
            bgp_speaker_id=bgp_speaker_id).all()
        for binding in bindings:
            self.assertEqual(bgp_speaker_id, binding.bgp_speaker_id)
class TestBgpDrAgentScheduler(TestBgpDrAgentSchedulerBaseTestCase,
                              bgp_db.BgpDbMixin):
    """Binding the fixture speaker to one or several live dragents."""

    def test_schedule_bind_bgp_speaker_single_agent(self):
        # One live agent, one speaker: binding must succeed.
        dragents = self._create_and_set_agents_down(['host-a'])
        self._test_schedule_bind_bgp_speaker(dragents, self.bgp_speaker_id)

    def test_schedule_bind_bgp_speaker_multi_agents(self):
        # The same speaker may be bound to multiple agents at once.
        dragents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker(dragents, self.bgp_speaker_id)
class TestBgpAgentFilter(TestBgpDrAgentSchedulerBaseTestCase,
                         bgp_db.BgpDbMixin,
                         bgp_dras_db.BgpDrAgentSchedulerDbMixin):
    # Tests for the scheduler's agent filter: which dragents are hostable
    # for a given BGP speaker. The test class itself doubles as the plugin
    # (self.plugin = self) because it mixes in the needed DB behavior.
    def setUp(self):
        super(TestBgpAgentFilter, self).setUp()
        self.bgp_drscheduler = importutils.import_object(
            'neutron.services.bgp.scheduler'
            '.bgp_dragent_scheduler.ChanceScheduler'
        )
        self.plugin = self
    def _test_filter_agents_helper(self, bgp_speaker,
                                   expected_filtered_dragent_ids=None,
                                   expected_num_agents=1):
        # Runs filter_agents and checks both the reported agent count and
        # the exact set of hostable agent ids.
        # None defaults to the empty id list (no hostable agents expected).
        if not expected_filtered_dragent_ids:
            expected_filtered_dragent_ids = []
        filtered_agents = (
            self.plugin.bgp_drscheduler.resource_filter.filter_agents(
                self.plugin, self.ctx, bgp_speaker))
        self.assertEqual(expected_num_agents,
                         filtered_agents['n_agents'])
        actual_filtered_dragent_ids = [
            agent.id for agent in filtered_agents['hostable_agents']]
        self.assertEqual(len(expected_filtered_dragent_ids),
                         len(actual_filtered_dragent_ids))
        # Same length + subset check => the two id sets are equal.
        for filtered_agent_id in actual_filtered_dragent_ids:
            self.assertIn(filtered_agent_id, expected_filtered_dragent_ids)
    def test_filter_agents_single_agent(self):
        # A lone live agent is hostable.
        agents = self._create_and_set_agents_down(['host-a'])
        expected_filtered_dragent_ids = [agents[0].id]
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)
    def test_filter_agents_no_agents(self):
        # With no agents registered, nothing is hostable.
        expected_filtered_dragent_ids = []
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids,
            expected_num_agents=0)
    def test_filter_agents_two_agents(self):
        # Both live agents are candidates for an unscheduled speaker.
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        expected_filtered_dragent_ids = [agent.id for agent in agents]
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)
    def test_filter_agents_agent_already_scheduled(self):
        # Once the speaker is bound to an agent, no further agents are
        # offered for it.
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id)
        self._test_filter_agents_helper(self.bgp_speaker,
                                        expected_num_agents=0)
    def test_filter_agents_multiple_agents_bgp_speakers(self):
        # An agent already hosting one speaker is excluded for a second
        # speaker; only the free agent remains hostable.
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id)
        bgp_speaker = {'id': 'bar-speaker-id'}
        self._save_bgp_speaker(bgp_speaker['id'])
        expected_filtered_dragent_ids = [agents[1].id]
        self._test_filter_agents_helper(
            bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)
class TestAutoScheduleBgpSpeakers(TestBgpDrAgentSchedulerBaseTestCase):
    """Unit test scenarios for schedule_unscheduled_bgp_speakers.
    bgp_speaker_present
        BGP speaker is present or not
    scheduled_already
        BGP speaker is already scheduled to the agent or not
    agent_down
        BGP DRAgent is down or alive
    valid_host
        If true, then an valid host is passed to schedule BGP speaker,
        else an invalid host is passed.
    """
    # Each scenario tuple is expanded into its own test by testscenarios
    # (see the module-level load_tests hook).
    scenarios = [
        ('BGP speaker present',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=True)),

        ('No BGP speaker',
         dict(bgp_speaker_present=False,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP speaker already scheduled',
         dict(bgp_speaker_present=True,
              scheduled_already=True,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP DR agent down',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=True,
              valid_host=False,
              expected_result=False)),

        ('Invalid host',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=False,
              expected_result=False)),
    ]
    def test_auto_schedule_bgp_speaker(self):
        # Arrange the world according to the scenario flags, then check
        # both the return value of schedule_unscheduled_bgp_speakers and
        # the number of binding rows it left behind.
        scheduler = bgp_dras.ChanceScheduler()
        if self.bgp_speaker_present:
            down_agent_count = 1 if self.agent_down else 0
            agents = self._create_and_set_agents_down(
                ['host-a'], down_agent_count=down_agent_count)
            if self.scheduled_already:
                self._test_schedule_bind_bgp_speaker(agents,
                                                     self.bgp_speaker_id)
        # A binding is expected only when a speaker exists and the host
        # passed in actually matches a registered agent.
        expected_hosted_agents = (1 if self.bgp_speaker_present and
                                  self.valid_host else 0)
        host = "host-a" if self.valid_host else "host-b"
        observed_ret_value = scheduler.schedule_unscheduled_bgp_speakers(
            self.ctx, host)
        self.assertEqual(self.expected_result, observed_ret_value)
        hosted_agents = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).all()
        self.assertEqual(expected_hosted_agents, len(hosted_agents))

View File

@ -1,736 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sys
import uuid
import eventlet
import mock
from oslo_config import cfg
import testtools
from neutron.common import config as common_config
from neutron import context
from neutron.services.bgp.agent import bgp_dragent
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.agent import entry
from neutron.tests import base
# Hostname used when instantiating agents under test.
HOSTNAME = 'hostname'
# Dotted path of the plugin RPC API class, used as a mock.patch target.
rpc_api = bgp_dragent.BgpDrPluginApi
BGP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__)
# Random but stable-for-the-run ids shared by the fixtures below.
FAKE_BGPSPEAKER_UUID = str(uuid.uuid4())
FAKE_BGPPEER_UUID = str(uuid.uuid4())
# A speaker with a single peer and no advertised routes.
FAKE_BGP_SPEAKER = {'id': FAKE_BGPSPEAKER_UUID,
                    'local_as': 12345,
                    'peers': [{'remote_as': '2345',
                               'peer_ip': '1.1.1.1',
                               'auth_type': 'none',
                               'password': ''}],
                    'advertised_routes': []}
FAKE_BGP_PEER = {'id': FAKE_BGPPEER_UUID,
                 'remote_as': '2345',
                 'peer_ip': '1.1.1.1',
                 'auth_type': 'none',
                 'password': ''}
FAKE_ROUTE = {'id': FAKE_BGPSPEAKER_UUID,
              'destination': '2.2.2.2/32',
              'next_hop': '3.3.3.3'}
# Same route wrapped in the payload shape used by the routes helpers.
FAKE_ROUTES = {'routes': {'id': FAKE_BGPSPEAKER_UUID,
                          'destination': '2.2.2.2/32',
                          'next_hop': '3.3.3.3'}
               }
class TestBgpDrAgent(base.BaseTestCase):
    """Unit tests for BgpDrAgent sync/cache behavior.

    The BGP driver import is patched out in setUp, so agents are created
    without instantiating a real BGP speaker driver.
    """

    def setUp(self):
        super(TestBgpDrAgent, self).setUp()
        cfg.CONF.register_opts(bgp_config.BGP_DRIVER_OPTS, 'BGP')
        cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
        mock_log_p = mock.patch.object(bgp_dragent, 'LOG')
        self.mock_log = mock_log_p.start()
        # Prevent a real BGP driver from being imported/instantiated.
        self.driver_cls_p = mock.patch(
            'neutron.services.bgp.agent.bgp_dragent.importutils.import_class')
        self.driver_cls = self.driver_cls_p.start()
        self.context = context.get_admin_context()

    def test_bgp_dragent_manager(self):
        """Agent-with-state-report syncs state and reports it via RPC."""
        state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI'
        # sync_state is needed for this test
        with mock.patch.object(bgp_dragent.BgpDrAgentWithStateReport,
                               'sync_state',
                               autospec=True) as mock_sync_state:
            with mock.patch(state_rpc_str) as state_rpc:
                with mock.patch.object(sys, 'argv') as sys_argv:
                    sys_argv.return_value = [
                        'bgp_dragent', '--config-file',
                        base.etcdir('neutron.conf')]
                    common_config.init(sys.argv[1:])
                    agent_mgr = bgp_dragent.BgpDrAgentWithStateReport(
                        'testhost')
                    eventlet.greenthread.sleep(1)
                    agent_mgr.after_start()
                    # BUG FIX: assertIsNotNone(len(...)) can never fail
                    # because len() never returns None; assert that
                    # sync_state was actually called.
                    self.assertTrue(mock_sync_state.mock_calls)
                    state_rpc.assert_has_calls(
                        [mock.call(mock.ANY),
                         mock.call().report_state(mock.ANY, mock.ANY,
                                                  mock.ANY)])

    def test_bgp_dragent_main_agent_manager(self):
        """entry.main() launches the agent service and waits on it."""
        logging_str = 'neutron.agent.common.config.setup_logging'
        launcher_str = 'oslo_service.service.ServiceLauncher'
        with mock.patch(logging_str):
            with mock.patch.object(sys, 'argv') as sys_argv:
                with mock.patch(launcher_str) as launcher:
                    sys_argv.return_value = ['bgp_dragent', '--config-file',
                                             base.etcdir('neutron.conf')]
                    entry.main()
                    launcher.assert_has_calls(
                        [mock.call(cfg.CONF),
                         mock.call().launch_service(mock.ANY),
                         mock.call().wait()])

    def test_run_completes_single_pass(self):
        """run() performs at least one state sync."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        with mock.patch.object(bgp_dr, 'sync_state') as sync_state:
            bgp_dr.run()
            # BUG FIX: was a vacuous assertIsNotNone(len(...)).
            self.assertTrue(sync_state.mock_calls)

    def test_after_start(self):
        """after_start() also triggers a state sync."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        with mock.patch.object(bgp_dr, 'sync_state') as sync_state:
            bgp_dr.after_start()
            # BUG FIX: was a vacuous assertIsNotNone(len(...)).
            self.assertTrue(sync_state.mock_calls)

    def _test_sync_state_helper(self, bgp_speaker_list=None,
                                cached_info=None,
                                safe_configure_call_count=0,
                                sync_bgp_speaker_call_count=0,
                                remove_bgp_speaker_call_count=0,
                                remove_bgp_speaker_ids=None,
                                added_bgp_speakers=None,
                                synced_bgp_speakers=None):
        """Drive sync_state with a given plugin/cache state and verify how
        many speakers were added, synced and removed, and with what args.
        """
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        attrs_to_mock = dict(
            [(a, mock.MagicMock())
             for a in ['plugin_rpc', 'sync_bgp_speaker',
                       'safe_configure_dragent_for_bgp_speaker',
                       'remove_bgp_speaker_from_dragent']])
        with mock.patch.multiple(bgp_dr, **attrs_to_mock):
            # Normalize the optional arguments to empty containers.
            if not cached_info:
                cached_info = {}
            if not added_bgp_speakers:
                added_bgp_speakers = []
            if not remove_bgp_speaker_ids:
                remove_bgp_speaker_ids = []
            if not synced_bgp_speakers:
                synced_bgp_speakers = []
            bgp_dr.plugin_rpc.get_bgp_speakers.return_value = bgp_speaker_list
            bgp_dr.cache.cache = cached_info
            bgp_dr.cache.clear_cache = mock.Mock()
            bgp_dr.sync_state(mock.ANY)
            # Removal expectations.
            self.assertEqual(
                remove_bgp_speaker_call_count,
                bgp_dr.remove_bgp_speaker_from_dragent.call_count)
            if remove_bgp_speaker_call_count:
                expected_calls = [mock.call(bgp_speaker_id)
                                  for bgp_speaker_id in remove_bgp_speaker_ids]
                bgp_dr.remove_bgp_speaker_from_dragent.assert_has_calls(
                    expected_calls)
            # Addition (configure) expectations.
            self.assertEqual(
                safe_configure_call_count,
                bgp_dr.safe_configure_dragent_for_bgp_speaker.call_count)
            if safe_configure_call_count:
                expected_calls = [mock.call(bgp_speaker)
                                  for bgp_speaker in added_bgp_speakers]
                bgp_dr.safe_configure_dragent_for_bgp_speaker.assert_has_calls(
                    expected_calls)
            # Re-sync expectations for speakers already cached.
            self.assertEqual(sync_bgp_speaker_call_count,
                             bgp_dr.sync_bgp_speaker.call_count)
            if sync_bgp_speaker_call_count:
                expected_calls = [mock.call(bgp_speaker)
                                  for bgp_speaker in synced_bgp_speakers]
                bgp_dr.sync_bgp_speaker.assert_has_calls(expected_calls)

    def test_sync_state_bgp_speaker_added(self):
        """A speaker unknown to the cache gets configured."""
        bgp_speaker_list = [{'id': 'foo-id',
                             'local_as': 12345,
                             'peers': [],
                             'advertised_routes': []}]
        self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list,
                                     safe_configure_call_count=1,
                                     added_bgp_speakers=bgp_speaker_list)

    def test_sync_state_bgp_speaker_deleted(self):
        """A cached speaker absent from the plugin list gets removed."""
        bgp_speaker_list = []
        cached_bgp_speaker = {'id': 'foo-id',
                              'local_as': 12345,
                              'peers': ['peer-1'],
                              'advertised_routes': []}
        cached_info = {'foo-id': cached_bgp_speaker}
        self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list,
                                     cached_info=cached_info,
                                     remove_bgp_speaker_call_count=1,
                                     remove_bgp_speaker_ids=['foo-id'])

    def test_sync_state_added_and_deleted(self):
        """One new speaker configured while a stale one is removed."""
        bgp_speaker_list = [{'id': 'foo-id',
                             'local_as': 12345,
                             'peers': [],
                             'advertised_routes': []}]
        cached_bgp_speaker = {'bgp_speaker': {'local_as': 12345},
                              'peers': ['peer-1'],
                              'advertised_routes': []}
        cached_info = {'bar-id': cached_bgp_speaker}
        self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list,
                                     cached_info=cached_info,
                                     remove_bgp_speaker_call_count=1,
                                     remove_bgp_speaker_ids=['bar-id'],
                                     safe_configure_call_count=1,
                                     added_bgp_speakers=bgp_speaker_list)

    def test_sync_state_added_and_synced(self):
        """New speaker configured; already-cached speakers re-synced."""
        bgp_speaker_list = [{'id': 'foo-id',
                             'local_as': 12345,
                             'peers': [],
                             'advertised_routes': []},
                            {'id': 'bar-id', 'peers': ['peer-2'],
                             'advertised_routes': []},
                            {'id': 'temp-id', 'peers': ['temp-1'],
                             'advertised_routes': []}]
        cached_bgp_speaker = {'id': 'bar-id', 'bgp_speaker': {'id': 'bar-id'},
                              'peers': ['peer-1'],
                              'advertised_routes': []}
        cached_bgp_speaker_2 = {'id': 'temp-id',
                                'bgp_speaker': {'id': 'temp-id'},
                                'peers': ['temp-1'],
                                'advertised_routes': []}
        cached_info = {'bar-id': cached_bgp_speaker,
                       'temp-id': cached_bgp_speaker_2}
        self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list,
                                     cached_info=cached_info,
                                     safe_configure_call_count=1,
                                     added_bgp_speakers=[bgp_speaker_list[0]],
                                     sync_bgp_speaker_call_count=2,
                                     synced_bgp_speakers=[bgp_speaker_list[1],
                                                          bgp_speaker_list[2]]
                                     )

    def test_sync_state_added_synced_and_removed(self):
        """Add, re-sync and remove in the same pass."""
        bgp_speaker_list = [{'id': 'foo-id',
                             'local_as': 12345,
                             'peers': [],
                             'advertised_routes': []},
                            {'id': 'bar-id', 'peers': ['peer-2'],
                             'advertised_routes': []}]
        cached_bgp_speaker = {'id': 'bar-id',
                              'bgp_speaker': {'id': 'bar-id'},
                              'peers': ['peer-1'],
                              'advertised_routes': []}
        cached_bgp_speaker_2 = {'id': 'temp-id',
                                'bgp_speaker': {'id': 'temp-id'},
                                'peers': ['temp-1'],
                                'advertised_routes': []}
        cached_info = {'bar-id': cached_bgp_speaker,
                       'temp-id': cached_bgp_speaker_2}
        self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list,
                                     cached_info=cached_info,
                                     remove_bgp_speaker_call_count=1,
                                     remove_bgp_speaker_ids=['temp-id'],
                                     safe_configure_call_count=1,
                                     added_bgp_speakers=[bgp_speaker_list[0]],
                                     sync_bgp_speaker_call_count=1,
                                     synced_bgp_speakers=[bgp_speaker_list[1]])

    def _test_sync_bgp_speaker_helper(self, bgp_speaker, cached_info=None,
                                      remove_bgp_peer_call_count=0,
                                      removed_bgp_peer_ip_list=None,
                                      withdraw_route_call_count=0,
                                      withdraw_routes_list=None,
                                      add_bgp_peers_called=False,
                                      advertise_routes_called=False):
        """Drive sync_bgp_speaker against a cache state and verify which
        peer/route add-remove operations were performed.
        """
        if not cached_info:
            cached_info = {}
        if not removed_bgp_peer_ip_list:
            removed_bgp_peer_ip_list = []
        if not withdraw_routes_list:
            withdraw_routes_list = []
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        attrs_to_mock = dict(
            [(a, mock.MagicMock())
             for a in ['remove_bgp_peer_from_bgp_speaker',
                       'add_bgp_peers_to_bgp_speaker',
                       'advertise_routes_via_bgp_speaker',
                       'withdraw_route_via_bgp_speaker']])
        with mock.patch.multiple(bgp_dr, **attrs_to_mock):
            bgp_dr.cache.cache = cached_info
            bgp_dr.sync_bgp_speaker(bgp_speaker)
            # Peer removal expectations.
            self.assertEqual(
                remove_bgp_peer_call_count,
                bgp_dr.remove_bgp_peer_from_bgp_speaker.call_count)
            if remove_bgp_peer_call_count:
                expected_calls = [mock.call(bgp_speaker['id'], peer_ip)
                                  for peer_ip in removed_bgp_peer_ip_list]
                bgp_dr.remove_bgp_peer_from_bgp_speaker.assert_has_calls(
                    expected_calls)
            # Peer addition expectations.
            self.assertEqual(add_bgp_peers_called,
                             bgp_dr.add_bgp_peers_to_bgp_speaker.called)
            if add_bgp_peers_called:
                bgp_dr.add_bgp_peers_to_bgp_speaker.assert_called_with(
                    bgp_speaker)
            # Route withdrawal expectations.
            self.assertEqual(
                withdraw_route_call_count,
                bgp_dr.withdraw_route_via_bgp_speaker.call_count)
            if withdraw_route_call_count:
                expected_calls = [mock.call(bgp_speaker['id'], 12345, route)
                                  for route in withdraw_routes_list]
                bgp_dr.withdraw_route_via_bgp_speaker.assert_has_calls(
                    expected_calls)
            # Route advertisement expectations.
            self.assertEqual(advertise_routes_called,
                             bgp_dr.advertise_routes_via_bgp_speaker.called)
            if advertise_routes_called:
                bgp_dr.advertise_routes_via_bgp_speaker.assert_called_with(
                    bgp_speaker)

    def test_sync_bgp_speaker_bgp_peers_updated(self):
        """Stale cached peer is removed; current peers are (re)added."""
        peers = [{'id': 'peer-1', 'peer_ip': '1.1.1.1'},
                 {'id': 'peer-2', 'peer_ip': '2.2.2.2'}]
        bgp_speaker = {'id': 'foo-id',
                       'local_as': 12345,
                       'peers': peers,
                       'advertised_routes': []}
        cached_peers = {'1.1.1.1': {'id': 'peer-2', 'peer_ip': '1.1.1.1'},
                        '3.3.3.3': {'id': 'peer-3', 'peer_ip': '3.3.3.3'}}
        cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345},
                                         'peers': cached_peers,
                                         'advertised_routes': []}}
        self._test_sync_bgp_speaker_helper(
            bgp_speaker, cached_info=cached_bgp_speaker,
            remove_bgp_peer_call_count=1,
            removed_bgp_peer_ip_list=['3.3.3.3'],
            add_bgp_peers_called=True,
            advertise_routes_called=False)

    def test_sync_bgp_speaker_routes_updated(self):
        """Stale cached route is withdrawn; current routes advertised."""
        adv_routes = [{'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'},
                      {'destination': '20.0.0.0/24', 'next_hop': '2.2.2.2'}]
        bgp_speaker = {'id': 'foo-id',
                       'local_as': 12345,
                       'peers': {},
                       'advertised_routes': adv_routes}
        cached_adv_routes = [{'destination': '20.0.0.0/24',
                              'next_hop': '2.2.2.2'},
                             {'destination': '30.0.0.0/24',
                              'next_hop': '3.3.3.3'}]
        cached_bgp_speaker = {
            'foo-id': {'bgp_speaker': {'local_as': 12345},
                       'peers': {},
                       'advertised_routes': cached_adv_routes}}
        self._test_sync_bgp_speaker_helper(
            bgp_speaker, cached_info=cached_bgp_speaker,
            withdraw_route_call_count=1,
            withdraw_routes_list=[cached_adv_routes[1]],
            add_bgp_peers_called=False,
            advertise_routes_called=True)

    def test_sync_bgp_speaker_peers_routes_added(self):
        """Fresh peers and routes on an empty cache trigger both adds."""
        peers = [{'id': 'peer-1', 'peer_ip': '1.1.1.1'},
                 {'id': 'peer-2', 'peer_ip': '2.2.2.2'}]
        adv_routes = [{'destination': '10.0.0.0/24',
                       'next_hop': '1.1.1.1'},
                      {'destination': '20.0.0.0/24',
                       'next_hop': '2.2.2.2'}]
        bgp_speaker = {'id': 'foo-id',
                       'local_as': 12345,
                       'peers': peers,
                       'advertised_routes': adv_routes}
        cached_bgp_speaker = {
            'foo-id': {'bgp_speaker': {'local_as': 12345},
                       'peers': {},
                       'advertised_routes': []}}
        self._test_sync_bgp_speaker_helper(
            bgp_speaker, cached_info=cached_bgp_speaker,
            add_bgp_peers_called=True,
            advertise_routes_called=True)

    def test_sync_state_plugin_error(self):
        """RPC failure during sync is logged and schedules a full resync."""
        with mock.patch(BGP_PLUGIN) as plug:
            mock_plugin = mock.Mock()
            mock_plugin.get_bgp_speakers.side_effect = Exception
            plug.return_value = mock_plugin
            with mock.patch.object(bgp_dragent.LOG, 'error') as log:
                bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
                with mock.patch.object(bgp_dr,
                                       'schedule_full_resync'
                                       ) as schedule_full_resync:
                    bgp_dr.sync_state(mock.ANY)
                    self.assertTrue(log.called)
                    self.assertTrue(schedule_full_resync.called)

    def test_periodic_resync(self):
        """periodic_resync delegates to the resync helper."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        with mock.patch.object(bgp_dr,
                               '_periodic_resync_helper') as resync_helper:
            bgp_dr.periodic_resync(self.context)
            self.assertTrue(resync_helper.called)

    def test_periodic_resync_helper(self):
        """The helper consumes queued resync reasons before syncing."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        bgp_dr.schedule_resync('foo reason', 'foo-id')
        with mock.patch.object(bgp_dr, 'sync_state') as sync_state:
            sync_state.side_effect = RuntimeError
            with testtools.ExpectedException(RuntimeError):
                bgp_dr._periodic_resync_helper(self.context)
            self.assertTrue(sync_state.called)
            # CONSISTENCY FIX: expected value first, matching the rest of
            # the module's assertEqual usage.
            self.assertEqual(0, len(bgp_dr.needs_resync_reasons))

    def _test_add_bgp_peer_helper(self, bgp_speaker_id,
                                  bgp_peer, cached_bgp_speaker,
                                  put_bgp_peer_called=True):
        """Add a peer to a speaker and verify whether it was cached."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        bgp_dr.cache.cache = cached_bgp_speaker
        with mock.patch.object(
                bgp_dr.cache, 'put_bgp_peer') as mock_put_bgp_peer:
            # BUG FIX: the original hard-coded 'foo-id' here, silently
            # ignoring the bgp_speaker_id parameter. Both existing callers
            # pass 'foo-id', so behavior is unchanged while the helper now
            # honors its argument.
            bgp_dr.add_bgp_peer_to_bgp_speaker(bgp_speaker_id, 12345,
                                               bgp_peer)
            if put_bgp_peer_called:
                mock_put_bgp_peer.assert_called_once_with(
                    bgp_speaker_id, bgp_peer)
            else:
                self.assertFalse(mock_put_bgp_peer.called)

    def test_add_bgp_peer_not_cached(self):
        """A peer absent from the cache is stored."""
        bgp_peer = {'peer_ip': '1.1.1.1', 'remote_as': 34567,
                    'auth_type': 'md5', 'password': 'abc'}
        cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345},
                                         'peers': {},
                                         'advertised_routes': []}}
        self._test_add_bgp_peer_helper('foo-id', bgp_peer, cached_bgp_speaker)

    def test_add_bgp_peer_already_cached(self):
        """A peer already in the cache is not stored twice."""
        bgp_peer = {'peer_ip': '1.1.1.1', 'remote_as': 34567,
                    'auth_type': 'md5', 'password': 'abc'}
        cached_peers = {'1.1.1.1': {'peer_ip': '1.1.1.1', 'remote_as': 34567}}
        cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345},
                                         'peers': cached_peers,
                                         'advertised_routes': []}}
        self._test_add_bgp_peer_helper('foo-id', bgp_peer, cached_bgp_speaker,
                                       put_bgp_peer_called=False)

    def _test_advertise_route_helper(self, bgp_speaker_id,
                                     route, cached_bgp_speaker,
                                     put_adv_route_called=True):
        """Advertise a route and verify whether it was cached."""
        bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        bgp_dr.cache.cache = cached_bgp_speaker
        with mock.patch.object(
                bgp_dr.cache, 'put_adv_route') as mock_put_adv_route:
            bgp_dr.advertise_route_via_bgp_speaker(bgp_speaker_id, 12345,
                                                   route)
            if put_adv_route_called:
                mock_put_adv_route.assert_called_once_with(
                    bgp_speaker_id, route)
            else:
                self.assertFalse(mock_put_adv_route.called)

    def test_advertise_route_helper_not_cached(self):
        """A route absent from the cache is stored."""
        route = {'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'}
        cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345},
                                         'peers': {},
                                         'advertised_routes': []}}
        self._test_advertise_route_helper('foo-id', route, cached_bgp_speaker,
                                          put_adv_route_called=True)

    def test_advertise_route_helper_already_cached(self):
        """A route already in the cache is not stored twice."""
        route = {'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'}
        cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345},
                                         'peers': {},
                                         'advertised_routes': [route]}}
        self._test_advertise_route_helper('foo-id', route, cached_bgp_speaker,
                                          put_adv_route_called=False)
class TestBgpDrAgentEventHandler(base.BaseTestCase):
    # Tests for the RPC event-handler entry points of BgpDrAgent
    # (speaker/peer/route *_end notifications). Plugin RPC, cache and
    # driver are all mocked out in setUp.
    cache_cls = 'neutron.services.bgp.agent.bgp_dragent.BgpSpeakerCache'
    def setUp(self):
        super(TestBgpDrAgentEventHandler, self).setUp()
        cfg.CONF.register_opts(bgp_config.BGP_DRIVER_OPTS, 'BGP')
        cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
        mock_log_p = mock.patch.object(bgp_dragent, 'LOG')
        self.mock_log = mock_log_p.start()
        # Patch the plugin RPC API class so self.plugin captures all calls.
        self.plugin_p = mock.patch(BGP_PLUGIN)
        plugin_cls = self.plugin_p.start()
        self.plugin = mock.Mock()
        plugin_cls.return_value = self.plugin
        # Patch the speaker cache class similarly.
        self.cache_p = mock.patch(self.cache_cls)
        cache_cls = self.cache_p.start()
        self.cache = mock.Mock()
        cache_cls.return_value = self.cache
        # Prevent a real BGP driver from being imported.
        self.driver_cls_p = mock.patch(
            'neutron.services.bgp.agent.bgp_dragent.importutils.import_class')
        self.driver_cls = self.driver_cls_p.start()
        self.bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME)
        self.schedule_full_resync_p = mock.patch.object(
            self.bgp_dr, 'schedule_full_resync')
        self.schedule_full_resync = self.schedule_full_resync_p.start()
        self.context = mock.Mock()
    def test_bgp_speaker_create_end(self):
        # speaker-create notification delegates to add_bgp_speaker_helper.
        payload = {'bgp_speaker': {'id': FAKE_BGPSPEAKER_UUID}}
        with mock.patch.object(self.bgp_dr,
                               'add_bgp_speaker_helper') as enable:
            self.bgp_dr.bgp_speaker_create_end(None, payload)
            enable.assert_called_once_with(FAKE_BGP_SPEAKER['id'])
    def test_bgp_peer_association_end(self):
        # peer-association notification delegates to add_bgp_peer_helper.
        payload = {'bgp_peer': {'speaker_id': FAKE_BGPSPEAKER_UUID,
                                'peer_id': FAKE_BGPPEER_UUID}}
        with mock.patch.object(self.bgp_dr,
                               'add_bgp_peer_helper') as enable:
            self.bgp_dr.bgp_peer_association_end(None, payload)
            enable.assert_called_once_with(FAKE_BGP_SPEAKER['id'],
                                           FAKE_BGP_PEER['id'])
    def test_route_advertisement_end(self):
        # route-advertisement notification delegates to add_routes_helper.
        routes = [{'destination': '2.2.2.2/32', 'next_hop': '3.3.3.3'},
                  {'destination': '4.4.4.4/32', 'next_hop': '5.5.5.5'}]
        payload = {'advertise_routes': {'speaker_id': FAKE_BGPSPEAKER_UUID,
                                        'routes': routes}}
        expected_calls = [mock.call(FAKE_BGP_SPEAKER['id'], routes)]
        with mock.patch.object(self.bgp_dr,
                               'add_routes_helper') as enable:
            self.bgp_dr.bgp_routes_advertisement_end(None, payload)
            enable.assert_has_calls(expected_calls)
    def test_add_bgp_speaker_helper(self):
        # Helper fetches speaker info over RPC, then configures the agent.
        self.plugin.get_bgp_speaker_info.return_value = FAKE_BGP_SPEAKER
        add_bs_p = mock.patch.object(self.bgp_dr,
                                     'add_bgp_speaker_on_dragent')
        add_bs = add_bs_p.start()
        self.bgp_dr.add_bgp_speaker_helper(FAKE_BGP_SPEAKER['id'])
        self.plugin.assert_has_calls([
            mock.call.get_bgp_speaker_info(mock.ANY,
                                           FAKE_BGP_SPEAKER['id'])])
        add_bs.assert_called_once_with(FAKE_BGP_SPEAKER)
    def test_add_bgp_peer_helper(self):
        # Helper fetches peer info over RPC, then adds it to the speaker.
        self.plugin.get_bgp_peer_info.return_value = FAKE_BGP_PEER
        add_bp_p = mock.patch.object(self.bgp_dr,
                                     'add_bgp_peer_to_bgp_speaker')
        add_bp = add_bp_p.start()
        self.bgp_dr.add_bgp_peer_helper(FAKE_BGP_SPEAKER['id'],
                                        FAKE_BGP_PEER['id'])
        self.plugin.assert_has_calls([
            mock.call.get_bgp_peer_info(mock.ANY,
                                        FAKE_BGP_PEER['id'])])
        self.assertEqual(1, add_bp.call_count)
    def test_add_routes_helper(self):
        # Helper advertises each route via the speaker.
        add_rt_p = mock.patch.object(self.bgp_dr,
                                     'advertise_route_via_bgp_speaker')
        add_bp = add_rt_p.start()
        self.bgp_dr.add_routes_helper(FAKE_BGP_SPEAKER['id'], FAKE_ROUTES)
        self.assertEqual(1, add_bp.call_count)
    def test_bgp_speaker_remove_end(self):
        # speaker-remove notification unconfigures the speaker.
        payload = {'bgp_speaker': {'id': FAKE_BGPSPEAKER_UUID}}
        with mock.patch.object(self.bgp_dr,
                               'remove_bgp_speaker_from_dragent') as disable:
            self.bgp_dr.bgp_speaker_remove_end(None, payload)
            disable.assert_called_once_with(FAKE_BGP_SPEAKER['id'])
    def test_bgp_peer_disassociation_end(self):
        # peer-disassociation notification removes the peer by IP.
        payload = {'bgp_peer': {'speaker_id': FAKE_BGPSPEAKER_UUID,
                                'peer_ip': '1.1.1.1'}}
        with mock.patch.object(self.bgp_dr,
                               'remove_bgp_peer_from_bgp_speaker') as disable:
            self.bgp_dr.bgp_peer_disassociation_end(None, payload)
            disable.assert_called_once_with(FAKE_BGPSPEAKER_UUID,
                                            FAKE_BGP_PEER['peer_ip'])
    def test_bgp_routes_withdrawal_end(self):
        # route-withdrawal notification delegates to withdraw_routes_helper.
        withdraw_routes = [{'destination': '2.2.2.2/32'},
                           {'destination': '3.3.3.3/32'}]
        payload = {'withdraw_routes': {'speaker_id': FAKE_BGPSPEAKER_UUID,
                                       'routes': withdraw_routes}}
        expected_calls = [mock.call(FAKE_BGP_SPEAKER['id'], withdraw_routes)]
        with mock.patch.object(self.bgp_dr,
                               'withdraw_routes_helper') as disable:
            self.bgp_dr.bgp_routes_withdrawal_end(None, payload)
            disable.assert_has_calls(expected_calls)
class TestBGPSpeakerCache(base.BaseTestCase):
    """Tests for the speaker/peer/route cache kept by the BGP dragent."""

    def setUp(self):
        super(TestBGPSpeakerCache, self).setUp()
        # Cache layout expected right after FAKE_BGP_SPEAKER is inserted
        # with no peers and no advertised routes.
        self.expected_cache = {FAKE_BGP_SPEAKER['id']:
                               {'bgp_speaker': FAKE_BGP_SPEAKER,
                                'peers': {},
                                'advertised_routes': []}}
        self.bs_cache = bgp_dragent.BgpSpeakerCache()

    def test_put_bgp_speaker(self):
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.assertEqual(self.expected_cache, self.bs_cache.cache)

    def test_put_bgp_speaker_existing(self):
        # Re-adding a cached speaker must evict the stale entry first.
        prev_bs_info = {'id': 'foo-id'}
        with mock.patch.object(self.bs_cache,
                               'remove_bgp_speaker_by_id') as remove:
            self.bs_cache.cache[FAKE_BGP_SPEAKER['id']] = prev_bs_info
            self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
            remove.assert_called_once_with(prev_bs_info)
            self.assertEqual(self.expected_cache, self.bs_cache.cache)

    def test_remove_bgp_speaker_by_id(self):
        # NOTE(review): renamed from ``remove_bgp_speaker_by_id`` -- without
        # the ``test_`` prefix unittest discovery silently skipped this case.
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.assertEqual(1, len(self.bs_cache.cache))
        self.bs_cache.remove_bgp_speaker_by_id(FAKE_BGP_SPEAKER['id'])
        self.assertEqual(0, len(self.bs_cache.cache))

    def test_get_bgp_speaker_by_id(self):
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.assertEqual(
            FAKE_BGP_SPEAKER,
            self.bs_cache.get_bgp_speaker_by_id(FAKE_BGP_SPEAKER['id']))

    def test_get_bgp_speaker_ids(self):
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.assertEqual([FAKE_BGP_SPEAKER['id']],
                         list(self.bs_cache.get_bgp_speaker_ids()))

    def _test_bgp_peer_helper(self, remove=False):
        """Insert FAKE_BGP_PEER; when ``remove`` also exercise removal."""
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.bs_cache.put_bgp_peer(FAKE_BGP_SPEAKER['id'], FAKE_BGP_PEER)
        expected_cache = copy.deepcopy(self.expected_cache)
        expected_cache[FAKE_BGP_SPEAKER['id']]['peers'] = {
            FAKE_BGP_PEER['peer_ip']: FAKE_BGP_PEER}
        self.assertEqual(expected_cache, self.bs_cache.cache)
        if remove:
            # Removing an unknown peer IP must leave the cache untouched.
            self.bs_cache.remove_bgp_peer_by_ip(FAKE_BGP_SPEAKER['id'],
                                                'foo-ip')
            self.assertEqual(expected_cache, self.bs_cache.cache)
            self.bs_cache.remove_bgp_peer_by_ip(FAKE_BGP_SPEAKER['id'],
                                                FAKE_BGP_PEER['peer_ip'])
            self.assertEqual(self.expected_cache, self.bs_cache.cache)

    def test_put_bgp_peer(self):
        self._test_bgp_peer_helper()

    def test_remove_bgp_peer(self):
        self._test_bgp_peer_helper(remove=True)

    def _test_bgp_speaker_adv_route_helper(self, remove=False):
        """Advertise two routes; when ``remove`` also exercise withdrawal."""
        self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER)
        self.bs_cache.put_adv_route(FAKE_BGP_SPEAKER['id'], FAKE_ROUTE)
        expected_cache = copy.deepcopy(self.expected_cache)
        expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'].append(
            FAKE_ROUTE)
        self.assertEqual(expected_cache, self.bs_cache.cache)
        fake_route_2 = copy.deepcopy(FAKE_ROUTE)
        fake_route_2['destination'] = '4.4.4.4/32'
        self.bs_cache.put_adv_route(FAKE_BGP_SPEAKER['id'], fake_route_2)
        expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'].append(
            fake_route_2)
        self.assertEqual(expected_cache, self.bs_cache.cache)
        if remove:
            self.bs_cache.remove_adv_route(FAKE_BGP_SPEAKER['id'],
                                           fake_route_2)
            expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'] = (
                [FAKE_ROUTE])
            self.assertEqual(expected_cache, self.bs_cache.cache)
            self.bs_cache.remove_adv_route(FAKE_BGP_SPEAKER['id'],
                                           FAKE_ROUTE)
            self.assertEqual(self.expected_cache, self.bs_cache.cache)

    def test_put_bgp_speaker_adv_route(self):
        self._test_bgp_speaker_adv_route_helper()

    def test_remove_bgp_speaker_adv_route(self):
        self._test_bgp_speaker_adv_route_helper(remove=True)

    def test_is_bgp_speaker_adv_route_present(self):
        self._test_bgp_speaker_adv_route_helper()
        self.assertTrue(self.bs_cache.is_route_advertised(
            FAKE_BGP_SPEAKER['id'], FAKE_ROUTE))
        self.assertFalse(self.bs_cache.is_route_advertised(
            FAKE_BGP_SPEAKER['id'], {'destination': 'foo-destination',
                                     'next_hop': 'foo-next-hop'}))

View File

@ -1,250 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver.ryu import driver as ryu_driver
from neutron.tests import base
# Test variables for BGP Speaker
FAKE_LOCAL_AS1 = 12345
FAKE_LOCAL_AS2 = 23456
FAKE_ROUTER_ID = '1.1.1.1'
# Test variables for BGP Peer
FAKE_PEER_AS = 45678
FAKE_PEER_IP = '2.2.2.5'
FAKE_AUTH_TYPE = 'md5'
FAKE_PEER_PASSWORD = 'awesome'
# Test variables for Route
FAKE_ROUTE = '2.2.2.0/24'
FAKE_NEXTHOP = '5.5.5.5'
class TestRyuBgpDriver(base.BaseTestCase):
    """Unit tests for the Ryu-backed BGP speaker driver.

    ``bgpspeaker.BGPSpeaker`` is patched in setUp, so no real BGP session
    is ever opened; the tests assert on how the driver calls into Ryu and
    on the driver's own parameter validation.
    """

    def setUp(self):
        super(TestRyuBgpDriver, self).setUp()
        cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
        cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP')
        self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
        # Patch the BGPSpeaker class itself: every add_bgp_speaker() call
        # then instantiates a Mock whose methods we can assert against.
        mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
        self.mock_ryu_speaker = mock_ryu_speaker_p.start()

    def test_add_new_bgp_speaker(self):
        # The driver must create exactly one Ryu speaker wired up with the
        # module-level best-path-change and peer up/down callbacks.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.mock_ryu_speaker.assert_called_once_with(
            as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
            bgp_server_port=0,
            best_path_change_handler=ryu_driver.best_path_change_cb,
            peer_down_handler=ryu_driver.bgp_peer_down_cb,
            peer_up_handler=ryu_driver.bgp_peer_up_cb)

    def test_remove_bgp_speaker(self):
        # Deleting the speaker must empty the cache and shut the Ryu
        # speaker down exactly once.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(0,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.assertEqual(1, speaker.shutdown.call_count)

    def test_add_bgp_peer_without_password(self):
        # Without auth the peer is added with password=None.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
                                         FAKE_PEER_IP,
                                         FAKE_PEER_AS)
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        speaker.neighbor_add.assert_called_once_with(
            address=FAKE_PEER_IP,
            remote_as=FAKE_PEER_AS,
            password=None,
            connect_mode=CONNECT_MODE_ACTIVE)

    def test_add_bgp_peer_with_password(self):
        # With md5 auth the configured password must reach Ryu verbatim.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
                                         FAKE_PEER_IP,
                                         FAKE_PEER_AS,
                                         FAKE_AUTH_TYPE,
                                         FAKE_PEER_PASSWORD)
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        speaker.neighbor_add.assert_called_once_with(
            address=FAKE_PEER_IP,
            remote_as=FAKE_PEER_AS,
            password=FAKE_PEER_PASSWORD,
            connect_mode=CONNECT_MODE_ACTIVE)

    def test_remove_bgp_peer(self):
        # NOTE: the peer was never added here; the driver forwards the
        # delete to Ryu's neighbor_del unconditionally.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)

    def test_advertise_route(self):
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
                                            FAKE_ROUTE,
                                            FAKE_NEXTHOP)
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
                                                   next_hop=FAKE_NEXTHOP)

    def test_withdraw_route(self):
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
        speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
        speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)

    def test_add_same_bgp_speakers_twice(self):
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
                          self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)

    def test_add_different_bgp_speakers_when_one_already_added(self):
        # The driver hosts at most one speaker; a second AS must be refused.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
                          self.ryu_bgp_driver.add_bgp_speaker,
                          FAKE_LOCAL_AS2)

    def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
        # AS numbers must be integers, not strings.
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.add_bgp_speaker, '12345')

    def test_add_bgp_speaker_with_invalid_asnum_range(self):
        # Valid 2-byte AS range is 0..65535; both neighbors are rejected.
        self.assertRaises(bgp_driver_exc.InvalidParamRange,
                          self.ryu_bgp_driver.add_bgp_speaker, -1)
        self.assertRaises(bgp_driver_exc.InvalidParamRange,
                          self.ryu_bgp_driver.add_bgp_speaker, 65536)

    def test_add_bgp_peer_with_invalid_paramtype(self):
        # NOTE: 'InvaildAuthType' spelling matches the exception class as
        # declared in the driver's exceptions module.
        # Test with an invalid asnum data-type
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
        # Test with an invalid auth-type and an invalid password
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
                          'sha-1', 1234)
        # Test with an invalid auth-type and a valid password
        self.assertRaises(bgp_driver_exc.InvaildAuthType,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
                          'hmac-md5', FAKE_PEER_PASSWORD)
        # Test with none auth-type and a valid password
        self.assertRaises(bgp_driver_exc.InvaildAuthType,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
                          'none', FAKE_PEER_PASSWORD)
        # Test with none auth-type and an invalid password
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
                          'none', 1234)
        # Test with a valid auth-type and no password
        self.assertRaises(bgp_driver_exc.PasswordNotSpecified,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
                          FAKE_AUTH_TYPE, None)

    def test_add_bgp_peer_with_invalid_asnum_range(self):
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.InvalidParamRange,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, -1)
        self.assertRaises(bgp_driver_exc.InvalidParamRange,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536)

    def test_add_bgp_peer_without_adding_speaker(self):
        self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
                          self.ryu_bgp_driver.add_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS)

    def test_remove_bgp_peer_with_invalid_paramtype(self):
        # Peer IP must be a string; an integer is rejected.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.delete_bgp_peer,
                          FAKE_LOCAL_AS1, 12345)

    def test_remove_bgp_peer_without_adding_speaker(self):
        self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
                          self.ryu_bgp_driver.delete_bgp_peer,
                          FAKE_LOCAL_AS1, FAKE_PEER_IP)

    def test_advertise_route_with_invalid_paramtype(self):
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.advertise_route,
                          FAKE_LOCAL_AS1, 12345, FAKE_NEXTHOP)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.advertise_route,
                          FAKE_LOCAL_AS1, FAKE_ROUTE, 12345)

    def test_advertise_route_without_adding_speaker(self):
        self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
                          self.ryu_bgp_driver.advertise_route,
                          FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP)

    def test_withdraw_route_with_invalid_paramtype(self):
        # NOTE(review): both assertRaises calls below pass identical
        # arguments; the second was presumably meant to exercise a
        # different invalid parameter -- candidate for a follow-up fix.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.withdraw_route,
                          FAKE_LOCAL_AS1, 12345)
        self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.withdraw_route,
                          FAKE_LOCAL_AS1, 12345)

    def test_withdraw_route_without_adding_speaker(self):
        self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
                          self.ryu_bgp_driver.withdraw_route,
                          FAKE_LOCAL_AS1, FAKE_ROUTE)

    def test_add_multiple_bgp_speakers(self):
        # End-to-end check of the single-speaker invariant: a second
        # speaker can neither be added nor deleted, and removing the
        # first one empties the cache.
        self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
                          self.ryu_bgp_driver.add_bgp_speaker,
                          FAKE_LOCAL_AS2)
        self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
                          self.ryu_bgp_driver.delete_bgp_speaker,
                          FAKE_LOCAL_AS2)
        self.assertEqual(1,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
        self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
        self.assertEqual(0,
            self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())

View File

@ -1,48 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.services.bgp.driver import utils
from neutron.tests import base
FAKE_LOCAL_AS = 12345
FAKE_RYU_SPEAKER = {}
class TestBgpMultiSpeakerCache(base.BaseTestCase):
    """Exercise the AS-number keyed cache of hosted Ryu BGP speakers."""

    def setUp(self):
        super(TestBgpMultiSpeakerCache, self).setUp()
        self.expected_cache = {FAKE_LOCAL_AS: FAKE_RYU_SPEAKER}
        self.bs_cache = utils.BgpMultiSpeakerCache()

    def _populate(self):
        # Shared fixture step: insert the single fake speaker.
        self.bs_cache.put_bgp_speaker(FAKE_LOCAL_AS, FAKE_RYU_SPEAKER)

    def test_put_bgp_speaker(self):
        self._populate()
        self.assertEqual(self.expected_cache, self.bs_cache.cache)

    def test_remove_bgp_speaker(self):
        self._populate()
        self.assertEqual(1, len(self.bs_cache.cache))
        self.bs_cache.remove_bgp_speaker(FAKE_LOCAL_AS)
        self.assertEqual(0, len(self.bs_cache.cache))

    def test_get_bgp_speaker(self):
        self._populate()
        self.assertEqual(FAKE_RYU_SPEAKER,
                         self.bs_cache.get_bgp_speaker(FAKE_LOCAL_AS))

    def test_get_hosted_bgp_speakers_count(self):
        self._populate()
        self.assertEqual(1, self.bs_cache.get_hosted_bgp_speakers_count())

View File

@ -1,224 +0,0 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from oslo_utils import importutils
from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class TestBgpDrAgentSchedulerBaseTestCase(testlib_api.SqlTestCase):
    """Shared fixture: one persisted BGP speaker plus agent/DB helpers."""

    def setUp(self):
        super(TestBgpDrAgentSchedulerBaseTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        # The dict form is what the filter APIs consume; the bare id is a
        # convenience for the DB helpers below.
        self.bgp_speaker = {'id': 'foo_bgp_speaker_id'}
        self.bgp_speaker_id = 'foo_bgp_speaker_id'
        self._save_bgp_speaker(self.bgp_speaker_id)

    def _create_and_set_agents_down(self, hosts, down_agent_count=0,
                                    admin_state_up=True):
        """Register one BGP dragent per host.

        The first ``down_agent_count`` agents are registered as dead
        (``alive=False``); the rest report as alive.
        """
        agents = []
        for i, host in enumerate(hosts):
            is_alive = i >= down_agent_count
            agents.append(helpers.register_bgp_dragent(
                host,
                admin_state_up=admin_state_up,
                alive=is_alive))
        return agents

    def _save_bgp_speaker(self, bgp_speaker_id):
        """Persist a minimal BGP speaker row under the given id."""
        cls = bgp_db.BgpDbMixin()
        bgp_speaker_body = {'bgp_speaker': {
            'name': 'fake_bgp_speaker',
            'ip_version': '4',
            'local_as': '123',
            'advertise_floating_ip_host_routes': '0',
            'advertise_tenant_networks': '0',
            'peers': [],
            'networks': []}}
        cls._save_bgp_speaker(self.ctx, bgp_speaker_body, uuid=bgp_speaker_id)

    def _test_schedule_bind_bgp_speaker(self, agents, bgp_speaker_id):
        """Bind the speaker to ``agents`` and verify the bindings landed."""
        scheduler = bgp_dras.ChanceScheduler()
        scheduler.resource_filter.bind(self.ctx, agents, bgp_speaker_id)
        results = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).filter_by(
            bgp_speaker_id=bgp_speaker_id).all()
        for result in results:
            self.assertEqual(bgp_speaker_id, result.bgp_speaker_id)
class TestBgpDrAgentScheduler(TestBgpDrAgentSchedulerBaseTestCase,
                              bgp_db.BgpDbMixin):
    """Scheduling must bind the fixture speaker to every given dragent."""

    def _bind_to_hosts(self, hosts):
        # Register agents for ``hosts`` and bind the fixture speaker to them.
        agents = self._create_and_set_agents_down(hosts)
        self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id)

    def test_schedule_bind_bgp_speaker_single_agent(self):
        self._bind_to_hosts(['host-a'])

    def test_schedule_bind_bgp_speaker_multi_agents(self):
        self._bind_to_hosts(['host-a', 'host-b'])
class TestBgpAgentFilter(TestBgpDrAgentSchedulerBaseTestCase,
                         bgp_db.BgpDbMixin,
                         bgp_dras_db.BgpDrAgentSchedulerDbMixin):
    """Checks which dragents the scheduler filter offers a speaker to."""

    def setUp(self):
        super(TestBgpAgentFilter, self).setUp()
        self.bgp_drscheduler = importutils.import_object(
            'neutron.services.bgp.scheduler.'
            'bgp_dragent_scheduler.ChanceScheduler'
        )
        self.plugin = self

    def _test_filter_agents_helper(self, bgp_speaker,
                                   expected_filtered_dragent_ids=None,
                                   expected_num_agents=1):
        # Run the resource filter, then compare both the reported agent
        # count and the exact hostable-agent ids against the expectation.
        expected_ids = expected_filtered_dragent_ids
        if expected_ids is None:
            expected_ids = []
        result = self.plugin.bgp_drscheduler.resource_filter.filter_agents(
            self.plugin, self.ctx, bgp_speaker)
        self.assertEqual(expected_num_agents, result['n_agents'])
        hostable_ids = [agent.id for agent in result['hostable_agents']]
        self.assertEqual(len(expected_ids), len(hostable_ids))
        for hostable_id in hostable_ids:
            self.assertIn(hostable_id, expected_ids)

    def test_filter_agents_single_agent(self):
        agents = self._create_and_set_agents_down(['host-a'])
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=[agents[0].id])

    def test_filter_agents_no_agents(self):
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=[],
            expected_num_agents=0)

    def test_filter_agents_two_agents(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=[agent.id for agent in agents])

    def test_filter_agents_agent_already_scheduled(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id)
        self._test_filter_agents_helper(self.bgp_speaker,
                                        expected_num_agents=0)

    def test_filter_agents_multiple_agents_bgp_speakers(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id)
        other_speaker = {'id': 'bar-speaker-id'}
        self._save_bgp_speaker(other_speaker['id'])
        self._test_filter_agents_helper(
            other_speaker,
            expected_filtered_dragent_ids=[agents[1].id])
class TestAutoScheduleBgpSpeakers(TestBgpDrAgentSchedulerBaseTestCase):
    """Unit test scenarios for schedule_unscheduled_bgp_speakers.

    bgp_speaker_present
        BGP speaker is present or not
    scheduled_already
        BGP speaker is already scheduled to the agent or not
    agent_down
        BGP DRAgent is down or alive
    valid_host
        If true, then a valid host is passed to schedule BGP speaker,
        else an invalid host is passed.
    """
    scenarios = [
        ('BGP speaker present',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=True)),

        ('No BGP speaker',
         dict(bgp_speaker_present=False,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP speaker already scheduled',
         dict(bgp_speaker_present=True,
              scheduled_already=True,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP DR agent down',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=True,
              valid_host=False,
              expected_result=False)),

        ('Invalid host',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=False,
              expected_result=False)),
    ]

    def test_auto_schedule_bgp_speaker(self):
        """Run one scenario and check both return value and DB bindings."""
        scheduler = bgp_dras.ChanceScheduler()
        if self.bgp_speaker_present:
            down_agent_count = 1 if self.agent_down else 0
            agents = self._create_and_set_agents_down(
                ['host-a'], down_agent_count=down_agent_count)
            if self.scheduled_already:
                self._test_schedule_bind_bgp_speaker(agents,
                                                     self.bgp_speaker_id)
        # A binding row is only expected when the speaker exists and the
        # scheduling host matches the registered agent's host.
        expected_hosted_agents = (1 if self.bgp_speaker_present and
                                  self.valid_host else 0)
        host = "host-a" if self.valid_host else "host-b"
        observed_ret_value = scheduler.schedule_unscheduled_bgp_speakers(
            self.ctx, host)
        self.assertEqual(self.expected_result, observed_ret_value)
        hosted_agents = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).all()
        self.assertEqual(expected_hosted_agents, len(hosted_agents))

View File

@ -0,0 +1,23 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext

import six

# Install the ``_`` translation function for the 'neutron' domain into
# builtins.  On Python 2, ``unicode=1`` makes the installed ``_`` return
# unicode objects instead of encoded str; Python 3's gettext is
# unicode-only, so the keyword no longer exists there.
if six.PY2:
    gettext.install('neutron', unicode=1)
else:
    gettext.install('neutron')

View File

@ -0,0 +1,42 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "neutron_dynamic_routing"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
    """Return the languages oslo_i18n has translations for in our domain."""
    return oslo_i18n.get_available_languages(DOMAIN)

View File

@ -0,0 +1,30 @@
# Copyright (c) 2016 Huawei Technologies India Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for `neutron_dynamic_routing` module"""
from oslotest import base
class TestNeutron_dynamic_routing(base.BaseTestCase):
    """Placeholder test case proving the unit-test plumbing works."""

    def setUp(self):
        """Delegate fixture setup to oslotest's BaseTestCase."""
        super(TestNeutron_dynamic_routing, self).setUp()

    def test_dummy(self):
        """Empty smoke test; to be replaced once real code lands here."""
        pass

View File

@ -0,0 +1,17 @@
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('neutron_dynamic_routing')

View File

@ -1,23 +0,0 @@
---
prelude: >
Announcement of tenant prefixes and host routes for floating
  IPs via BGP is supported
features:
- Announcement of tenant subnets via BGP using centralized Neutron
router gateway port as the next-hop
- Announcement of floating IP host routes via BGP using the centralized
Neutron router gateway port as the next-hop
- Announcement of floating IP host routes via BGP using the floating
IP agent gateway as the next-hop when the floating IP is associated
through a distributed router
issues:
- When using DVR, if a floating IP is associated to a fixed IP direct
access to the fixed IP is not possible when traffic is sent from
outside of a Neutron tenant network (north-south traffic). Traffic
sent between tenant networks (east-west traffic) is not affected.
When using a distributed router, the floating IP will mask the fixed
IP making it inaccessible, even though the tenant subnet is being
announced as accessible through the centralized SNAT router. In such
a case, traffic sent to the instance should be directed to the
floating IP. This is a limitation of the Neutron L3 agent when using
DVR and will be addressed in a future release.

View File

@ -0,0 +1,10 @@
===========================================
Neutron Dynamic Routing Release Notes Howto
===========================================
Release notes are a new feature for documenting new features in
OpenStack projects. Background on the process, tooling, and
methodology is documented in a `mailing list post by Doug Hellmann <http://lists.openstack.org/pipermail/openstack-dev/2015-November/078301.html>`_.
For information on how to create release notes, please consult the
`Release Notes documentation <http://docs.openstack.org/developer/reno/>`_.

275
releasenotes/source/conf.py Normal file
View File

@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Neutron Dynamic Routing Release Notes documentation build configuration file,
# created by sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron Dynamic Routing Release Notes'
copyright = u'2015, Neutron Dynamic Routing Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from neutron_dynamic_routing.version import version_info as neutron_dr_version
# The full version, including alpha/beta/rc tags.
release = neutron_dr_version.version_string_with_vcs()
# The short X.Y version.
version = neutron_dr_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NeutronDynamicRoutingReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NeutronDynamicRoutingReleaseNotes.tex',
u'Neutron Dynamic Routing Release Notes Documentation',
u'Neutron Dynamic Routing Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE: each man page entry must be a 5-tuple
# (source start file, name, description, authors, manual section).
# The description was previously split into two tuple elements, producing a
# 6-tuple that the Sphinx man page builder rejects.
man_pages = [
    ('index', 'neutrondynamicroutingreleasenotes',
     u'Neutron Dynamic Routing Release Notes Documentation',
     [u'Neutron Dynamic Routing Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE: each Texinfo entry must be a 7-tuple
# (source start file, target name, title, author, dir menu entry,
#  description, category).  The title was previously split into two tuple
# elements, producing an 8-tuple that the Texinfo builder rejects.
texinfo_documents = [
    ('index', 'NeutronDynamicRoutingReleaseNotes',
     u'Neutron Dynamic Routing Release Notes Documentation',
     u'Neutron Dynamic Routing Developers',
     'NeutronDynamicRoutingReleaseNotes', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

View File

@ -0,0 +1,9 @@
======================================
Neutron Dynamic Routing Release Notes
======================================
.. toctree::
:maxdepth: 1
README.rst
unreleased

View File

@ -0,0 +1,5 @@
=============================
Current Series Release Notes
=============================
.. release-notes::

24
requirements.txt Normal file
View File

@ -0,0 +1,24 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=1.6 # Apache-2.0
eventlet!=0.18.3,>=0.18.2 # MIT
httplib2>=0.7.5 # MIT
netaddr!=0.7.16,>=0.7.12 # BSD
SQLAlchemy<1.1.0,>=1.0.10 # MIT
alembic>=0.8.4 # MIT
six>=1.9.0 # MIT
neutron-lib>=0.1.0 # Apache-2.0
oslo.config>=3.9.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=4.5.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.0.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
# This project does depend on neutron as a library, but the
# openstack tooling does not play nicely with projects that
# are not publicly available in pypi.
# -e git+https://git.openstack.org/openstack/neutron#egg=neutron

167
setup.cfg
View File

@ -1,6 +1,6 @@
[metadata]
name = neutron
summary = OpenStack Networking
name = neutron-dynamic-routing
summary = Neutron Dynamic Routing
description-file =
README.rst
author = OpenStack
@ -15,154 +15,14 @@ classifier =
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
[files]
packages =
neutron
data_files =
etc/neutron =
etc/api-paste.ini
etc/policy.json
etc/rootwrap.conf
etc/neutron/rootwrap.d =
etc/neutron/rootwrap.d/debug.filters
etc/neutron/rootwrap.d/dhcp.filters
etc/neutron/rootwrap.d/iptables-firewall.filters
etc/neutron/rootwrap.d/ebtables.filters
etc/neutron/rootwrap.d/ipset-firewall.filters
etc/neutron/rootwrap.d/l3.filters
etc/neutron/rootwrap.d/linuxbridge-plugin.filters
etc/neutron/rootwrap.d/openvswitch-plugin.filters
scripts =
bin/neutron-rootwrap-xen-dom0
neutron_dynamic_routing
[entry_points]
console_scripts =
neutron-bgp-dragent = neutron.cmd.eventlet.agents.bgp_dragent:main
neutron-db-manage = neutron.db.migration.cli:main
neutron-debug = neutron.debug.shell:main
neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main
neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main
neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main
neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main
neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main
neutron-macvtap-agent = neutron.cmd.eventlet.plugins.macvtap_neutron_agent:main
neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main
neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
neutron-pd-notify = neutron.cmd.pd_notify:main
neutron-server = neutron.cmd.eventlet.server:main
neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet
neutron-rootwrap = oslo_rootwrap.cmd:main
neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon
neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main
neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main
neutron-sanity-check = neutron.cmd.sanity_check:main
neutron.core_plugins =
ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
neutron.service_plugins =
dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin
firewall = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
lbaas = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
metering = neutron.services.metering.metering_plugin:MeteringPlugin
neutron.services.firewall.fwaas_plugin.FirewallPlugin = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
qos = neutron.services.qos.qos_plugin:QoSPlugin
bgp = neutron.services.bgp.bgp_plugin:BgpPlugin
tag = neutron.services.tag.tag_plugin:TagPlugin
flavors = neutron.services.flavors.flavors_plugin:FlavorsPlugin
auto_allocate = neutron.services.auto_allocate.plugin:Plugin
network_ip_availability = neutron.services.network_ip_availability.plugin:NetworkIPAvailabilityPlugin
timestamp_core = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin
neutron.qos.notification_drivers =
message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver
neutron.ml2.type_drivers =
flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver
gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
neutron.ml2.mechanism_drivers =
logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver
test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver
linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver
macvtap = neutron.plugins.ml2.drivers.macvtap.mech_driver.mech_macvtap:MacvtapMechanismDriver
openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
neutron.ml2.extension_drivers =
test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver
testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver
port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver
qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
dns = neutron.plugins.ml2.extensions.dns_integration:DNSExtensionDriverML2
neutron.openstack.common.cache.backends =
memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
neutron.ipam_drivers =
fake = neutron.tests.unit.ipam.fake_driver:FakeDriver
internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool
neutron.agent.l2.extensions =
qos = neutron.agent.l2.extensions.qos:QosAgentExtension
neutron.qos.agent_drivers =
ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver
sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver
linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers.qos_driver:QosLinuxbridgeAgentDriver
neutron.agent.linux.pd_drivers =
dibbler = neutron.agent.linux.dibbler:PDDibbler
neutron.services.external_dns_drivers =
designate = neutron.services.externaldns.drivers.designate.driver:Designate
# These are for backwards compat with Icehouse notification_driver configuration values
# TODO(mriedem): Remove these once liberty-eol happens.
oslo.messaging.notify.drivers =
neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
neutron.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver
neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver
neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver
oslo.config.opts =
neutron = neutron.opts:list_opts
neutron.agent = neutron.opts:list_agent_opts
neutron.base.agent = neutron.opts:list_base_agent_opts
neutron.bgp.agent = neutron.services.bgp.common.opts:list_bgp_agent_opts
neutron.db = neutron.opts:list_db_opts
neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts
neutron.extensions = neutron.opts:list_extension_opts
neutron.l3.agent = neutron.opts:list_l3_agent_opts
neutron.metadata.agent = neutron.opts:list_metadata_agent_opts
neutron.metering.agent = neutron.opts:list_metering_agent_opts
neutron.ml2 = neutron.opts:list_ml2_conf_opts
neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts
neutron.ml2.macvtap.agent = neutron.opts:list_macvtap_opts
neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts
neutron.ml2.sriov = neutron.opts:list_ml2_conf_sriov_opts
neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts
neutron.qos = neutron.opts:list_qos_opts
nova.auth = neutron.opts:list_auth_opts
oslo.config.opts.defaults =
neutron = neutron.common.config:set_cors_middleware_defaults
neutron.db.alembic_migrations =
neutron = neutron.db.migration:alembic_migrations
neutron.interface_drivers =
ivs = neutron.agent.linux.interface:IVSInterfaceDriver
linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
null = neutron.agent.linux.interface:NullDriver
openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver
neutron.agent.firewall_drivers =
noop = neutron.agent.firewall:NoopFirewallDriver
iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver
iptables_hybrid = neutron.agent.linux.iptables_firewall:OVSHybridIptablesFirewallDriver
openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver
[global]
setup-hooks =
pbr.hooks.setup_hook
[build_sphinx]
all_files = 1
@ -172,19 +32,16 @@ source-dir = doc/source
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = neutron/locale/neutron.pot
output_file = neutron_dynamic_routing/locale/neutron_dynamic_routing.pot
[compile_catalog]
directory = neutron/locale
domain = neutron
directory = neutron_dynamic_routing/locale
domain = neutron_dynamic_routing
[update_catalog]
domain = neutron
output_dir = neutron/locale
input_file = neutron/locale/neutron.pot
domain = neutron_dynamic_routing
output_dir = neutron_dynamic_routing/locale
input_file = neutron_dynamic_routing/locale/neutron_dynamic_routing.pot
[wheel]
universal = 1
[pbr]
warnerrors = true

View File

@ -1,4 +1,4 @@
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# Copyright (c) 2016 Huawei Technologies India Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,17 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
from neutron._i18n import _
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
BGP_DRIVER_OPTS = [
cfg.StrOpt('bgp_speaker_driver',
help=_("BGP speaker driver class to be instantiated."))
]
BGP_PROTO_CONFIG_OPTS = [
cfg.StrOpt('bgp_router_id',
help=_("32-bit BGP identifier, typically an IPv4 address "
"owned by the system running the BGP DrAgent."))
]
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)

21
test-requirements.txt Normal file
View File

@ -0,0 +1,21 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.2 # Apache-2.0
coverage>=3.6 # Apache-2.0
fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
requests-mock>=0.7.0 # Apache-2.0
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
oslo.concurrency>=3.5.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
testscenarios>=0.4 # Apache-2.0/BSD
WebOb>=1.2.3 # MIT
WebTest>=2.0 # MIT
oslotest>=1.10.0 # Apache-2.0
reno>=1.6.2 # Apache2

153
tools/check_i18n.py Normal file
View File

@ -0,0 +1,153 @@
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import compiler
import imp
import os.path
import sys
def is_localized(node):
    """Return True when the string node is the argument of a _() call."""
    parent = node.parent
    if not isinstance(parent, compiler.ast.CallFunc):
        return False
    callee = parent.node
    return isinstance(callee, compiler.ast.Name) and callee.name == '_'
class ASTWalker(compiler.visitor.ASTVisitor):
    """AST visitor that links every child node back to its parent.

    The ``parent`` attribute attached here is what the localization
    predicates (e.g. ``is_localized``) use to inspect a node's context.
    """
    def default(self, node, *args):
        # Tag each child with a back-reference before descending into it.
        for child in node.getChildNodes():
            child.parent = node
        compiler.visitor.ASTVisitor.default(self, node, *args)
class Visitor(object):
    """AST visitor that validates i18n usage of string constants.

    For every string constant visited: if the string is already wrapped in
    ``_()`` the configured format checkers are applied; otherwise the
    configured predicates decide whether the string should have been
    localized ('skip'), is an error, or deserves a warning.  ``error`` is
    set to 1 when any check fails.
    """

    def __init__(self, filename, i18n_msg_predicates,
                 msg_format_checkers, debug):
        self.filename = filename
        self.debug = debug
        self.error = 0
        self.i18n_msg_predicates = i18n_msg_predicates
        self.msg_format_checkers = msg_format_checkers
        # Keep the raw source lines so diagnostics can echo the code.
        with open(filename) as f:
            self.lines = f.readlines()

    def visitConst(self, node):
        # Only plain string constants are interesting.
        if not isinstance(node.value, str):
            return
        if is_localized(node):
            # Already wrapped in _(): verify the message format.
            for (checker, msg) in self.msg_format_checkers:
                if checker(node):
                    print('%s:%d %s: %s Error: %s' %
                          (self.filename, node.lineno,
                           self.lines[node.lineno - 1][:-1],
                           checker.__name__, msg),
                          file=sys.stderr)
                    self.error = 1
                    return
            # FIX: previously read the module-level ``debug`` global, which
            # ignored the flag passed to __init__ and raised NameError when
            # this class was used outside the __main__ script.
            if self.debug:
                print('%s:%d %s: %s' %
                      (self.filename, node.lineno,
                       self.lines[node.lineno - 1][:-1],
                       "Pass"))
        else:
            # Not localized: decide whether that is acceptable.
            for (predicate, action, msg) in self.i18n_msg_predicates:
                if predicate(node):
                    if action == 'skip':
                        if self.debug:
                            print('%s:%d %s: %s' %
                                  (self.filename, node.lineno,
                                   self.lines[node.lineno - 1][:-1],
                                   "Pass"))
                        return
                    elif action == 'error':
                        print('%s:%d %s: %s Error: %s' %
                              (self.filename, node.lineno,
                               self.lines[node.lineno - 1][:-1],
                               predicate.__name__, msg),
                              file=sys.stderr)
                        self.error = 1
                        return
                    elif action == 'warn':
                        print('%s:%d %s: %s' %
                              (self.filename, node.lineno,
                               self.lines[node.lineno - 1][:-1],
                               "Warn: %s" % msg))
                        return
                    # Config error: predicate declared an unknown action.
                    print('Predicate with wrong action!', file=sys.stderr)
def is_file_in_black_list(black_list, f):
    """Return True if file ``f`` lives under any path in ``black_list``.

    FIX: the previous implementation shadowed the ``f`` parameter with the
    loop variable and tested the module-level ``input_file`` instead, so the
    argument was silently ignored (and the function raised NameError when
    called before ``input_file`` was bound at module level).
    """
    for black_path in black_list:
        if os.path.abspath(f).startswith(os.path.abspath(black_path)):
            return True
    return False
def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug):
    """Parse one Python source file and run the i18n checks over it.

    Returns 0 when the file passes all checks, 1 when any check reported
    an error (the ``Visitor.error`` flag).
    """
    input_mod = compiler.parseFile(input_file)
    # ASTWalker attaches .parent links; Visitor performs the actual checks.
    v = compiler.visitor.walk(input_mod,
                              Visitor(input_file,
                                      i18n_msg_predicates,
                                      msg_format_checkers,
                                      debug),
                              ASTWalker())
    return v.error
if __name__ == '__main__':
    # Usage: check_i18n.py <file-or-directory> <cfg module path> [-d]
    input_path = sys.argv[1]
    cfg_path = sys.argv[2]
    try:
        # The cfg module supplies the predicates, format checkers and the
        # black list (see tools/i18n_cfg.py).
        cfg_mod = imp.load_source('', cfg_path)
    except Exception:
        print("Load cfg module failed", file=sys.stderr)
        sys.exit(1)
    i18n_msg_predicates = cfg_mod.i18n_msg_predicates
    msg_format_checkers = cfg_mod.msg_format_checkers
    black_list = cfg_mod.file_black_list
    # Optional '-d' third argument turns on per-string debug output.
    debug = False
    if len(sys.argv) > 3:
        if sys.argv[3] == '-d':
            debug = True
    if os.path.isfile(input_path):
        # Single-file mode: exit status is the check result.
        sys.exit(check_i18n(input_path,
                            i18n_msg_predicates,
                            msg_format_checkers,
                            debug))
    # Directory mode: walk the tree, checking every non-blacklisted .py file.
    error = 0
    for dirpath, dirs, files in os.walk(input_path):
        for f in files:
            if not f.endswith('.py'):
                continue
            input_file = os.path.join(dirpath, f)
            if is_file_in_black_list(black_list, input_file):
                continue
            if check_i18n(input_file,
                          i18n_msg_predicates,
                          msg_format_checkers,
                          debug):
                error = 1
    sys.exit(error)

View File

@ -0,0 +1,67 @@
# test-case for check_i18n.py
# python check_i18n.py check_i18n.txt -d
# message format checking
# capital checking
msg = _("hello world, error")
msg = _("hello world_var, error")
msg = _('file_list xyz, pass')
msg = _("Hello world, pass")
# format specifier checking
msg = _("Hello %s world %d, error")
msg = _("Hello %s world, pass")
msg = _("Hello %(var1)s world %(var2)s, pass")
# message has been localized
# is_localized
msg = _("Hello world, pass")
msg = _("Hello world, pass") % var
LOG.debug(_('Hello world, pass'))
LOG.info(_('Hello world, pass'))
raise x.y.Exception(_('Hello world, pass'))
raise Exception(_('Hello world, pass'))
# message need be localized
# is_log_callfunc
LOG.debug('hello world, error')
LOG.debug('hello world, error' % xyz)
sys.append('hello world, warn')
# is_log_i18n_msg_with_mod
LOG.debug(_('Hello world, error') % xyz)
# default warn
msg = 'hello world, warn'
msg = 'hello world, warn' % var
# message needn't be localized
# skip only one word
msg = ''
msg = "hello,pass"
# skip dict
msg = {'hello world, pass': 1}
# skip list
msg = ["hello world, pass"]
# skip subscript
msg['hello world, pass']
# skip xml marker
msg = "<test><t></t></test>, pass"
# skip sql statement
msg = "SELECT * FROM xyz WHERE hello=1, pass"
msg = "select * from xyz, pass"
# skip add statement
msg = 'hello world' + e + 'world hello, pass'
# skip doc string
"""
Hello world, pass
"""
class Msg:
pass

View File

@ -0,0 +1,52 @@
#!/usr/bin/env bash
# This script identifies the unit test modules that do not correspond
# directly with a module in the code tree. See TESTING.rst for the
# intended structure.
# Resolve the repo root and the unit test tree.
neutron_path=$(cd "$(dirname "$0")/.." && pwd)
base_test_path=neutron_dynamic_routing/tests/unit
test_path=$neutron_path/$base_test_path
# Every unit test module is expected to be named test_<module>.py.
test_files=$(find ${test_path} -iname 'test_*.py')
# Test paths (relative to the test tree) matching these regexes are exempt.
ignore_regexes=(
    "^plugins.*$"
)
error_count=0
ignore_count=0
total_count=0
for test_file in ${test_files[@]}; do
    # Path of the test file relative to the test tree root.
    relative_path=${test_file#$test_path/}
    # Mirror of that path inside the production code tree.
    expected_path=$(dirname $neutron_path/neutron_dynamic_routing/$relative_path)
    test_filename=$(basename "$test_file")
    expected_filename=${test_filename#test_}
    # Module filename (e.g. foo/bar.py -> foo/test_bar.py)
    filename=$expected_path/$expected_filename
    # Package dir (e.g. foo/ -> test_foo.py)
    package_dir=${filename%.py}
    if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
        # No matching module or package; skip it if the path is ignorable.
        for ignore_regex in ${ignore_regexes[@]}; do
            if [[ "$relative_path" =~ $ignore_regex ]]; then
                ((ignore_count++))
                continue 2
            fi
        done
        echo "Unexpected test file: $base_test_path/$relative_path"
        ((error_count++))
    fi
    ((total_count++))
done
if [ "$ignore_count" -ne 0 ]; then
    echo "$ignore_count unmatched test modules were ignored"
fi
if [ "$error_count" -eq 0 ]; then
    echo 'Success! All test modules match targets in the code tree.'
    exit 0
else
    echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
    exit 1
fi

5
tools/clean.sh Executable file
View File

@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Remove packaging and plugin build artifacts from the working tree.
rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
rm -rf */*.deb
rm -rf ./plugins/**/build/ ./plugins/**/dist
rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*

View File

@ -0,0 +1,28 @@
#!/bin/sh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Render sample configuration files with oslo-config-generator.
set -e
GEN_CMD=oslo-config-generator
# Fail early if the generator tool is not available on PATH.
if ! type "$GEN_CMD" > /dev/null; then
    echo "ERROR: $GEN_CMD not installed on the system."
    exit 1
fi
# Generate one sample config per generator config file shipped in-tree.
for file in `ls etc/oslo-config-generator/*`; do
    $GEN_CMD --config-file=$file
done
# NOTE(review): `set -x` after the final command has no visible effect here;
# presumably a leftover from debugging — confirm before removing.
set -x

97
tools/i18n_cfg.py Normal file
View File

@ -0,0 +1,97 @@
import compiler
import re
def is_log_callfunc(n):
    """LOG.xxx('hello %s' % xyz) and LOG('hello')"""
    # Step over an enclosing % operation so both forms are handled.
    if isinstance(n.parent, compiler.ast.Mod):
        n = n.parent
    call = n.parent
    if not isinstance(call, compiler.ast.CallFunc):
        return False
    func = call.node
    if not isinstance(func, compiler.ast.Getattr):
        return False
    receiver = func.getChildNodes()[0]
    return isinstance(receiver, compiler.ast.Name) and receiver.name == 'LOG'
def is_log_i18n_msg_with_mod(n):
    """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
    grandparent = n.parent.parent
    if not isinstance(grandparent, compiler.ast.Mod):
        return False
    call = grandparent.parent
    if not isinstance(call, compiler.ast.CallFunc):
        return False
    func = call.node
    if not isinstance(func, compiler.ast.Getattr):
        return False
    receiver = func.getChildNodes()[0]
    return isinstance(receiver, compiler.ast.Name) and receiver.name == 'LOG'
def is_wrong_i18n_format(n):
    """Check _('hello %s' % xyz)"""
    # Step over an enclosing % operation, mirroring is_log_callfunc.
    if isinstance(n.parent, compiler.ast.Mod):
        n = n.parent
    call = n.parent
    return (isinstance(call, compiler.ast.CallFunc) and
            isinstance(call.node, compiler.ast.Name) and
            call.node.name == '_')
"""
Used for check message need be localized or not.
(predicate_func, action, message)
"""
i18n_msg_predicates = [
# Skip ['hello world', 1]
(lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
# Skip {'hellow world', 1}
(lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
# Skip msg['hello world']
(lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
# Skip doc string
(lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
# Skip msg = "hello", in normal, message should more than one word
(lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
# Skip msg = 'hello world' + vars + 'world hello'
(lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
# Skip xml markers msg = "<test></test>"
(lambda n: len(re.compile("</.*>").findall(n.value)) > 0, 'skip', ''),
# Skip sql statement
(lambda n: len(
re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
'skip', ''),
# LOG.xxx()
(is_log_callfunc, 'error', 'Message must be localized'),
# _('hello %s' % xyz) should be _('hello %s') % xyz
(is_wrong_i18n_format, 'error',
("Message format was wrong, _('hello %s' % xyz) "
"should be _('hello %s') % xyz")),
# default
(lambda n: True, 'warn', 'Message might need localized')
]
"""
Used for checking message format. (checker_func, message)
"""
msg_format_checkers = [
# If message contain more than on format specifier, it should use
# mapping key
(lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
"The message shouldn't contain more than one format specifier"),
# Check capital
(lambda n: n.value.split(' ')[0].count('_') == 0 and
n.value[0].isalpha() and
n.value[0].islower(),
"First letter must be capital"),
(is_log_i18n_msg_with_mod,
'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
]
file_black_list = ["./neutron/tests/unit",
"./neutron/openstack",
"./neutron/plugins/bigswitch/tests"]

72
tools/install_venv.py Normal file
View File

@ -0,0 +1,72 @@
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Neutron's development virtualenv
"""
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv
def print_help():
    """Print post-setup instructions for using the new virtualenv."""
    message = """
Neutron development environment setup is complete.
Neutron development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Neutron virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
    print(message)
def main(argv):
    """Bootstrap the Neutron development virtualenv and install dependencies.

    :param argv: full command-line argument list (argv[0] is the program
        name); only ``-n``/``--no-site-packages`` is recognized.
    """
    # The repo root is one directory above tools/, where this script lives.
    repo_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    venv_dir = os.path.join(repo_root, '.venv')
    runtime_reqs = os.path.join(repo_root, 'requirements.txt')
    testing_reqs = os.path.join(repo_root, 'test-requirements.txt')
    interpreter = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    installer = install_venv.InstallVenv(repo_root, venv_dir, runtime_reqs,
                                         testing_reqs, interpreter, 'Neutron')
    options = installer.parse_args(argv)
    installer.check_python_version()
    installer.check_dependencies()
    installer.create_virtualenv(no_site_packages=options.no_site_packages)
    installer.install_dependencies()
    print_help()


if __name__ == '__main__':
    main(sys.argv)

View File

@ -0,0 +1,172 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Drive creation of a project virtualenv and install its dependencies."""

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        """Print a %-formatted error to stderr and exit with status 1."""
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        """Abort unless the running interpreter is at least Python 2.6."""
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns (output, returncode) of that command. Working directory is
        self.root. Dies when check_exit_code is set and the command fails.
        """
        pipe_target = subprocess.PIPE if redirect_output else None
        child = subprocess.Popen(cmd, cwd=self.root, stdout=pipe_target)
        captured = child.communicate()[0]
        if check_exit_code and child.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), captured)
        return (captured, child.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        """Like run_command_with_code() but return only the output."""
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        """Return a Distro (or Fedora) helper matching the host OS."""
        # Red Hat family systems carry one of these release marker files.
        is_fedora_family = (os.path.exists('/etc/fedora-release') or
                            os.path.exists('/etc/redhat-release'))
        flavor = Fedora if is_fedora_family else Distro
        return flavor(self.root, self.venv, self.requirements,
                      self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        """Ensure the virtualenv tool itself is available on this host."""
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if os.path.isdir(self.venv):
            print("venv already exists...")
            return
        print('Creating venv...', end=' ')
        if no_site_packages:
            self.run_command(['virtualenv', '-q', '--no-site-packages',
                              self.venv])
        else:
            self.run_command(['virtualenv', '-q', self.venv])
        print('done.')

    def pip_install(self, *args):
        """Run ``pip install --upgrade`` inside the venv for the given args."""
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        """Upgrade packaging tools, then install all project requirements."""
        print('Installing dependencies with pip (this can take a while)...')
        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')
        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install.")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: obtain virtualenv via easy_install if needed."""

    def check_cmd(self, cmd):
        """Return truthy when ``cmd`` resolves to an executable on PATH."""
        # 'which' prints nothing for unknown commands, so the stripped
        # output doubles as the boolean answer.
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())

    def install_virtualenv(self):
        """Make virtualenv available, trying easy_install before giving up."""
        if self.check_cmd('virtualenv'):
            return
        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            print('Failed')
        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        """Return True when the RPM package ``pkg`` is installed."""
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def install_virtualenv(self):
        """Require the distro's python-virtualenv package, then delegate."""
        if self.check_cmd('virtualenv'):
            return
        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")
        super(Fedora, self).install_virtualenv()

6
tools/pretty_tox.sh Executable file
View File

@ -0,0 +1,6 @@
#! /bin/sh
# Run the test suite under testr, piping the subunit stream through
# subunit-trace.py for human-readable per-test progress output.
# Extra arguments (e.g. a test-name regex) are forwarded to testr.
TESTRARGS=$1
# fd 3 mirrors the real stdout so the subunit stream can flow to the trace
# filter, while testr's own exit code (written to fd 4 inside the subshell)
# is captured into $status and propagated as this script's exit code.
exec 3>&1
status=$(exec 4>&1 >&3; (python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status

307
tools/subunit-trace.py Executable file
View File

@ -0,0 +1,307 @@
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import functools
import os
import re
import sys
import mimeparse
import subunit
import testtools
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
class Starts(testtools.StreamResult):
    """Stream filter that echoes attachments and announces test starts."""

    def __init__(self, output):
        super(Starts, self).__init__()
        # Writable text stream that receives the trace output.
        self._output = output

    def startTestRun(self):
        # Whether the last attachment text ended mid-line (no newline), and
        # the set of test ids whose "[start]" marker was already printed.
        self._neednewline = False
        self._emitted = set()

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        """Relay the event upstream, then echo attachments / start markers."""
        super(Starts, self).status(
            test_id, test_status,
            test_tags=test_tags, runnable=runnable, file_name=file_name,
            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
            route_code=route_code, timestamp=timestamp)
        if not test_id:
            # Global (non-test) attachment: dump its text straight through.
            if not file_bytes:
                return
            # Normalize subunit's quirky default mime type before parsing.
            if not mime_type or mime_type == 'test/plain;charset=utf8':
                mime_type = 'text/plain; charset=utf-8'
            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
            content_type = testtools.content_type.ContentType(
                primary, sub, parameters)
            content = testtools.content.Content(
                content_type, lambda: [file_bytes])
            text = content.as_text()
            if text and text[-1] not in '\r\n':
                self._neednewline = True
            self._output.write(text)
        elif test_status == 'inprogress' and test_id not in self._emitted:
            if self._neednewline:
                self._neednewline = False
                self._output.write('\n')
            worker = ''
            for tag in test_tags or ():
                if tag.startswith('worker-'):
                    worker = '(' + tag[7:] + ') '
            timestr = timestamp.isoformat() if timestamp else ''
            self._output.write('%s: %s%s [start]\n' %
                               (timestr, worker, test_id))
            self._emitted.add(test_id)
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
    """Clean up the test name for display.

    By default we strip out the tags in the test because they don't help us
    in identifying the test that is run to it's result.

    Make it possible to strip out the test scenarios information (not to
    be confused with tempest scenarios) however that's often needed to
    identify generated negative tests.
    """
    # Tags live in [...], scenario ids in (...); strip each bracketed span
    # only when its toggle is on and the opener is not at position 0.
    for opener, closer, enabled in (('[', ']', strip_tags),
                                    ('(', ')', strip_scenarios)):
        if not enabled:
            continue
        start = name.find(opener)
        end = name.find(closer)
        if start > 0 and end > start:
            name = name[:start] + name[end + 1:]
    return name
def get_duration(timestamps):
    """Format the elapsed time between (start, end) as '<sec>.<usec>s'.

    Returns the empty string when either endpoint is missing.
    """
    start, end = timestamps
    if not (start and end):
        return ''
    delta = end - start
    whole_seconds = delta.days * DAY_SECONDS + delta.seconds
    return '%d.%06ds' % (whole_seconds, delta.microseconds)
def find_worker(test):
    """Return the integer worker number from a test's 'worker-N' tag.

    Falls back to the string 'NaN' when no worker tag is present.
    """
    prefix = 'worker-'
    for tag in test['tags']:
        if tag.startswith(prefix):
            return int(tag[len(prefix):])
    return 'NaN'
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
    """Print out subunit attachments.

    Print out subunit attachments that contain content. This
    runs in 2 modes, one for successes where we print out just stdout
    and stderr, and an override that dumps all the attachments.
    """
    channels = ('stdout', 'stderr')
    for name, detail in test['details'].items():
        # NOTE(sdague): the subunit names are a little crazy, and actually
        # are in the form pythonlogging:'' (with the colon and quotes)
        name = name.split(':')[0]
        if detail.content_type.type == 'test':
            detail.content_type.type = 'text'
        if not (all_channels or name in channels):
            continue
        body = detail.as_text()
        if not body:
            continue
        title = "Captured %s:" % name
        stream.write("\n%s\n%s\n" % (title, '~' * len(title)))
        # indent attachment lines 4 spaces to make them visually
        # offset
        for line in body.split('\n'):
            stream.write("    %s\n" % line)
def show_outcome(stream, test, print_failures=False, failonly=False):
    """Write one result line for ``test`` and record it in RESULTS/FAILS.

    :param stream: writable text stream receiving the report.
    :param test: subunit test dict (id, status, tags, timestamps, details).
    :param print_failures: when True, failure details are NOT dumped inline
        (they are expected to be printed later via print_fails()).
    :param failonly: suppress output for tests that did not fail.
    """
    global RESULTS
    status = test['status']
    # TODO(sdague): ask lifeless why on this?
    if status == 'exists':
        return

    worker = find_worker(test)
    name = cleanup_test_name(test['id'])
    duration = get_duration(test['timestamps'])

    RESULTS.setdefault(worker, []).append(test)

    # don't count the end of the return code as a fail
    if name == 'process-returncode':
        return

    if status == 'fail':
        FAILS.append(test)
        stream.write('{%s} %s [%s] ... FAILED\n' % (
            worker, name, duration))
        if not print_failures:
            print_attachments(stream, test, all_channels=True)
    elif not failonly:
        if status == 'success':
            stream.write('{%s} %s [%s] ... ok\n' % (
                worker, name, duration))
            print_attachments(stream, test)
        elif status == 'skip':
            stream.write('{%s} %s ... SKIPPED: %s\n' % (
                worker, name, test['details']['reason'].as_text()))
        else:
            stream.write('{%s} %s [%s] ... %s\n' % (
                worker, name, duration, test['status']))
            if not print_failures:
                print_attachments(stream, test, all_channels=True)
    stream.flush()
def print_fails(stream):
    """Print summary failure report.

    Currently unused, however there remains debate on inline vs. at end
    reporting, so leave the utility function for later use.
    """
    if not FAILS:
        return
    banner = "\n==============================\n"
    stream.write(banner)
    stream.write("Failed %s tests - output below:" % len(FAILS))
    stream.write(banner)
    for failed in FAILS:
        stream.write("\n%s\n" % failed['id'])
        stream.write("%s\n" % ('-' * len(failed['id'])))
        print_attachments(stream, failed, all_channels=True)
    stream.write('\n')
def count_tests(key, value):
    """Count recorded tests whose ``key`` field matches the regex ``value``."""
    matched = 0
    for tests in RESULTS.values():
        for item in tests:
            if key in item and re.search(value, item[key]):
                matched += 1
    return matched
def run_time():
    """Sum the parsed duration of every recorded test, in seconds."""
    total = 0.0
    for tests in RESULTS.values():
        for test in tests:
            # get_duration() yields e.g. '1.000500s'; drop the unit suffix.
            total += float(get_duration(test['timestamps']).strip('s'))
    return total
def worker_stats(worker):
    """Return (test_count, wall_clock_delta) for one worker's tests."""
    tests = RESULTS[worker]
    # Wall clock spans from the first test's start to the last test's end.
    elapsed = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
    return len(tests), elapsed
def print_summary(stream):
    """Write the totals section and the per-worker balance report."""
    stream.write("\n======\nTotals\n======\n")
    stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
                                           run_time()))
    stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
    stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
    stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))

    # we could have no results, especially as we filter out the process-codes
    if not RESULTS:
        return
    stream.write("\n==============\nWorker Balance\n==============\n")
    for w in range(max(RESULTS.keys()) + 1):
        if w not in RESULTS:
            stream.write(
                " - WARNING: missing Worker %s! "
                "Race in testr accounting.\n" % w)
        else:
            num, time = worker_stats(w)
            stream.write(" - Worker %s (%s tests) => %ss\n" %
                         (w, num, time))
def parse_args():
    """Build and apply the subunit-trace command-line parser."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-failure-debug', '-n', action='store_true',
                        dest='print_failures', help='Disable printing failure '
                        'debug information in realtime')
    parser.add_argument('--fails', '-f', action='store_true',
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is proccesed')
    # Setting TRACE_FAILONLY in the environment (to anything) turns
    # fail-only mode on by default.
    failonly_default = os.environ.get('TRACE_FAILONLY', False) is not False
    parser.add_argument('--failonly', action='store_true',
                        dest='failonly', help="Don't print success items",
                        default=failonly_default)
    return parser.parse_args()
def main():
    """Consume a subunit byte stream on stdin and emit a trace report.

    Returns the process exit code: 0 on success, 1 when any test failed
    or when no tests ran at all.
    """
    args = parse_args()
    stream = subunit.ByteStreamToStreamResult(
        sys.stdin, non_subunit_name='stdout')
    starts = Starts(sys.stdout)
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, sys.stdout,
                          print_failures=args.print_failures,
                          failonly=args.failonly
                          ))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([starts, outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout)
    return 0 if summary.wasSuccessful() else 1


if __name__ == '__main__':
    sys.exit(main())

46
tools/tox_install.sh Executable file
View File

@ -0,0 +1,46 @@
#!/bin/sh

# Many of neutron's repos suffer from the problem of depending on neutron,
# but it not existing on pypi.

# This wrapper for tox's package installer will use the existing package
# if it exists, else use zuul-cloner if that program exists, else grab it
# from neutron master via a hard-coded URL. That last case should only
# happen with devs running unit tests locally.

# Usage: tox_install.sh <constraints-file|unconstrained> <pip args...>

ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner
# 0 when neutron is already importable, non-zero otherwise.  Computed
# before "set -e" so a failing import does not abort the script.
neutron_installed=$(echo "import neutron" | python 2>/dev/null ; echo $?)
BRANCH_NAME=master

set -e

CONSTRAINTS_FILE=$1
shift

install_cmd="pip install"
# BUG FIX: quote the variables inside [ ] so the tests cannot break or
# mis-evaluate when a value is empty or contains whitespace.
if [ "$CONSTRAINTS_FILE" != "unconstrained" ]; then
    install_cmd="$install_cmd -c$CONSTRAINTS_FILE"
fi

if [ "$neutron_installed" -eq 0 ]; then
    echo "ALREADY INSTALLED" > /tmp/tox_install.txt
    echo "Neutron already installed; using existing package"
elif [ -x "$ZUUL_CLONER" ]; then
    echo "ZUUL CLONER" > /tmp/tox_install.txt
    cwd=$(/bin/pwd)
    cd /tmp
    $ZUUL_CLONER --cache-dir \
        /opt/git \
        --branch "$BRANCH_NAME" \
        git://git.openstack.org \
        openstack/neutron
    cd openstack/neutron
    $install_cmd -e .
    cd "$cwd"
else
    echo "PIP HARDCODE" > /tmp/tox_install.txt
    $install_cmd -U -egit+https://git.openstack.org/openstack/neutron@$BRANCH_NAME#egg=neutron
fi

# BUG FIX: use "$@" instead of unquoted $* so that package specifiers
# containing spaces are forwarded to pip as single arguments.
$install_cmd -U "$@"
exit $?

19
tools/with_venv.sh Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Run an arbitrary command inside the project virtualenv:
#   tools/with_venv.sh <command> [args...]
# FIX: use $(...) instead of legacy backticks and quote $0 / the activate
# path so the script works when invoked from a directory containing spaces.
TOOLS=$(dirname "$0")
VENV=$TOOLS/../.venv
source "$VENV/bin/activate" && "$@"

70
tox.ini Normal file
View File

@ -0,0 +1,70 @@
[tox]
envlist = py34,py27,pep8,pylint
minversion = 1.6
skipsdist = True
[testenv]
setenv = VIRTUAL_ENV={envdir}
usedevelop = True
install_command =
{toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh
commands =
find . -type f -name "*.py[c|o]" -delete
find . -type d -name "__pycache__" -delete
sh tools/pretty_tox.sh '{posargs}'
# there is also secret magic in pretty_tox.sh which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
[testenv:releasenotes]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:pep8]
commands = flake8
[testenv:i18n]
commands = python ./tools/check_i18n.py ./neutron_dynamic_routing ./tools/i18n_cfg.py
[testenv:cover]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands =
python setup.py test --coverage --coverage-package-name=neutron_dynamic_routing --testr-args='{posargs}'
[testenv:venv]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = pip install -U {opts} {packages}
commands = {posargs}
[testenv:docs]
commands = python setup.py build_sphinx
[flake8]
# E125 continuation line does not distinguish itself from next logical line
# E126 continuation line over-indented for hanging indent
# E128 continuation line under-indented for visual indent
# E129 visually indented line with same indent as next logical line
# E265 block comment should start with #
# H405 multi line docstring summary not separated with an empty line
# TODO(marun) H404 multi line docstring should start with a summary
ignore = E125,E126,E128,E129,E265,H404,H405
show-source = true
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios
[testenv:pylint]
deps =
{[testenv]deps}
pylint
commands =
pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron_dynamic_routing}
[hacking]
import_exceptions = neutron_dynamic_routing._i18n
local-check-factory = neutron.hacking.checks.factory
[testenv:genconfig]
commands = {toxinidir}/tools/generate_config_file_samples.sh