Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb
project. Step 2 is to remove all content from the project
repos, replacing it with a README notification where to find
ongoing work, and how to recover the repo if needed at some
future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: Ifedb11bcb9d0651f1d05d25b62de3bc6acc2d9ed
This commit is contained in:
Tony Breeds 2017-09-12 15:42:09 -06:00
parent 708bd97dee
commit ba60414dda
444 changed files with 14 additions and 100877 deletions

View File

@ -1,8 +0,0 @@
[run]
branch = True
source = networking_cisco
omit = networking_cisco/tests/*,networking_cisco/openstack/*
concurrency = greenlet
[report]
ignore_errors = True

32
.gitignore vendored
View File

@ -1,32 +0,0 @@
AUTHORS
build/*
build-stamp
ChangeLog
cover/
covhtml/
dist/
doc/build
*.DS_Store
*.pyc
*.egg-info/
neutron/vcsversion.py
neutron/versioninfo
pbr*.egg/
quantum.egg-info/
quantum/vcsversion.py
quantum/versioninfo
src/
setuptools*.egg/
*.log
*.mo
*.sw?
*~
/.*
!/.coveragerc
!/.gitignore
!/.gitreview
!/.mailmap
!/.pylintrc
!/.testr.conf
*.tar.gz
releasenotes/build

View File

@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/networking-cisco.git

View File

@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

117
.pylintrc
View File

@ -1,117 +0,0 @@
# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
# Note the 'openstack' below is intended to match only
# neutron.openstack.common. If we ever have another 'openstack'
# dirname, then we'll need to expand the ignore features in pylint :/
ignore=.git,tests,openstack
[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
locally-disabled,
# "E" Error for important programming issues (likely bugs)
access-member-before-definition,
no-member,
no-method-argument,
no-self-argument,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
arguments-differ,
attribute-defined-outside-init,
bad-builtin,
bad-indentation,
broad-except,
dangerous-default-value,
deprecated-lambda,
duplicate-key,
expression-not-assigned,
fixme,
global-statement,
global-variable-not-assigned,
no-init,
non-parent-init-called,
protected-access,
redefined-builtin,
redefined-outer-name,
signature-differs,
star-args,
super-init-not-called,
unpacking-non-sequence,
unused-argument,
unused-import,
unused-variable,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
old-style-class,
superfluous-parens,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_
[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use openstack.common.jsonutils
json
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no

View File

@ -1,6 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 \
${PYTHON:-python} -m subunit.run discover -t ./ \
${OS_TEST_PATH:-./networking_cisco/tests/unit} $LISTOPT $IDOPTION | cat
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@ -1,25 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:
http://docs.openstack.org/infra/manual/developers.html
Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/networking-cisco
Tox environments provided in networking-cisco:
* py27, py34 - Unit tests run against Mitaka neutron, on different python2.7 and python3.4
* newton - Unit tests run against Newton neutron with python2.7
* master - Unit tests run against master neutron with python2.7
* coverage - provides a report on the test coverage
* compare-coverage - compares coverage reports from before and after the current changes
* pep8 - Checks code against the pep8 and OpenStack hacking rules

View File

@ -1,32 +0,0 @@
Neutron Style Commandments
==========================
- Step 1: Read the OpenStack Style Commandments
http://docs.openstack.org/developer/hacking/
- Step 2: Read on
Neutron Specific Commandments
-----------------------------
- [N319] Validate that debug level logs are not translated
- [N320] Validate that LOG messages, except debug ones, have translations
- [N321] Validate that jsonutils module is used instead of json
- [N322] Detect common errors with assert_called_once_with
- [N323] Enforce namespace-less imports for oslo libraries
Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
All unittest classes must ultimately inherit from testtools.TestCase. In the
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.
All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.

176
LICENSE
View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@ -1,14 +0,0 @@
include AUTHORS
include README.rst
include ChangeLog
include LICENSE
include neutron/db/migration/README
include neutron/db/migration/alembic.ini
include neutron/db/migration/alembic_migrations/script.py.mako
include neutron/db/migration/alembic_migrations/versions/README
recursive-include neutron/locale *
exclude .gitignore
exclude .gitreview
global-exclude *.pyc

14
README Normal file
View File

@ -0,0 +1,14 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
For ongoing work on maintaining OpenStack packages in the Debian
distribution, please see the Debian OpenStack packaging team at
https://wiki.debian.org/OpenStack/.
For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.

View File

@ -1,25 +0,0 @@
================
networking-cisco
================
The networking-cisco project's goal is to provide support for Cisco networking
hardware and software in OpenStack deployments. This includes ML2 drivers and
agents for neutron, as well as other pieces of software which interact with
neutron to best utilise your Cisco products with OpenStack.
* Free software: Apache license
* Documentation: http://docwiki.cisco.com/wiki/OpenStack
* Source: http://git.openstack.org/cgit/openstack/networking-cisco
* Bugs: http://bugs.launchpad.net/networking-cisco
Releases and Version Support
----------------------------
From Mitaka forward, networking-cisco is branchless and releases will be made
from master. We have a goal to maintain compatibility with multiple versions of
OpenStack for as long as possible starting from version 4.0.0 which is
compatible with both Mitaka and Newton OpenStack releases.
* 4.X.X Mitaka and Newton onwards
* 3.X.X Mitaka
* 2.X.X Liberty

View File

@ -1,219 +0,0 @@
Testing Neutron
=============================================================
Overview
--------
The unit tests (neutron/test/unit/) are meant to cover as much code as
possible and should be executed without the service running. They are
designed to test the various pieces of the neutron tree to make sure
any new changes don't break existing functionality.
The functional tests (neutron/tests/functional/) are intended to
validate actual system interaction. Mocks should be used sparingly,
if at all. Care should be taken to ensure that existing system
resources are not modified and that resources created in tests are
properly cleaned up.
Development process
-------------------
It is expected that any new changes that are proposed for merge
come with tests for that feature or code area. Ideally any bugs
fixes that are submitted also have tests to prove that they stay
fixed! In addition, before proposing for merge, all of the
current tests should be passing.
Virtual environments
~~~~~~~~~~~~~~~~~~~~
Testing OpenStack projects, including Neutron, is made easier with `DevStack <https://git.openstack.org/cgit/openstack-dev/devstack>`_.
Create a machine (such as a VM or Vagrant box) running a distribution supported
by DevStack and install DevStack there. For example, there is a Vagrant script
for DevStack at https://github.com/bcwaldon/vagrant_devstack.
.. note::
If you prefer not to use DevStack, you can still check out source code on your local
machine and develop from there.
Running unit tests
------------------
There are three mechanisms for running tests: run_tests.sh, tox,
and nose2. Before submitting a patch for review you should always
ensure all test pass; a tox run is triggered by the jenkins gate
executed on gerrit for each patch pushed for review.
With these mechanisms you can either run the tests in the standard
environment or create a virtual environment to run them in.
By default after running all of the tests, any pep8 errors
found in the tree will be reported.
With `run_tests.sh`
~~~~~~~~~~~~~~~~~~~
You can use the `run_tests.sh` script in the root source directory to execute
tests in a virtualenv::
./run_tests -V
With `nose2`
~~~~~~~~~~~~
You can use `nose2`_ to run individual tests, as well as use for debugging
portions of your code::
source .venv/bin/activate
pip install nose2
nose2
There are disadvantages to running nose2 - the tests are run sequentially, so
race condition bugs will not be triggered, and the full test suite will
take significantly longer than tox & testr. The upside is that testr has
some rough edges when it comes to diagnosing errors and failures, and there is
no easy way to set a breakpoint in the Neutron code, and enter an
interactive debugging session while using testr.
It is also possible to use nose2's predecessor, `nose`_, to run the tests::
source .venv/bin/activate
pip install nose
nosetests
nose has one additional disadvantage over nose2 - it does not
understand the `load_tests protocol`_ introduced in Python 2.7. This
limitation will result in errors being reported for modules that
depend on load_tests (usually due to use of `testscenarios`_).
.. _nose2: http://nose2.readthedocs.org/en/latest/index.html
.. _nose: https://nose.readthedocs.org/en/latest/index.html
.. _load_tests protocol: https://docs.python.org/2/library/unittest.html#load-tests-protocol
.. _testscenarios: https://pypi.python.org/pypi/testscenarios/
With `tox`
~~~~~~~~~~
Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual
environments for running test cases. It uses `Testr`_ for managing the running
of the test cases.
Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python (2.6, 2.7, 3.3, etc).
Testr handles the parallel execution of series of test cases as well as
the tracking of long-running tests and other things.
Running unit tests is as easy as executing this in the root directory of the
Neutron source code::
tox
To run functional tests that do not require sudo privileges or
specific-system dependencies::
tox -e functional
To run all the functional tests in an environment that has been configured
by devstack to support sudo and system-specific dependencies::
tox -e dsvm-functional
For more information on the standard Tox-based test infrastructure used by
OpenStack and how to do some common test/debugging procedures with Testr,
see this wiki page:
https://wiki.openstack.org/wiki/Testr
.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.python.org/pypi/virtualenv
Running individual tests
~~~~~~~~~~~~~~~~~~~~~~~~
For running individual test modules or cases, you just need to pass
the dot-separated path to the module you want as an argument to it.
For executing a specific test case, specify the name of the test case
class separating it from the module path with a colon.
For example, the following would run only the JSONV2TestCase tests from
neutron/tests/unit/test_api_v2.py::
$ ./run_tests.sh neutron.tests.unit.test_api_v2.JSONV2TestCase
or::
$ tox -e py27 neutron.tests.unit.test_api_v2.JSONV2TestCase
Adding more tests
~~~~~~~~~~~~~~~~~
Neutron has a fast growing code base and there is plenty of areas that
need to be covered by unit and functional tests.
To get a grasp of the areas where tests are needed, you can check
current coverage by running::
$ ./run_tests.sh -c
Debugging
---------
By default, calls to pdb.set_trace() will be ignored when tests
are run. For pdb statements to work, invoke run_tests as follows::
$ ./run_tests.sh -d [test module path]
It's possible to debug tests in a tox environment::
$ tox -e venv -- python -m testtools.run [test module path]
Tox-created virtual environments (venv's) can also be activated
after a tox run and reused for debugging::
$ tox -e venv
$ . .tox/venv/bin/activate
$ python -m testtools.run [test module path]
Tox packages and installs the neutron source tree in a given venv
on every invocation, but if modifications need to be made between
invocation (e.g. adding more pdb statements), it is recommended
that the source tree be installed in the venv in editable mode::
# run this only after activating the venv
$ pip install --editable .
Editable mode ensures that changes made to the source tree are
automatically reflected in the venv, and that such changes are not
overwritten during the next tox run.
Post-mortem debugging
~~~~~~~~~~~~~~~~~~~~~
Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure
that the debugger .post_mortem() method will be invoked on test failure::
$ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path]
Supported debuggers are pdb, and pudb. Pudb is full-screen, console-based
visual debugger for Python which let you inspect variables, the stack,
and breakpoints in a very visual way, keeping a high degree of compatibility
with pdb::
$ ./.venv/bin/pip install pudb
$ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path]
References
==========
.. [#pudb] PUDB debugger:
https://pypi.python.org/pypi/pudb

View File

@ -1,2 +0,0 @@
[python: **.py]

View File

@ -1,15 +0,0 @@
======================
Enabling in Devstack
======================
1. Download DevStack
2. Add this repo as an external repository::
> cat local.conf
[[local|localrc]]
enable_plugin networking-cisco https://git.openstack.org/openstack/networking-cisco.git
enable_service net-cisco
3. run ``stack.sh``

View File

@ -1,19 +0,0 @@
===================================================
Enable CSR1kv script for CSR1kv plugins in Devstack
===================================================
1. Download DevStack
2. To enable CSR1kv Routing-aaS, in file localrc or local.conf in the root
DevStack directory, add networking-cisco repo as an external repository and
enable q-ciscorouter and ciscocfgagent.
Refer to following examples for the settings:
local.conf.csr1kv_and_n1kv
localrc.csr1kv_and_n1kv
3. To enable CSR1kv FWaaS, in file localrc or local.conf in the root DevStack
directory, include the following line:
enable_service cisco-fwaas
4. run "stack.sh"

View File

@ -1,827 +0,0 @@
# Neutron Cisco plugin
# ---------------------------
# Devstack library for the Cisco Neutron plugins (Nexus 1000V VSM/uVEM and
# the CSR1kv router plugin). All Q_CISCO_* knobs below take their value from
# the environment (localrc/local.conf) and fall back to the defaults shown.
# Save trace setting so it can be restored after this library is sourced
CISCO_XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Name of cisco router plugin (empty means: no Cisco router plugin)
Q_CISCO_ROUTER_PLUGIN=${Q_CISCO_ROUTER_PLUGIN:-}
# Indicate if a VSM instance should be created
Q_CISCO_PLUGIN_DEVSTACK_VSM=${Q_CISCO_PLUGIN_DEVSTACK_VSM:-True}
# Specify the VSM image file
Q_CISCO_PLUGIN_VSM_ISO_IMAGE=${Q_CISCO_PLUGIN_VSM_ISO_IMAGE:-}
# Specify the VSM management IP address
Q_CISCO_PLUGIN_VSM_IP=${Q_CISCO_PLUGIN_VSM_IP:-192.168.168.2}
# Specify the VSM username
Q_CISCO_PLUGIN_VSM_USERNAME=${Q_CISCO_PLUGIN_VSM_USERNAME:-admin}
# Specify the VSM password for above username
Q_CISCO_PLUGIN_VSM_PASSWORD=${Q_CISCO_PLUGIN_VSM_PASSWORD:-Sfish123}
# Specify the uVEM image/module
Q_CISCO_PLUGIN_UVEM_DEB_IMAGE=${Q_CISCO_PLUGIN_UVEM_DEB_IMAGE:-}
# Specify the uVEM integration bridge name
Q_CISCO_PLUGIN_INTEGRATION_BRIDGE=${Q_CISCO_PLUGIN_INTEGRATION_BRIDGE:-br-int}
# Specify the host management interface required by uVEM
Q_CISCO_PLUGIN_HOST_MGMT_INTF=${Q_CISCO_PLUGIN_HOST_MGMT_INTF:-eth0}
# Specify the upstream (public) interface required by uVEM
Q_CISCO_PLUGIN_UPSTREAM_INTF=${Q_CISCO_PLUGIN_UPSTREAM_INTF:-}
# Specify if tunneling is enabled
Q_CISCO_PLUGIN_ENABLE_TUNNELING=${Q_CISCO_PLUGIN_ENABLE_TUNNELING:-True}
# Specify the VXLAN range
Q_CISCO_PLUGIN_VXLAN_ID_RANGES=${Q_CISCO_PLUGIN_VXLAN_ID_RANGES:-5000:10000}
# Specify the VLAN range
Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094}
# Specify if VSM should be restarted
Q_CISCO_PLUGIN_RESTART_VSM=${Q_CISCO_PLUGIN_RESTART_VSM:-no}
# Specify ncclient package information (repo checkout dir, pinned version)
NCCLIENT_DIR=$DEST/ncclient
NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
# This routine puts a prefix on an existing function name:
# "_prefix_function foo bar" defines a copy of function "foo" under the new
# name "bar_foo", leaving the original "foo" in place. Dies when "foo" does
# not exist. Implemented by emitting "bar_foo()" followed by the body of
# "foo" (its "declare -f" output minus the first, name-bearing line) and
# eval-ing the result.
function _prefix_function {
    declare -F $1 > /dev/null || die "$1 doesn't exist"
    eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
}
# Succeed (status 0) when a Cisco router plugin has been selected through
# Q_CISCO_ROUTER_PLUGIN; fail (status 1) when the variable is empty.
function _has_cisco_router_plugin {
    [[ -n "$Q_CISCO_ROUTER_PLUGIN" ]]
}
# Prefix openvswitch plugin routines with "ovs" in order to differentiate
# them from cisco plugin routines, so that ovs plugin routines can coexist
# with cisco plugin routines in this script.
source $TOP_DIR/lib/neutron_plugins/openvswitch
#_prefix_function net_neutron_plugin_configure_plugin_agent ovs
#_prefix_function net_neutron_plugin_configure_service ovs
# Check the version of the installed ncclient package.
# Exits 0 when the installed ncclient distribution version is exactly
# $NCCLIENT_VERSION, 1 otherwise. The embedded bare "except" deliberately
# maps any failure (ncclient or pkg_resources missing, lookup error) to
# exit status 1, i.e. "wrong version".
function check_ncclient_version {
python << EOF
version = '$NCCLIENT_VERSION'
import sys
try:
    import pkg_resources
    import ncclient
    module_version = pkg_resources.get_distribution('ncclient').version
    if version != module_version:
        sys.exit(1)
except:
    sys.exit(1)
EOF
}
# Install the ncclient package system-wide via pip.
# "sudo -E" preserves the caller's environment (e.g. proxy settings) across
# the privilege escalation.
function install_ncclient {
    sudo -E pip install ncclient
}
# Succeed when both the Cisco ncclient repository checkout exists and the
# installed ncclient module matches the required $NCCLIENT_VERSION.
function is_ncclient_installed {
    # Either failing condition propagates a non-zero (1) status to the caller.
    [[ -d $NCCLIENT_DIR ]] && check_ncclient_version
}
# Ensure the router plugin's prerequisite: a known compatible ncclient.
# Installs it from pip only when the required version is not already present.
function _configure_cisco_router_plugin {
    is_ncclient_installed || install_ncclient
}
# Return the current VSM state.
# Prints the state column(s) of the DEVSTACK_VSM libvirt domain (e.g.
# "running" or "shut off"); prints nothing when the domain does not exist.
function _get_vsm_state {
    # Skip virsh's two header lines, find the DEVSTACK_VSM row, blank out the
    # Id and Name columns and print the remainder (the state text).
    sudo virsh list --all | awk '(NR > 2) {if ($2 == "DEVSTACK_VSM") {$1="";$2="";print;}}'
}
# This routine keeps contacting the VSM in 10s intervals until it responds.
# Args: $1 - VSM IP address, $2 - username, $3 - password (sent as HTTP
# basic auth in the probe URL). http_proxy is cleared per attempt so the
# probe always goes directly to the VSM; all curl output is discarded.
function _wait_for_vsm_to_comeup {
    echo "Wait for the VSM to come up; This may take a while"
    sh -c "while ! http_proxy= curl -s -m 5 http://$2:$3@$1; do sleep 10; done" &> /dev/null
}
# Check if the VSM console is on.
# Succeeds when a krdc viewer process is already attached to the VNC display
# of the DEVSTACK_VSM domain, fails otherwise.
function _is_vsm_console_on {
    local display_no
    local attached
    display_no=$(sudo virsh vncdisplay DEVSTACK_VSM 2> /dev/null)
    attached=$(ps --no-headers -o cmd -C krdc | sed -n 's/krdc localhost\(:\d*\)/\1/p' | awk -v vno=$display_no '$1 == vno {print}')
    [[ -n $attached ]]
}
# Open the VSM console.
# Launches a krdc VNC viewer attached to the DEVSTACK_VSM domain's VNC
# display and detaches it from the shell's job control so it survives the
# end of stack.sh.
function _open_vsm_console {
    vnc_no=$(sudo virsh vncdisplay DEVSTACK_VSM)
    krdc localhost$vnc_no&
    # remove the process from the current job list so that it's not killed at
    # the completion of stack.sh
    disown %%
}
# Close the VSM console.
# Kills the krdc viewer process (if any) that is attached to the
# DEVSTACK_VSM VNC display; a no-op when no such viewer is running.
# NOTE(review): sed BRE has no \d escape (that is a Perl-ism), so the \d*
# atoms in the sed expression below likely do not match digit runs as
# intended -- confirm behavior on the target platform's sed.
function _close_vsm_console {
    local vnc_no=$(sudo virsh vncdisplay DEVSTACK_VSM 2> /dev/null)
    local console_pid=$(ps --no-headers -o pid,cmd -C krdc | sed -n 's/\(\d*\) krdc localhost\(:\d*\)/\1 \2/p' | awk -v vno=$vnc_no '$2 == vno {print $1}')
    if [[ -n $console_pid ]]; then
        kill $console_pid
    fi
}
# Repackage the VSM installation ISO so that it carries an OVF environment
# file (ovf-env.xml) describing this deployment.
#
# Args: $1 - gateway IP (host-side tap address) written as GatewayIpV4
#       $2 - VSM management IP written as ManagementIpV4
#       $3 - path where the repackaged bootable ISO is written
# Side effects: (re)writes $DATA_DIR/neutron/cisco/ovf-env.xml, loop-mounts
#       the original $Q_CISCO_PLUGIN_VSM_ISO_IMAGE read-only via sudo, and
#       masters a new ISO with mkisofs.
#
# Fix: the closing </PropertySection> and </Environment> lines previously
# carried literal " \n'" residue, which was emitted into ovf-env.xml as
# trailing garbage after the document root and broke XML well-formedness.
function _repackage_iso_with_ovfenv {
    local vsm_tap_ip=$1
    local vsm_ip=$2
    local vsm_iso_image=$3
    local ovf_env_file=$DATA_DIR/neutron/cisco/ovf-env.xml
    # Regenerate the OVF environment file from scratch on every call
    if [[ -f $ovf_env_file ]]; then
        rm $ovf_env_file
    fi
    cat > $ovf_env_file <<-EOF
<?xml version="1.0" encoding="UTF-8"?>
<Environment
xmlns="http://schemas.dmtf.org/ovf/environment/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
xmlns:ve="http://www.vmware.com/schema/ovfenv"
oe:id="">
<PlatformSection>
<Kind>KVM</Kind>
<Locale>en</Locale>
</PlatformSection>
<PropertySection>
<Property oe:key="DomainId" oe:value="1"/>
<Property oe:key="EnableTelnet" oe:value="True"/>
<Property oe:key="GatewayIpV4" oe:value="$vsm_tap_ip"/>
<Property oe:key="HostName" oe:value="OSVSM"/>
<Property oe:key="ManagementIpV4" oe:value="$vsm_ip"/>
<Property oe:key="ManagementIpV4Subnet" oe:value="255.255.255.0"/>
<Property oe:key="OvfDeployment" oe:value="installer"/>
<Property oe:key="SvsMode" oe:value="L3"/>
<Property oe:key="Password" oe:value="$Q_CISCO_PLUGIN_VSM_PASSWORD"/>
<Property oe:key="HARole" oe:value="standalone"/>
</PropertySection>
</Environment>
EOF
    # Copy the ISO contents, drop in ovf-env.xml, and master a new ISO
    local mntdir=$(mktemp -d)
    local ddir=$(mktemp -d)
    sudo /bin/mount -o loop -t iso9660 $Q_CISCO_PLUGIN_VSM_ISO_IMAGE $mntdir
    cp -r $mntdir/* $ddir
    sudo /bin/umount $mntdir
    cp $ovf_env_file $ddir
    # Boot via the isolinux image when present, otherwise fall back to GRUB
    if [[ -f $ddir/isolinux/isolinux.bin ]]; then
        (cd $ddir; sudo /usr/bin/mkisofs -uid 0 -gid 0 -J -R -A Cisco_Nexus_1000V_VSM -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -o $vsm_iso_image .)
    else
        (cd $ddir; sudo /usr/bin/mkisofs -uid 0 -gid 0 -J -R -A Cisco_Nexus_1000V_VSM -b boot/grub/iso9660_stage1_5 -no-emul-boot -boot-load-size 4 -boot-info-table -o $vsm_iso_image .)
    fi
}
# Derive the /24 network prefix (first three octets) of
# Q_CISCO_PLUGIN_VSM_IP and echo it. Dies on a malformed address or an
# octet outside 0-255.
function _get_vsm_net {
    # Keep the parsed octets local instead of leaking shell-wide variables.
    local first second third
    first=$(echo $Q_CISCO_PLUGIN_VSM_IP | sed -n 's/^\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.[0-9]\{1,3\}$/\1/p')
    second=$(echo $Q_CISCO_PLUGIN_VSM_IP | sed -n 's/^\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.[0-9]\{1,3\}$/\2/p')
    third=$(echo $Q_CISCO_PLUGIN_VSM_IP | sed -n 's/^\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.\([0-9]\{1,3\}\)\.[0-9]\{1,3\}$/\3/p')
    if [[ -z $first || -z $second || -z $third ]]; then
        die $LINENO "Incorrect network address: $Q_CISCO_PLUGIN_VSM_IP"
    fi
    # sed only guarantees 1-3 digits; enforce the 0-255 numeric range too.
    declare -i f=$first
    declare -i s=$second
    declare -i t=$third
    if (( $f > 255 || $s > 255 || $t > 255 )); then
        die $LINENO "Incorrect network address: $Q_CISCO_PLUGIN_VSM_IP"
    fi
    echo $first.$second.$third
}
# This routine retrieves the domain id and the primary mac address from the VSM
# by telnetting to it and scraping "show interface control 0" and
# "show svs domain"; the trailing sed reduces the session transcript to the
# control-interface MAC (aa:bb:cc:dd:ee:ff) and the domain id.
# Arguments: $1 - VSM management IP, $2 - login user, $3 - password.
# Callers split this function's stdout positionally, so it must stay clean.
function _get_vsm_info {
# Note: no code with output should be added here. The caller depends on the
# output from the below expect output
vsm_ip_addr=$1 user=$2 passwd=$3 expect -c '
spawn /usr/bin/telnet $env(vsm_ip_addr)
set timeout 240
expect {
-re "Trying.*Connected.*Escape.*Nexus .*login: " {
send "$env(user)\n"
exp_continue
#look for the password prompt
}
"*?assword:*" {
send "$env(passwd)\n"
}
}
expect {
-re ".*# " {
send "show interface control 0\n"
expect -indices -re ".*# "
puts [string range $expect_out(buffer) \
0 [expr $expect_out(0,start) - 1]]
send "show svs domain\n"
expect -indices -re ".*# "
puts [string range $expect_out(buffer) \
0 [expr $expect_out(0,start) - 1]]
}
}' | sed -n -e 's/ Hardware: Ethernet, address: \(..\)\(..\)\.\(..\)\(..\)\.\(..\)\(..\).*/\1:\2:\3:\4:\5:\6/p' -e 's/ Domain id: *\(\d*\)/\1/p'
# Note: no code with output should be added here. The caller depends on the
# output from the above expect output
}
# Push the initial configuration to a freshly booted VSM over telnet:
# enable the http server, NSM and segmentation features, and create/publish
# the "test-profile" and "dhcp_pp" port profiles.
# Arguments: $1 - VSM management IP, $2 - login user, $3 - password.
# The whole session is a single quoted expect program; every "send" waits
# for the next NX-OS prompt ("# ") before continuing.
function _configure_vsm {
install_package expect
vsm_ip_addr=$1 user=$2 passwd=$3 expect -c '
spawn /usr/bin/telnet $env(vsm_ip_addr)
expect {
-re "Trying.*Connected.*Escape.*Nexus .*login: " {
send "$env(user)\n"
exp_continue
#look for the password prompt
}
"*?assword:*" {
send "$env(passwd)\n"
}
}
expect -re ".*# "
send "config te\n"
expect -re ".*# "
send "feature http-server\n"
expect -re ".*# "
send "feature network-segmentation-manager\n"
expect -re ".*# "
send "feature segmentation\n"
expect -re ".*# "
send "port-profile test-profile\n"
expect -re ".*# "
send "no shut\n"
expect -re ".*# "
send "state enabled\n"
expect -re ".*# "
send "publish port-profile\n"
expect -re ".*# "
send "exit\n"
expect -re ".*# "
send "port-profile dhcp_pp\n"
expect -re ".*# "
send "no shut\n"
expect -re ".*# "
send "state enabled\n"
expect -re ".*# "
send "publish port-profile\n"
expect -re ".*# "
send "end\n"
expect -re ".*# "
send "exit\n"
'
}
# Periodically broadcast gratuitous ARPs for the VSM management IP on the
# host management interface so other nodes learn how to reach the VSM.
# Loops forever; intended to be launched as a background job.
function send_vsm_arp {
    set +o xtrace
    # Permit arping for an address that is not configured on this host.
    sudo sysctl -w net.ipv4.ip_nonlocal_bind=1
    while : ; do
        sudo arping -U -A -c 3 -q -I $Q_CISCO_PLUGIN_HOST_MGMT_INTF $Q_CISCO_PLUGIN_VSM_IP
        sleep 5
    done
}
# This routine creates (or restarts) the libvirt virtual machine that runs
# the Nexus 1000V VSM: repackages the ISO, defines/boots the domain, wires
# it to a host-local bridge, waits for it to come up and then pushes the
# initial VSM configuration.
# Globals consumed include: DATA_DIR, LIBVIRT_TYPE, MULTI_HOST and the
# Q_CISCO_PLUGIN_VSM_* / Q_CISCO_PLUGIN_RESTART_VSM settings.
function _create_devstack_vsm {
    local vsm_image_file=$DATA_DIR/neutron/cisco/DEVSTACK_VSM.img
    local vsm_iso_image
    local vsm_xml_file=$DATA_DIR/neutron/cisco/DEVSTACK_VSM.xml
    local vsm_arp_proc=$DATA_DIR/neutron/cisco/vsm_arp_proc
    local vsm_ip
    local vsm_tap_ip
    local vsm_net
    local vsm_state
    local recreate=no
    local config_vsm=False
    # Install krdc package. krdc is used to connect to the VSM desktop/console
    install_package krdc
    # Ignore all errors for this function (as latest DevStack will abort on any error)
    set +o errexit
    if [[ -z "$Q_CISCO_PLUGIN_VSM_ISO_IMAGE" ]]; then
        die $LINENO "Please specify your VSM iso image in the localrc/local.conf file!"
    fi
    if [[ ! -f "$Q_CISCO_PLUGIN_VSM_ISO_IMAGE" ]]; then
        die $LINENO "Can't find the VSM iso image file $Q_CISCO_PLUGIN_VSM_ISO_IMAGE!"
    fi
    vsm_iso_image=$DATA_DIR/neutron/cisco/$(basename $Q_CISCO_PLUGIN_VSM_ISO_IMAGE)
    if [[ ! -d $DATA_DIR/neutron/cisco ]]; then
        mkdir -p $DATA_DIR/neutron/cisco
    fi
    vsm_net=$(_get_vsm_net)
    # The host-side tap takes .1 unless the VSM itself uses .1, then .2.
    declare -i vsm_ip=${Q_CISCO_PLUGIN_VSM_IP/#$vsm_net./}
    if [[ $vsm_ip != 1 ]]; then
        vsm_tap_ip=${vsm_net}.1
    else
        vsm_tap_ip=${vsm_net}.2
    fi
    vsm_state=$(_get_vsm_state)
    # Check if the vsm image is changed
    if [[ -f $vsm_xml_file ]]; then
        matched=$(grep -c -m 1 "<source file='$vsm_iso_image'/>" $vsm_xml_file)
        # The image file is changed; the VM needs to be recreated
        if [[ "$matched" == "0" ]]; then
            recreate=yes
        fi
    else
        recreate=yes
    fi
    # In case of restart or recreate, kill the console and destroy the VM
    if [[ ( "$vsm_state" == " running" && "$Q_CISCO_PLUGIN_RESTART_VSM" == "yes" ) || "$recreate" == "yes" ]]; then
        _close_vsm_console
        sudo virsh destroy DEVSTACK_VSM
        if [[ "$recreate" == "yes" ]]; then
            sudo virsh undefine DEVSTACK_VSM
            rm -f $vsm_iso_image
            rm -f $vsm_image_file
        fi
    fi
    vsm_state=$(_get_vsm_state)
    # VSM doesn't exist; create one
    if [[ -z $vsm_state ]]; then
        local vsm_uuid=$(uuidgen -t)
        local libvirt_type=$LIBVIRT_TYPE
        config_vsm=True
        # Prepare for the iso image with ovf-env.xml inserted
        if [[ ! -f $vsm_iso_image ]]; then
            _repackage_iso_with_ovfenv $vsm_tap_ip $Q_CISCO_PLUGIN_VSM_IP $vsm_iso_image
        fi
        # Create the VSM disk image file
        if [[ ! -f $vsm_image_file ]]; then
            sudo qemu-img create $vsm_image_file 8G
        fi
        # Determine the libvirt type; fall back to qemu when kvm is absent.
        if [[ "$libvirt_type" == "kvm" ]]; then
            sudo modprobe kvm || true
            if [ ! -e /dev/kvm ]; then
                echo "WARNING: Switching to QEMU for VSM"
                libvirt_type=qemu
            fi
        fi
        # Note, mac addresses for NICs are hard-coded. VSM seems to take MAC
        # addresses in certain ranges. Therefore, random-generated MAC addresses
        # may not work.
        cat > $vsm_xml_file <<-EOF
<domain type='$libvirt_type'>
<name>DEVSTACK_VSM</name>
<uuid>$vsm_uuid</uuid>
<memory unit='KiB'>2048000</memory>
<currentMemory unit='KiB'>1024000</currentMemory>
<vcpu placement='static'>1</vcpu>
<os>
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
</os>
<features> <acpi/> <pae/> </features>
<clock offset='localtime'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>destroy</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='raw'/>
<source file='$vsm_image_file'/>
<target dev='hda' bus='ide'/>
<alias name='ide0-0-0'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='$vsm_iso_image'/>
<target dev='hdb' bus='ide'/>
<readonly/>
<alias name='ide0-0-1'/>
<address type='drive' controller='0' bus='0' target='0' unit='1'/>
</disk>
<controller type='usb' index='0'>
<alias name='usb0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='ide' index='0'>
<alias name='ide0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
</controller>
<interface type='bridge'>
<mac address='0e:1f:35:ab:45:2e'/>
<source bridge='ds-vsm-bridge'/>
<target dev='ds-vsm-vnet0'/>
<model type='e1000'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='0e:1f:35:ab:45:3e'/>
<source bridge='ds-vsm-bridge'/>
<target dev='ds-vsm-vnet1'/>
<model type='e1000'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='0e:1f:35:ab:45:4e'/>
<source bridge='ds-vsm-bridge'/>
<target dev='ds-vsm-vnet2'/>
<model type='e1000'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='5900' autoport='yes' listen='0.0.0.0' keymap='en-us'>
<listen type='address' address='0.0.0.0'/>
</graphics>
<video>
<model type='cirrus' vram='9216' heads='1'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</memballoon>
</devices>
</domain>
EOF
        sudo virsh define $vsm_xml_file
        vsm_state=$(_get_vsm_state)
    fi
    if [[ "$vsm_state" == " shut off" ]]; then
        config_vsm=True
        # Create the VSM bridge
        sudo brctl addbr ds-vsm-bridge
        sudo ip link set ds-vsm-bridge up
        # Create a veth pair for communication between the host and the VSM
        sudo ip link add tap-ds-vsm type veth peer name ds-vsm-veth
        sudo ip link set tap-ds-vsm up
        sudo ip link set tap-ds-vsm promisc on
        sudo ip link set ds-vsm-veth up
        sudo ip link set ds-vsm-veth promisc on
        sudo brctl addif ds-vsm-bridge tap-ds-vsm
        sudo ip -4 addr add $vsm_tap_ip/24 dev ds-vsm-veth
        if sudo virsh start DEVSTACK_VSM; then
            _open_vsm_console
        fi
        _wait_for_vsm_to_comeup $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD
        # In multi-host environment, send gratuitous arp to the management
        # network so that the compute nodes can reach the VSM
        MULTI_HOST=$(trueorfalse False $MULTI_HOST)
        if [[ "$MULTI_HOST" == "True" ]]; then
            local send_arp=True
            if [[ -e $vsm_arp_proc ]]; then
                local vsm_arp_proc_pid=$(cat $vsm_arp_proc | cut -d" " -f1)
                local vsm_ipaddr=$(cat $vsm_arp_proc | cut -d" " -f2)
                # Bug fix: no spaces allowed around '=' in an assignment;
                # 'local cmd = $(...)' failed and left cmd unset.
                local cmd=$(ps --no-headers -p $vsm_arp_proc_pid -o cmd)
                if [[ -n $cmd ]]; then
                    # A sender is already running; restart it only when the
                    # VSM IP has changed since it was launched.
                    if [[ $vsm_ipaddr != $Q_CISCO_PLUGIN_VSM_IP ]]; then
                        kill $vsm_arp_proc_pid
                    else
                        send_arp=False
                    fi
                fi
            fi
            if [[ $send_arp == "True" ]]; then
                send_vsm_arp&
                disown %%
                echo "$! $Q_CISCO_PLUGIN_VSM_IP" > $vsm_arp_proc
            fi
        fi
    else
        if ! _is_vsm_console_on; then
            _open_vsm_console
        fi
    fi
    if [[ "$config_vsm" == "True" ]]; then
        _configure_vsm $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD
    fi
    # Turn exit on error, back on
    set -o errexit
}
# Write the sample N1KV (uVEM) configuration to $1.
# The heredoc content is emitted verbatim; callers then patch individual
# options with n1k_iniset.
function write_n1kv_conf {
    local n1kv_cfg_file=$1
    # Note: the redirection is performed by this (non-root) shell, so the
    # original "sudo cat" elevated nothing; callers pass a user-writable
    # temp file, so plain cat is used.
    cat > $n1kv_cfg_file <<-EOF
# This is a sample N1KV configurtion file.
# <n1kv.conf> file contains all the configuration parameters for UVEM operation.
# Please find below a brief explanation of these parameters and their meaning.
# Optional Parameters and Default Values of parameters are explicitly stated.
# Note:
# a) Mandatory parameters are needed for proper UVEM operation.
# N1KV DP/DPA should start even if these are not specified.
# But there will be functional impact. For eg: in VSM connectivity
# b)For most of the mandatory parameters, you can use 'vemcmd' configuration mode.
# But to be persistent, please edit this configuration file.
#
#<vsm-connection-params>
#
# TAG: switch-domain
# Description:
# Optional: No
# Default: 1
switch-domain 1
# TAG: l3control-ipaddr
# Description: IP Address of VSM's Control I/F
# Optional: No
# Default: n/a
l3control-ipaddr 1.2.3.4
# TAG: system-primary-mac
# Description: MAC address of VSM's Control I/F
# Optional: No
# Default: n/a
# system-primary-mac -INTF
# TAG: host-mgmt-intf
# Description: Management interface of the Host
# Optional: No (Even if not on N1KV, we need this
# for Host Identification on VSM).
# Default: n/a
host-mgmt-intf eth1
#
#<system-port-profile-Info>
# Description: System Port Profiles.
# Optional: Yes (If there are no System Interfaces: Mgmt I/F etc)
#
#Trunk Profile Format
#profile <name> trunk <vlan>
#profile <name> native-vlan <vlan>
#profile <name> mtu <mtu-size>
#
#Access Profile
#profile <name> access <vlan>
#profile <name> mtu <mtu-size>
#<Port-Profile Mapping>
# Description: Port-Profile mapping for all UVEM managed Interfaces.
# Optional: Uplinks: NO. System-Veth: NO.
# : Non-System Veth: YES. (Assume it will be populated by 'libvirt')
#
# Format:
# phys <port-name> profile <profile-name>
# virt <port-name> profile <profile-name>
# TBD: For uplinks UUID also need to be specified.
#phys eth1 profile sys-uplink
#phys eth2 profile uplink2
# <host-uuid>
# Description: Host UUID
# Optional : YES. If not specified UVEM would pick host UUID using 'dmidecode'.
# host-uuid <host-uuid>
# <dvswitch-uuid>
# Description: N1KV DVS UUID. Not to be confused with Open VSwitch UUID
# Optional : YES.
# dvswitch-uuid <sw-uuid>
# TBD
# <log-path>
# Description: Log Directory Path for DP/DPA
# Optional: YES.
# Default:
# Format:
# log-path:/opt/cisco/n1kv/logs
# <uvem-ovs-brname>
#
# Description: Default Open VSwitch Bridge Name
# Optional: YES.
# Default: n1kvdvs
# Format:
# uvem-ovs-brname n1kvdvs
uvem-ovs-brname br-int
EOF
}
# Set "option value" style settings in an n1kv.conf-like file: replace the
# existing line when the option is present, append otherwise.
# Arguments: $1 - file, $2 - option name, $3 - value (may contain spaces).
function n1k_iniset {
    local file=$1
    local option=$2
    local value=$3
    # Match the option only as a whole word at line start; a bare "^$option"
    # prefix match would wrongly hit options sharing a prefix.
    if ! grep -q "^$option[[:space:]]" "$file" && ! grep -q "^$option\$" "$file"; then
        # Add at the end
        echo "option does not exist-add to bottom"
        echo -e "\n$option $value" >>"$file"
    else
        # Replace it
        echo "n1k_iniset:Replace it $option $value $file"
        # Use '|' as the sed delimiter so values containing '/' work.
        sed -i "s|^$option.*|$option $value|" "$file"
    fi
}
# Configure/Install the uVEM
# Build /etc/n1kv/n1kv.conf from the sample (or the existing file), fill in
# the VSM domain id/IP and host interfaces, then install (or restart) the
# nexus1000v Debian package.
function _configure_uvem {
    # Specify uVEM configuration information
    local N1KV_CONF_DIR=/etc/n1kv
    local N1KV_CONF=$N1KV_CONF_DIR/n1kv.conf
    local UVEM_LOCAL_DIR=$TOP_DIR/files/images/cisco
    install_package expect
    # Put config files in ``N1KV_CONF_DIR``
    if [[ ! -d $N1KV_CONF_DIR ]]; then
        sudo mkdir -p $N1KV_CONF_DIR
        sudo chown $STACK_USER $N1KV_CONF_DIR
    fi
    local n1kv_temp_file=$(mktemp)
    if [[ ! -f $N1KV_CONF ]]; then
        write_n1kv_conf $n1kv_temp_file
    else
        cp $N1KV_CONF $n1kv_temp_file
    fi
    # install a route to VSM on a compute node
    if ! is_service_enabled q-svc ; then
        local vsm_net=$(_get_vsm_net)
        sudo ip route add ${vsm_net}.0/24 dev $Q_CISCO_PLUGIN_HOST_MGMT_INTF
        # exit status 2 means the route already exists, which is fine
        if [[ $? != 0 && $? != 2 ]]; then
            die $LINENO "Cannot add route to the VSM"
        fi
    fi
    # Wait for VSM to come up before requesting information from it
    _wait_for_vsm_to_comeup $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD
    # vsm_info[0] is the control MAC, vsm_info[1] the domain id
    local vsm_info=($(_get_vsm_info $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD))
    if [[ "${vsm_info[1]}" != "" ]]; then
        n1k_iniset $n1kv_temp_file switch-domain ${vsm_info[1]}
    fi
    if [[ "$Q_CISCO_PLUGIN_VSM_IP" != "" ]]; then
        n1k_iniset $n1kv_temp_file l3control-ipaddr $Q_CISCO_PLUGIN_VSM_IP
    fi
    if [[ "$Q_CISCO_PLUGIN_HOST_MGMT_INTF" != "" ]]; then
        n1k_iniset $n1kv_temp_file host-mgmt-intf $Q_CISCO_PLUGIN_HOST_MGMT_INTF
    fi
    if [[ "$Q_CISCO_PLUGIN_UPSTREAM_INTF" != "" ]]; then
        n1k_iniset $n1kv_temp_file phys "$Q_CISCO_PLUGIN_UPSTREAM_INTF profile sys-uplink"
        # Make sure we add it to br-int as well, and force it up
        sudo ovs-vsctl -- --may-exist add-port br-int $Q_CISCO_PLUGIN_UPSTREAM_INTF
        sudo ifconfig $Q_CISCO_PLUGIN_UPSTREAM_INTF up
    fi
    if [[ "$Q_CISCO_PLUGIN_INTEGRATION_BRIDGE" != "" ]]; then
        n1k_iniset $n1kv_temp_file uvem-ovs-brname $Q_CISCO_PLUGIN_INTEGRATION_BRIDGE
    fi
    sudo cp $n1kv_temp_file $N1KV_CONF
    sudo chmod a+r $N1KV_CONF
    rm $n1kv_temp_file
    #copy the uVEM image
    if [[ -z "$Q_CISCO_PLUGIN_UVEM_DEB_IMAGE" ]]; then
        die $LINENO "Please specify your UVEM image in the localrc/local.conf file!"
    fi
    if [[ ! -f "$Q_CISCO_PLUGIN_UVEM_DEB_IMAGE" ]]; then
        die $LINENO "Can't find the UVEM image file $Q_CISCO_PLUGIN_UVEM_DEB_IMAGE!"
    fi
    local uvem_image=$(basename $Q_CISCO_PLUGIN_UVEM_DEB_IMAGE)
    if [[ ! -f $UVEM_LOCAL_DIR/$uvem_image ]]; then
        mkdir -p $UVEM_LOCAL_DIR
        if sudo dpkg -s nexus1000v; then
            sudo dpkg -r nexus1000v
        fi
        # Bug fix: '[[ -e path/*.deb ]]' does not expand the glob, so stale
        # packages were never removed; expand it explicitly instead.
        local stale_deb
        for stale_deb in $UVEM_LOCAL_DIR/*.deb; do
            [[ -e $stale_deb ]] && rm -f "$stale_deb"
        done
        cp $Q_CISCO_PLUGIN_UVEM_DEB_IMAGE $UVEM_LOCAL_DIR
        #install the uVEM
        install_package libnl1
        sudo dpkg -i $UVEM_LOCAL_DIR/$uvem_image
    else
        #restart in case of change in the VSM configuration
        sudo /etc/init.d/n1kv restart
    fi
}
# Configure n1kv plugin
# Boot the embedded DevStack VSM when requested, set the default policy
# profile options in the Cisco plugin config ($1), and install the uVEM on
# nodes that run instances.
function _configure_n1kv_subplugin {
    local cisco_cfg_file=$1
    # if Embedded VSM is deployed, launch the VSM
    if [[ "$Q_CISCO_PLUGIN_DEVSTACK_VSM" == "True" ]]; then
        _create_devstack_vsm
    fi
    iniset $cisco_cfg_file CISCO_N1K default_policy_profile test-profile
    iniset $cisco_cfg_file CISCO_N1K restrict_network_profiles False
    # The uVEM is only needed where nova-compute runs
    if is_service_enabled n-cpu; then
        _configure_uvem
    fi
}
# DevStack plugin hook for the agent side.
# q-agt is assumed enabled on every compute node, and the uVEM is only
# configured/installed where q-agt is enabled.
function net_neutron_plugin_configure_plugin_agent {
    _configure_uvem
}
# DevStack plugin hook for the server side: configure whichever Cisco
# sub-plugins (n1kv, router) are enabled.
function net_neutron_plugin_configure_service {
    local cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
    if _has_n1kv_subplugin; then
        _configure_n1kv_subplugin $cisco_cfg_file
    fi
    if _has_cisco_router_plugin; then
        _configure_cisco_router_plugin
    fi
}
# Return 0 (true) when "n1kv" appears in the Q_CISCO_PLUGIN_SUBPLUGINS array.
function _has_n1kv_subplugin {
    local sp
    for sp in "${Q_CISCO_PLUGIN_SUBPLUGINS[@]}"; do
        if [[ "$sp" == "n1kv" ]]; then
            return 0
        fi
    done
    return 1
}
# Restore xtrace
$XTRACE

View File

@ -1,43 +0,0 @@
# cisco-fwaas
# -------------
# This file implements functions required to configure the Cisco FWaaS drivers
# for use with DevStack. To include this file, specify the following
# variables in localrc/local.conf:
#
# * enable_service cisco-fwaas
#
# This cannot be used with the q-l3 or q-fwaas services and should be used with
# the ciscocfgagent and q-ciscorouter services.
# Save trace setting
CSR_FWAAS_XTRACE=$(set +o | grep xtrace)
set +o xtrace
CISCO_FWAAS_PLUGIN=neutron_fwaas.services.firewall.plugins.cisco.cisco_fwaas_plugin.CSRFirewallPlugin
# Fetch the neutron-fwaas project and install it in development mode so the
# Cisco FWaaS plugin code is importable by neutron.
function install_cisco_fwaas {
    git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH
    setup_develop $NEUTRON_FWAAS_DIR
}
# Verify the cisco-fwaas preconditions (mutually exclusive with the q-fwaas
# and q-l3 services) and register the Cisco FWaaS service plugin.
function configure_cisco_fwaas {
    echo "Checking preconditions for Cisco FWaaS ..."
    if is_service_enabled q-fwaas q-l3; then
        echo "Cannot use cisco-fwaas service when q-fwaas or q-l3 are enabled."
        echo "Aborting..."
        die $LINENO "Cannot use cisco-fwaas with q-fwaas or q-l3. Exiting!"
    fi
    _neutron_service_plugin_class_add $CISCO_FWAAS_PLUGIN
}
# Run the neutron-fwaas DB migrations so the FWaaS tables exist.
function start_cisco_fwaas {
    $NEUTRON_BIN_DIR/neutron-db-manage --subproject neutron-fwaas \
        --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE \
        upgrade head
}
# Deliberate no-op: nothing needs to be torn down for Cisco FWaaS.
function cisco_fwaas_stop {
    return 0
}
# Restore xtrace
$CSR_FWAAS_XTRACE

View File

@ -1,275 +0,0 @@
#!/usr/bin/env bash
#
# lib/neutron
# functions - functions specific to neutron
# Dependencies:
# ``functions`` file
# ``DEST`` must be defined
# ``STACK_USER`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# - net_configure_neutron
# - net_start_neutron_agents
# - net_create_neutron_initial_network
#
# ``unstack.sh`` calls the entry points in this order:
#
# - net_stop_neutron
# Neutron Networking
# ------------------
# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want
# to run Neutron on this host, make sure that q-svc is also in
# ``ENABLED_SERVICES``.
#
# See "Neutron Network Configuration" below for additional variables
# that must be set in localrc for connectivity across hosts with
# Neutron.
#
# With Neutron networking the NETWORK_MANAGER variable is ignored.
# Settings
# --------
# Set up default directories
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
count=0
Q_CISCO_CSR1KV_SETUP_SCRIPT_DIR=${Q_CISCO_CSR1KV_SETUP_SCRIPT_DIR:-$DIR_CISCO/devstack/csr1kv}
# Default Neutron Plugin
Q_PLUGIN=${Q_PLUGIN:-cisco}
# Default Neutron Port
Q_PORT=${Q_PORT:-9696}
# Default protocol
Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
# RHEL's support for namespaces requires using veths with ovs
Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
# nova vif driver that all plugins should use
Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
Q_CISCO_MGMT_CFG_AGENT_IP=10.0.100.2
# Enable ASR1K
Q_CISCO_ASR1K_ENABLED=${Q_CISCO_ASR1K_ENABLED:-True}
# MySQL info
# Use the ':-' use-default-if-unset expansion; the original '${VAR:root}'
# form is a substring expansion, so the intended defaults were never applied.
MYSQL_USER=${MYSQL_USER:-root}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
source $DIR_CISCO/devstack/csr1kv/cisco
source $DIR_CISCO/devstack/csr1kv/cisco_router
source $DIR_CISCO/devstack/csr1kv/ciscocfgagent
# Cisco Routing Service Plugin functions
# ---------------------------------
# Use security group or not
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Functions
# ---------
# Test if any Neutron services are enabled
# is_neutron_enabled
# True (0) when any entry in ENABLED_SERVICES starts with "q-".
function is_neutron_enabled {
    [[ ",${ENABLED_SERVICES}" == *",q-"* ]]
}
# Drive the Cisco CSR router configuration sequence: accounts, neutron
# config, agents; L3 is left disabled until start_cisco_csr_router runs.
function configure_cisco_csr_router {
    net_create_neutron_accounts
    net_configure_neutron
    net_start_neutron_agents
    Q_L3_ENABLED="False"
}
# One-shot start hook: run the ASR1k or CSR1kv setup scripts, enable L3 and
# create the initial networks. The global 'count' makes repeat calls no-ops.
function start_cisco_csr_router {
    if [[ "$count" == 0 ]]; then
        if [[ "$Q_CISCO_ASR1K_ENABLED" == "True" ]]; then
            setup_for_asr1k
        else
            setup_for_csr1kv
        fi
        Q_L3_ENABLED="True"
        net_create_neutron_initial_network
        count=$((count+1))
    fi
}
# net_configure_neutron()
# Set common config for Cisco router after neutron server and agents.
function net_configure_neutron {
    # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
    if is_service_enabled q-ciscorouter; then
        _configure_neutron_cisco_router
    fi
    if is_service_enabled q-agt q-svc; then
        _configure_neutron_service
    fi
}
# Look up the role assignment ($1=role, $2=user, $3=project) and create it
# if absent. Echoes the resulting role/assignment id.
function get_or_add_user_role {
    local role_name=$1
    local user=$2
    local project=$3
    # Gets user role id
    local assignment_id
    assignment_id=$(openstack user role list \
        $user \
        --project $project \
        --column "ID" \
        --column "Name" \
        | grep " $role_name " | get_field 1)
    if [[ -z "$assignment_id" ]]; then
        # Adds role to user
        assignment_id=$(openstack role add \
            $role_name \
            --user $user \
            --project $project \
            | grep " id " | get_field 2)
    fi
    echo $assignment_id
}
# net_create_neutron_accounts() - Set up common required neutron accounts
# Tenant               User       Roles
# ------------------------------------------------------------------
# service              neutron    admin        # if enabled
# Migrated from keystone_data.sh
function net_create_neutron_accounts {
    local svc_tenant
    local svc_role
    svc_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
    svc_role=$(openstack role list | awk "/ service / { print \$2 }")
    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
        local neutron_user
        neutron_user=$(get_or_create_user "neutron" \
            "$SERVICE_PASSWORD" $svc_tenant)
        get_or_add_user_role $svc_role $neutron_user $svc_tenant
        # The service catalog entry is only needed with the sql backend
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            local neutron_service
            neutron_service=$(get_or_create_service "neutron" \
                "network" "Neutron Service")
            get_or_create_endpoint $neutron_service \
                "$REGION_NAME" \
                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
        fi
    fi
}
# Run the ASR1k install scripts (csr1kv_install_all.sh) against the neutron
# DB, passing the deployer's localrc/local.conf and MySQL credentials.
# NOTE(review): 'plugin' is only assigned when Q_PLUGIN == "ml2"; for any
# other core plugin the script receives whatever (possibly empty) value
# 'plugin' already has -- confirm this is intended.
function setup_for_asr1k {
if [[ "$Q_PLUGIN" == "ml2" ]]; then
plugin=ovs
fi
echo "Running ASR1K setup scripts with ${MYSQL_USER} ${MYSQL_PASSWORD}"
# Prefer localrc when both exist, otherwise fall back to local.conf
if [[ -f $TOP_DIR/localrc ]]; then
localrc_name=$TOP_DIR/localrc
else
localrc_name=$TOP_DIR/local.conf
fi
(cd $Q_CISCO_CSR1KV_SETUP_SCRIPT_DIR; ./csr1kv_install_all.sh neutron $plugin $localrc_name $MYSQL_USER $MYSQL_PASSWORD $Q_CISCO_MGMT_CFG_AGENT_IP True)
}
# Run the CSR1kv install scripts (csr1kv_install_all.sh); maps the core
# plugin setting to the installer's plugin argument and dies for any
# deployment that is not a CSR1kv one.
function setup_for_csr1kv {
if [[ "$Q_PLUGIN" == "csr1kv_openvswitch" ]]; then
plugin=ovs
elif [[ "$Q_PLUGIN" == "cisco" || "${Q_CISCO_PLUGIN_SUBPLUGINS[0]}" == "n1kv" ]]; then
plugin=n1kv
else
die $LINENO "Not a deployment with CSR1kv. Exiting!"
fi
echo "Running CSR1Kv setup with ${MYSQL_USER} ${MYSQL_PASSWORD}"
# Prefer localrc when both exist, otherwise fall back to local.conf
if [[ -f $TOP_DIR/localrc ]]; then
localrc_name=$TOP_DIR/localrc
else
localrc_name=$TOP_DIR/local.conf
fi
(cd $Q_CISCO_CSR1KV_SETUP_SCRIPT_DIR; ./csr1kv_install_all.sh neutron $plugin $localrc_name $MYSQL_USER $MYSQL_PASSWORD $Q_CISCO_MGMT_CFG_AGENT_IP False)
}
# Create the initial router and external network once L3 is enabled.
# Sets the ROUTER_ID and EXT_NET_ID globals consumed by the v4/v6 router
# configuration helpers.
function net_create_neutron_initial_network {
    # Bug fix: plain '[' cannot parse '&&', so the original
    # '[ a == b && c = d ]' was a runtime error; use '[[ ... && ... ]]'.
    if [[ "$Q_L3_ENABLED" == "True" && "$NEUTRON_CREATE_INITIAL_NETWORK" == "True" ]]; then
        # Create a router, and add the private subnet as one of its interfaces
        if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
            # create a tenant-owned router.
            DEMO_TENANT_NAME="demo"
            DEMO_TENANT_ID=$(openstack project list | awk "/ $DEMO_TENANT_NAME / { print \$2 }")
            ROUTER_ID=$(neutron router-create --tenant-id $DEMO_TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
            die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME"
        else
            # Plugin only supports creating a single router, which should be admin owned.
            ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
            die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
        fi
        # Create an external network, and a subnet. Configure the external network as router gw
        if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
            EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
        else
            EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
        fi
        die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
        if [[ "$IP_VERSION" =~ 4.* ]]; then
            # Configure router for IPv4 public access
            _neutron_configure_router_v4
        fi
        if [[ "$IP_VERSION" =~ .*6 ]]; then
            # Configure router for IPv6 public access
            _neutron_configure_router_v6
        fi
    fi
}
# Start running processes, including screen
# Only the Cisco cfg agent is launched here; it must start after
# neutron-server, so it is not run as a 3rd-party system.
function net_start_neutron_agents {
    if is_service_enabled ciscocfgagent; then
        start_the_ciscocfgagent
    fi
}
# net_stop_neutron() - Stop running processes (non-screen)
function net_stop_neutron {
    if is_service_enabled q-ciscorouter; then
        neutron_cisco_router_stop
    fi
}
# Thin wrapper: run the common Cisco router plugin configuration.
function _configure_neutron_cisco_router {
    neutron_cisco_router_configure_common
}
# _configure_neutron_service() - Set config files for neutron service
# It is called when q-svc is enabled. Writes the core plugin, any
# accumulated service plugins and the nova-notification settings into
# neutron.conf, then delegates to the plugin-specific configuration.
function _configure_neutron_service {
    Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
    Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
    # Update either configuration file with plugin
    iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
    fi
    # Configuration for neutron notifications to nova.
    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
    # Configure plugin
    net_neutron_plugin_configure_service
}
# Restore xtrace
$XTRACE
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:

View File

@ -1,124 +0,0 @@
# Cisco router service plugin
# ---------------------------
# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
Q_CISCO_DEFAULT_ROUTER_TYPE=${Q_CISCO_DEFAULT_ROUTER_TYPE:-ASR1k_router}
CISCO_ROUTER_PLUGIN=networking_cisco.plugins.cisco.service_plugins.cisco_router_plugin.CiscoRouterPlugin
CISCO_DEVMGR_PLUGIN=networking_cisco.plugins.cisco.service_plugins.cisco_device_manager_plugin.CiscoDeviceManagerPlugin
CISCO_ROUTER_PLUGIN_CONF_FILE=cisco_router_plugin.ini
CISCO_DEVMGR_PLUGIN_CONF_FILE=cisco_device_manager_plugin.ini
NET_CISCO_SOURCE_CONF_DIR=etc/neutron/plugins/cisco
NET_CISCO_PLUGIN_DIR=/plugins/cisco
PLUGIN_CONFIG_DIR=$NEUTRON_CONF_DIR$NET_CISCO_PLUGIN_DIR
NET_CISCO_CONF_DIR=$DIR_CISCO$PLUGIN_CONFIG_DIR
# Register the Cisco router and device-manager service plugins, stage their
# config files where neutron-server will read them, install the project's
# policy.json, append deployer overrides and monkey-patch neutron-legacy.
# Mutates the Q_L3_*, Q_SERVICE_PLUGIN_CLASSES and
# Q_PLUGIN_EXTRA_CONF_FILES globals consumed later by DevStack.
function neutron_cisco_router_configure_common() {
Q_L3_ENABLED=True
Q_L3_ROUTER_PER_TENANT=True
_clear_L3_plugin
_neutron_service_plugin_class_add $CISCO_DEVMGR_PLUGIN
_neutron_service_plugin_class_add $CISCO_ROUTER_PLUGIN
mkdir -p $PLUGIN_CONFIG_DIR
cp $NET_CISCO_SOURCE_CONF_DIR/$CISCO_ROUTER_PLUGIN_CONF_FILE $PLUGIN_CONFIG_DIR/
cp $NET_CISCO_SOURCE_CONF_DIR/$CISCO_DEVMGR_PLUGIN_CONF_FILE $PLUGIN_CONFIG_DIR/
# Major version of the installed neutron: first number in 'pip show' output
NEUTRON_VERSION=$(pip show neutron | grep Version| egrep -o '[0-9]+' | head -1)
# neutron >= 10 is given the conf-dir-relative path form; older releases
# get the full plugin config dir path
if [[ NEUTRON_VERSION -ge 10 ]]; then
Q_CISCO_ROUTER_PLUGIN_CONF_FILE=$NET_CISCO_PLUGIN_DIR/$CISCO_ROUTER_PLUGIN_CONF_FILE
Q_CISCO_DEVMGR_PLUGIN_CONF_FILE=$NET_CISCO_PLUGIN_DIR/$CISCO_DEVMGR_PLUGIN_CONF_FILE
else
Q_CISCO_ROUTER_PLUGIN_CONF_FILE=$PLUGIN_CONFIG_DIR/$CISCO_ROUTER_PLUGIN_CONF_FILE
Q_CISCO_DEVMGR_PLUGIN_CONF_FILE=$PLUGIN_CONFIG_DIR/$CISCO_DEVMGR_PLUGIN_CONF_FILE
fi
Q_PLUGIN_EXTRA_CONF_FILES=(${Q_PLUGIN_EXTRA_CONF_FILES[@]} $Q_CISCO_ROUTER_PLUGIN_CONF_FILE)
Q_PLUGIN_EXTRA_CONF_FILES=(${Q_PLUGIN_EXTRA_CONF_FILES[@]} $Q_CISCO_DEVMGR_PLUGIN_CONF_FILE)
# Copy policy.json from networking-cisco/etc to the neutron/etc folder
echo "Copying policy.json from ${DIR_CISCO}/etc/ to ${NEUTRON_CONF_DIR}"
cp $DIR_CISCO/etc/policy.json $Q_POLICY_FILE
_inject_deployer_config
_patch_neutron_legacy
iniset $NEUTRON_CONF_DIR$Q_CISCO_ROUTER_PLUGIN_CONF_FILE routing default_router_type $Q_CISCO_DEFAULT_ROUTER_TYPE
}
# Deliberate no-op: the router plugin needs no driver-level configuration.
function neutron_cisco_router_configure_driver() {
    return 0
}
# Deliberate no-op: nothing needs to be torn down for the router plugin.
function neutron_cisco_router_stop() {
    return 0
}
# inject any deployer provided configurations
# Append the deployer's router-plugin and device-manager override snippets
# (if present in TOP_DIR) to the staged plugin config files.
function _inject_deployer_config() {
    ROUTER_INJECT_FILE=$TOP_DIR/cisco_router_plugin.inject
    # inject any deployer provided configurations
    if [[ -f $ROUTER_INJECT_FILE ]]; then
        cat $ROUTER_INJECT_FILE >> $NEUTRON_CONF_DIR$Q_CISCO_ROUTER_PLUGIN_CONF_FILE
    fi
    # Inject device manager configs
    DEVMGR_INJECT_FILE=$TOP_DIR/cisco_device_manager_plugin.inject
    if [[ -f $DEVMGR_INJECT_FILE ]]; then
        cat $DEVMGR_INJECT_FILE >> $NEUTRON_CONF_DIR$Q_CISCO_DEVMGR_PLUGIN_CONF_FILE
    fi
}
# Remove the stock ML2 L3 service plugin from Q_SERVICE_PLUGIN_CLASSES so
# it cannot conflict with the Cisco router plugin.
# Bug fix: the original '${VAR##$ML2_L3_PLUGIN}' only strips a *prefix*, so
# the plugin was never removed when listed after another plugin and a stray
# comma was left behind; remove it comma-aware instead.
function _clear_L3_plugin() {
    if [[ ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${ML2_L3_PLUGIN}, ]]; then
        echo "Found L3 plugin configured..Proceeding to delete it"
        local plugins=",${Q_SERVICE_PLUGIN_CLASSES},"
        plugins=${plugins/,${ML2_L3_PLUGIN},/,}
        plugins=${plugins#,}
        Q_SERVICE_PLUGIN_CLASSES=${plugins%,}
        echo "service plugins="$Q_SERVICE_PLUGIN_CLASSES
    fi
}
# lines in neutron-legacy that need to be changed to be compatible with cisco router plugin
TO_REPLACE[0]=$(cat <<'EOF'
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'`;
EOF
)
TO_REPLACE[1]=$(cat <<'EOF'
IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'`;
EOF
)
REPLACE_WITH[0]=$(cat <<'EOF'
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner -c device_id | awk -F '|' -v router_id=$ROUTER_ID -v subnet_id=$PUB_SUBNET_ID '/gateway/ { gsub(" ", "", $4); if ($4 == router_id) { split($2, res, "\\\""); if (res[4] == subnet_id) print res[8]; } }'`;
EOF
)
REPLACE_WITH[1]=$(cat <<'EOF'
IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_id | awk -F '|' -v router_id=$ROUTER_ID -v subnet_id=$ipv6_pub_subnet_id '{ gsub(" ", "", $3); if ($3 == router_id) { split($2, res, "\\\""); if (res[4] == subnet_id) print res[8]; } }'`;
EOF
)
# Monkey-patch neutron-legacy's _neutron_configure_router_v4/_v6: fetch each
# function body with 'declare -f', swap the single TO_REPLACE[i] line for
# REPLACE_WITH[i] (awk preserves the line's leading indentation), then
# re-eval the edited definition, redefining the function in place. This
# relies on the exact text of the neutron-legacy lines listed in TO_REPLACE.
function _patch_neutron_legacy() {
# Redefine functions in neutron-legacy that configure v4 and v6 routers so gateway ip is determined in a way compatible with our plugin
original_definition=$(declare -f _neutron_configure_router_v4)
new_definition=$(echo "$original_definition" | awk -v to_replace="${TO_REPLACE[0]}" -v replace_with="${REPLACE_WITH[0]}" '{ match($0, /^ */); if (substr($0, RLENGTH+1) == to_replace) print substr($0, 1, RLENGTH)replace_with; else print $0; }')
eval "$new_definition"
original_definition=$(declare -f _neutron_configure_router_v6)
new_definition=$(echo "$original_definition" | awk -v to_replace="${TO_REPLACE[1]}" -v replace_with="${REPLACE_WITH[1]}" '{ match($0, /^ */); if (substr($0, RLENGTH+1) == to_replace) print substr($0, 1, RLENGTH)replace_with; else print $0; }')
eval "$new_definition"
}
# Restore xtrace
$MY_XTRACE

View File

@ -1,64 +0,0 @@
# ciscocfgagent
# -------------
# This file implements functions required to configure ciscocfgagent as the third-party
# system used with devstack's Neutron. To include this file, specify the following
# variables in localrc:
#
# * enable_service ciscocfgagent
#
# Save trace setting
CISCO_XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Prepare the Cisco cfg agent's config file under $NEUTRON_CONF_DIR:
# seed it from the copy shipped in the neutron tree when present, otherwise
# create an empty file, then set the logging options via devstack's iniset.
# Sets the globals Q_CISCO_CFGAGENT_* and AGENT_CISCO_CFGAGENT_BINARY used
# later by start_the_ciscocfgagent.
function configure_ciscocfgagent {
    Q_CISCO_CFGAGENT_ENABLED=True
    # Allow the binary path to be overridden from localrc.
    AGENT_CISCO_CFGAGENT_BINARY=${AGENT_CISCO_CFGAGENT_BINARY:-"$NEUTRON_BIN_DIR/neutron-cisco-cfg-agent"}
    CISCO_CFGAGENT_CONFIG_DIR=$NEUTRON_CONF_DIR/plugins/cisco
    mkdir -p $CISCO_CFGAGENT_CONFIG_DIR
    Q_CISCO_CFGAGENT_CONF_FILE=$CISCO_CFGAGENT_CONFIG_DIR/cisco_cfg_agent.ini
    if [[ -f $NEUTRON_DIR/etc/neutron/plugins/cisco/cisco_cfg_agent.ini ]]; then
        cp $NEUTRON_DIR/etc/neutron/plugins/cisco/cisco_cfg_agent.ini $Q_CISCO_CFGAGENT_CONF_FILE
    else
        touch $Q_CISCO_CFGAGENT_CONF_FILE
    fi
    iniset $Q_CISCO_CFGAGENT_CONF_FILE DEFAULT verbose True
    iniset $Q_CISCO_CFGAGENT_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
}
# No initialization is needed for the Cisco cfg agent; the function exists
# only so devstack's 3rd-party-system hooks can call it unconditionally.
function init_ciscocfgagent {
    true
}
# Nothing to install for the Cisco cfg agent (it ships with the plugin);
# deliberate no-op kept for the devstack 3rd-party-system hook interface.
function install_ciscocfgagent {
    true
}
# Deliberate no-op: devstack's 3rd-party-system mechanism would invoke this
# before neutron-server is up, so the agent is launched later via
# start_the_ciscocfgagent instead.
function start_ciscocfgagent {
    true
}
# Actually launch the Cisco cfg agent, in a screen session, once the other
# neutron agents are being started (i.e. after neutron-server is up).
function start_the_ciscocfgagent {
    # We start the cfg agent when the other agents are started
    # Need to enable q-ciscocfgagent due to how is_service_enabled function works
    enable_service q-ciscocfgagent
    configure_ciscocfgagent
    # Agent reads both the main neutron conf and its own plugin conf.
    CISCO_CFG_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_CISCO_CFGAGENT_CONF_FILE"
    screen_it q-ciscocfgagent "cd $NEUTRON_DIR && python $AGENT_CISCO_CFGAGENT_BINARY $CISCO_CFG_CONF_FILES"
}
# Deliberate no-op; devstack's generic screen teardown stops the agent.
function stop_ciscocfgagent {
    true
}
# Deliberate no-op health check, kept for the 3rd-party hook interface.
function check_ciscocfgagent {
    true
}
# Restore xtrace
$CISCO_XTRACE

View File

@ -1,71 +0,0 @@
#!/usr/bin/env bash
# Runs all install and demo scripts in the right order.
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
# Positional arguments:
#   $1 network service name, $2 core plugin, $3 path to devstack localrc,
#   $4/$5 MySQL credentials, $6 cfg-agent management IP,
#   $7 whether the ASR1k (hardware) variant is enabled.
osn=${1:-neutron}
plugin=${2:-ovs}
localrc=$3
# Directory holding localrc is the devstack top dir (openrc lives there).
TOP_DIR=$(cd $(dirname $localrc) && pwd)
mysql_user=$4
mysql_password=$5
mgmt_ip=$6
Q_CISCO_ASR1K_ENABLED=${7:-True}
# Adopted from Devstack scripts:
# Normalize config values to True or False
# Accepts as False: 0 no No NO false False FALSE
# Accepts as True: 1 yes Yes YES true True TRUE
# VAR=$(trueorfalse default-value test-value)
# Block until the operator presses Enter (used between demo/setup steps).
function pause(){
    read -p "Press [Enter] to continue ......"
}
# Normalize a boolean-ish value to the string "True" or "False".
# Usage: VAR=$(trueorfalse default-value test-value)
#   Accepts as False: 0 no No NO false False FALSE
#   Accepts as True:  1 yes Yes YES true True TRUE
# An empty or unrecognized test-value yields the default, unchanged.
function trueorfalse {
    local xtrace=$(set +o | grep xtrace)
    set +o xtrace
    local default=$1
    local testval=$2
    # Bug fix: the early-return branches used to skip the $xtrace restore at
    # the bottom, leaving xtrace disabled in the calling script; restore it
    # on every exit path.
    [[ -z "$testval" ]] && { $xtrace; echo "$default"; return; }
    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { $xtrace; echo "False"; return; }
    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { $xtrace; echo "True"; return; }
    $xtrace
    echo "$default"
}
# Pull the optional test-network flag out of localrc, then run the setup
# scripts in dependency order. The ASR1k (hardware router) path only needs
# keystone setup; the CSR1kv (VM router) path additionally needs nova/glance,
# neutron, and cfg-agent plumbing.
if [[ ! -z $localrc && -f $localrc ]]; then
    eval $(grep ^Q_CISCO_CREATE_TEST_NETWORKS= $localrc)
fi
CREATE_TEST_NETWORKS=$(trueorfalse "False" $Q_CISCO_CREATE_TEST_NETWORKS)
if [[ "$Q_CISCO_ASR1K_ENABLED" == "True" ]]; then
    echo "***************** Setting up Keystone for ASR1k *****************"
    #pause
    ./setup_keystone_for_csr1kv_l3.sh $osn
else
    # Admin credentials for keystone setup, then the L3 admin tenant for
    # the service-VM related steps.
    source ${TOP_DIR}/openrc admin demo
    echo "***************** Setting up Keystone for CSR1kv *****************"
    ./setup_keystone_for_csr1kv_l3.sh $osn
# pause
    source ${TOP_DIR}/openrc $osn L3AdminTenant
    echo "***************** Setting up Nova & Glance for CSR1kv *****************"
    ./setup_nova_and_glance_for_csr1kv_l3.sh $osn $plugin $localrc $mysql_user $mysql_password
# pause
    echo "***************** Setting up Neutron for CSR1kv *****************"
    ./setup_neutron_for_csr1kv_l3.sh $osn $plugin $localrc
# pause
    echo "***************** Setting up CfgAgent connectivity *****************"
    ./setup_l3cfgagent_networking.sh $osn $plugin $localrc $mgmt_ip
    if [[ "$CREATE_TEST_NETWORKS" == "True" ]]; then
        source ${TOP_DIR}/openrc admin demo
        echo "***************** Setting up test networks *****************"
        ./setup_test_networks.sh $osn $plugin
        ./setup_interface_on_extnet1_for_demo.sh $osn $plugin
    fi
fi
echo 'Done!...'

View File

@ -1,125 +0,0 @@
# +------------------------------------------------------------------------------------------------+
# | |
# | PLEASE NOTE: You MUST set those variables below that are marked with <SET THIS VARIABLE!!!>. |
# | |
# +------------------------------------------------------------------------------------------------+
[[local|localrc]]
OFFLINE=False
RECLONE=yes
DEBUG=True
VERBOSE=True
IP_VERSION=4
# ``HOST_IP`` should be set manually for best results if the NIC configuration
# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the
# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate
# on later runs due to the IP moving from an Ethernet interface to a bridge on
# the host. Setting it here also makes it available for ``openrc`` to include
# when setting ``OS_AUTH_URL``.
# ``HOST_IP`` is not set by default.
#HOST_IP=10.1.59.194
HOST_IP=<SET THIS VARIABLE!!!>
#FLOATING_RANGE=10.1.59.224/27
FLOATING_RANGE=<SET THIS VARIABLE!!!>
FIXED_RANGE=10.11.12.0/24
FIXED_NETWORK_SIZE=256
FLAT_INTERFACE=eth0
NETWORK_GATEWAY=10.11.12.1
#FLOATING_RANGE=172.16.6.32/27
#PUBLIC_NETWORK_GATEWAY=172.16.6.33
PUBLIC_NETWORK_GATEWAY=<SET THIS VARIABLE!!!>
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
NOVA_USE_QUANTUM_API=v2
# Use br-int as bridge to reach external networks
PUBLIC_BRIDGE=br-int
our_pw=<SET THIS VARIABLE!!!>
# Must use hard coded value, as scripts grep for the following variables.
MYSQL_PASSWORD=<SET THIS VARIABLE!!!>
RABBIT_PASSWORD=$our_pw
SERVICE_TOKEN=$our_pw
SERVICE_PASSWORD=$our_pw
ADMIN_PASSWORD=$our_pw
disable_service n-net
enable_service neutron
enable_service q-svc
disable_service q-agt
disable_service q-l3
enable_service q-dhcp
enable_service ciscocfgagent
enable_service q-ciscorouter
# Add networking-cisco Repo
enable_plugin networking-cisco https://github.com/openstack/networking-cisco.git master
enable_service net-cisco
# Enable CSR
enable_service cisco-csr
# Default routertype for Neutron routers
Q_CISCO_DEFAULT_ROUTER_TYPE=CSR1kv_router
# Destination path for installation of the OpenStack components.
# There is no need to specify it unless you want the code in
# some particular location (like in a directory shared by all VMs).
DEST=/opt/stack
SCREEN_LOGDIR=$DEST/logs
LOGFILE=~/devstack/stack.sh.log
# Settings to get NoVNC to work.
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
# Type of virtualization to use. Options: kvm, lxc, qemu
LIBVIRT_TYPE=kvm
# Uncomment this to use LXC virtualization.
#LIBVIRT_TYPE=lxc
# List of images to use.
# ----------------------
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
IMAGE_URLS="http://cloud-images.ubuntu.com/releases/14.04.1/release/ubuntu-14.04-server-cloudimg-amd64.tar.gz,http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-rootfs.img.gz";;
*) # otherwise, use the uec style image (with kernel, ramdisk, disk)
IMAGE_URLS="http://cloud-images.ubuntu.com/releases/14.04.1/release/ubuntu-14.04-server-cloudimg-amd64.tar.gz,http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-uec.tar.gz";;
esac
# Sets the maximum number of workers for most services. Must be 0 for now
API_WORKERS=0
#Q_PLUGIN=openvswitch
Q_PLUGIN=cisco
declare -a Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv)
Q_CISCO_PLUGIN_RESTART_VSM=yes
Q_CISCO_PLUGIN_VSM_IP=192.168.168.2
#Q_CISCO_PLUGIN_VSM_USERNAME=admin
Q_CISCO_PLUGIN_VSM_USERNAME=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_VSM_PASSWORD=Sfish123
Q_CISCO_PLUGIN_VSM_PASSWORD=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_VSM_ISO_IMAGE=$HOME/img/n1kv/n1000v-dk9.5.2.1.SK1.3.0.135.iso
Q_CISCO_PLUGIN_VSM_ISO_IMAGE=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_UVEM_DEB_IMAGE=$HOME/img/n1kv/nexus_1000v_vem-12.04-5.2.1.SK1.3.0.135.S0-0gdb.deb
Q_CISCO_PLUGIN_UVEM_DEB_IMAGE=<SET THIS VARIABLE!!!>
Q_CISCO_PLUGIN_HOST_MGMT_INTF=eth0
N1KV_VLAN_NET_PROFILE_NAME=default_network_profile
N1KV_VLAN_NET_SEGMENT_RANGE=101-499
Q_CISCO_ROUTER_PLUGIN=yes
#Q_CISCO_CSR1KV_QCOW2_IMAGE=$HOME/img/csr1kv/3.13/csr1000v-universalk9.BLD_MCP_DEV_LATEST_20140531_013025.qcow2
Q_CISCO_CSR1KV_QCOW2_IMAGE=<SET THIS VARIABLE!!!>
GIT_BASE=https://github.com
# Set mem_ballon stats
[[post-config|$NOVA_CONF]]
[libvirt]
mem_stats_period_seconds = 0

View File

@ -1,118 +0,0 @@
# +------------------------------------------------------------------------------------------------+
# | |
# | PLEASE NOTE: You MUST set those variables below that are marked with <SET THIS VARIABLE!!!>. |
# | |
# +------------------------------------------------------------------------------------------------+
OFFLINE=False
RECLONE=yes
DEBUG=True
VERBOSE=True
IP_VERSION=4
# ``HOST_IP`` should be set manually for best results if the NIC configuration
# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the
# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate
# on later runs due to the IP moving from an Ethernet interface to a bridge on
# the host. Setting it here also makes it available for ``openrc`` to include
# when setting ``OS_AUTH_URL``.
# ``HOST_IP`` is not set by default.
#HOST_IP=10.1.59.194
HOST_IP=<SET THIS VARIABLE!!!>
#FLOATING_RANGE=10.1.59.224/27
FLOATING_RANGE=<SET THIS VARIABLE!!!>
FIXED_RANGE=10.11.12.0/24
FIXED_NETWORK_SIZE=256
FLAT_INTERFACE=eth0
NETWORK_GATEWAY=10.11.12.1
#FLOATING_RANGE=172.16.6.32/27
#PUBLIC_NETWORK_GATEWAY=172.16.6.33
PUBLIC_NETWORK_GATEWAY=<SET THIS VARIABLE!!!>
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
NOVA_USE_QUANTUM_API=v2
# Use br-int as bridge to reach external networks
PUBLIC_BRIDGE=br-int
our_pw=<SET THIS VARIABLE!!!>
# Must use hard coded value, as scripts grep for the following variables.
MYSQL_PASSWORD=<SET THIS VARIABLE!!!>
RABBIT_PASSWORD=$our_pw
SERVICE_TOKEN=$our_pw
SERVICE_PASSWORD=$our_pw
ADMIN_PASSWORD=$our_pw
disable_service n-net
enable_service neutron
enable_service q-svc
disable_service q-agt
disable_service q-l3
enable_service q-dhcp
enable_service ciscocfgagent
enable_service q-ciscorouter
# Add networking-cisco Repo
enable_plugin networking-cisco https://github.com/openstack/networking-cisco.git master
enable_service net-cisco
# Enable CSR
enable_service cisco-csr
# Default routertype for Neutron routers
Q_CISCO_DEFAULT_ROUTER_TYPE=CSR1kv_router
# Destination path for installation of the OpenStack components.
# There is no need to specify it unless you want the code in
# some particular location (like in a directory shared by all VMs).
DEST=/opt/stack
SCREEN_LOGDIR=$DEST/logs
LOGFILE=~/devstack/stack.sh.log
# Settings to get NoVNC to work.
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
# Type of virtualization to use. Options: kvm, lxc, qemu
LIBVIRT_TYPE=kvm
# Uncomment this to use LXC virtualization.
#LIBVIRT_TYPE=lxc
# List of images to use.
# ----------------------
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
IMAGE_URLS="http://cloud-images.ubuntu.com/releases/14.04.1/release/ubuntu-14.04-server-cloudimg-amd64.tar.gz,http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-rootfs.img.gz";;
*) # otherwise, use the uec style image (with kernel, ramdisk, disk)
IMAGE_URLS="http://cloud-images.ubuntu.com/releases/14.04.1/release/ubuntu-14.04-server-cloudimg-amd64.tar.gz,http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-uec.tar.gz";;
esac
# Sets the maximum number of workers for most services. Must be 0 for now
API_WORKERS=0
#Q_PLUGIN=openvswitch
Q_PLUGIN=cisco
declare -a Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv)
Q_CISCO_PLUGIN_RESTART_VSM=yes
Q_CISCO_PLUGIN_VSM_IP=192.168.168.2
#Q_CISCO_PLUGIN_VSM_USERNAME=admin
Q_CISCO_PLUGIN_VSM_USERNAME=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_VSM_PASSWORD=Sfish123
Q_CISCO_PLUGIN_VSM_PASSWORD=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_VSM_ISO_IMAGE=$HOME/img/n1kv/n1000v-dk9.5.2.1.SK1.3.0.135.iso
Q_CISCO_PLUGIN_VSM_ISO_IMAGE=<SET THIS VARIABLE!!!>
#Q_CISCO_PLUGIN_UVEM_DEB_IMAGE=$HOME/img/n1kv/nexus_1000v_vem-12.04-5.2.1.SK1.3.0.135.S0-0gdb.deb
Q_CISCO_PLUGIN_UVEM_DEB_IMAGE=<SET THIS VARIABLE!!!>
Q_CISCO_PLUGIN_HOST_MGMT_INTF=eth0
N1KV_VLAN_NET_PROFILE_NAME=default_network_profile
N1KV_VLAN_NET_SEGMENT_RANGE=101-499
Q_CISCO_ROUTER_PLUGIN=yes
#Q_CISCO_CSR1KV_QCOW2_IMAGE=$HOME/img/csr1kv/3.13/csr1000v-universalk9.BLD_MCP_DEV_LATEST_20140531_013025.qcow2
Q_CISCO_CSR1KV_QCOW2_IMAGE=<SET THIS VARIABLE!!!>
GIT_BASE=https://github.com

View File

@ -1,62 +0,0 @@
#!/usr/bin/env bash
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
osn=${1:-neutron}
# Delete every resource of a given type whose name matches a pattern.
#   $1 service CLI ("nova" or the neutron CLI named by $osn)
#   $2 resource type (server, port, subnet, net, ...)
#   $3 awk regex matched against the resource name column
function delete_service_resources_by_name() {
    service=$1
    resource=$2
    name=$3
    local list_command="list --field=id --field=name"
    local delete_command="delete"
    if [[ "$service" == "$osn" ]]; then
        # neutron uses "<resource>-list" / "<resource>-delete" subcommands
        # instead of nova-style "list" / "delete".
        list_command=$resource"-list --field=id --field=name"
        delete_command=$resource"-"$delete_command
    fi
    # Table output: id in column 2, name in column 4.
    ids=($($service $list_command | awk -v n=$name '$4 ~ n { print $2; }'))
    # Bug fix: inside [[ ]] the ">" operator is a lexicographic string
    # comparison; use the numeric -gt test for the element count.
    if [[ ${#ids[@]} -gt 0 ]]; then
        echo "Deleting ${#ids[@]} $resource resources named $name"
        for id in "${ids[@]}"; do
            echo " $service $delete_command $id"
            $service $delete_command $id
        done
        if [[ "$service" == "nova" ]]; then
            # Give Nova time to tear the instances down before dependent
            # neutron resources are deleted.
            wait_time=7
            echo "Waiting $wait_time seconds to let Nova clean up"
            sleep $wait_time
        fi
    else
        echo "No $resource resources named $name to delete"
    fi
}
# Locate the devstack checkout, switch to the L3 admin tenant, and remove
# every CSR1kv artifact (router VMs, their ports/subnets/nets), then purge
# the hosting-device records straight from the plugin's database.
devstack_dir=$(find -L $HOME -name devstack -type d)
source $devstack_dir/openrc $osn L3AdminTenant
delete_service_resources_by_name nova server CSR1kv_nrouter
delete_service_resources_by_name $osn port mgmt
delete_service_resources_by_name $osn port t1_p:
delete_service_resources_by_name $osn port t2_p:
delete_service_resources_by_name $osn subnet t1_sn:
delete_service_resources_by_name $osn subnet t2_sn:
delete_service_resources_by_name $osn net t1_n:
delete_service_resources_by_name $osn net t2_n:
# Recover DB credentials and the plugin name from the devstack config.
eval $(grep ^MYSQL_USER= $devstack_dir/lib/database)
eval $(grep ^MYSQL_USER= $devstack_dir/localrc)
eval $(grep ^MYSQL_PASSWORD= $devstack_dir/localrc)
eval $(grep ^Q_PLUGIN= $devstack_dir/localrc)
# Bug fix: "$Q_PLUGIN_$osn" expanded the undefined variable Q_PLUGIN_ and
# silently dropped the plugin prefix (Q_PLUGIN grepped above was unused);
# braces make the intended "<plugin>_<service>" database name explicit.
table="${Q_PLUGIN}_$osn"
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "use $table; delete from hostingdevices;"
echo
echo "Now please RESTART $osn SERVER and CISCO CFG AGENT!"

View File

@ -1,79 +0,0 @@
#!/usr/bin/env bash
# Default values
# --------------
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
osn=${1:-neutron}
plugin=${2:-n1kv}
# External demo network the host-side interface will be plugged into.
osnExtNwName=test_extnet1
osnExtNwLen=24
hostportIP=10.0.21.3
portName=hostOnExtNw
n1kvPortPolicyProfileNames=(test-profile osn_t1_pp osn_t2_pp)
# veth pair: one end stays in the host namespace, the other is attached to
# the OVS integration bridge.
vethHostSideName=hostOnExtNw_hs
vethBridgeSideName=hostOnExtNw_bs
# Poll the plugin until the N1kv port policy profile named $1 shows up
# (profiles are published asynchronously by the VSM); retries up to 16
# times at 5s intervals. Result is returned via the global pProfileId,
# which stays "None" on timeout.
function get_port_profile_id() {
    name=$1
    local c=0
    pProfileId=None
    while [ $c -le 15 ] && [ "$pProfileId" == "None" ]; do
        pProfileId=`$osn cisco-policy-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
        if [[ "$pProfileId" == "None" ]]; then
            let c+=1
            sleep 5
        fi
    done
}
# Gather plugin-specific parameters: the N1kv needs a policy profile id for
# port creation; plain OVS/ML2 needs the external network's VLAN tag so the
# host-side port can be tagged on br-int.
if [ "$plugin" == "n1kv" ]; then
    get_port_profile_id ${n1kvPortPolicyProfileNames[0]}
    extra_port_params="--n1kv:profile_id $pProfileId"
elif [ "$plugin" == "ovs" ]; then
    nw=`$osn net-show $osnExtNwName`
    extNwVLAN=`echo "$nw" | awk '/provider:segmentation_id/ { print $4; }'`
    if [ -z ${extNwVLAN+x} ] || [ "$extNwVLAN" == "" ]; then
        echo "Failed to lookup VLAN of $osnExtNwName network, please check health of plugin and VSM then re-run this script."
        echo "Aborting!"
        exit 1
    fi
fi
# Create (idempotently) a neutron port representing the host on the
# external network.
echo -n "Checking if $portName port exists ..."
port=`$osn port-show $portName 2>&1`
hasPort=`echo $port | awk '/Unable to find|Value/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasPort" == "No" ]; then
    echo " No, it does not. Creating it."
    port=`$osn port-create --name $portName --fixed-ip ip_address=$hostportIP $osnExtNwName $extra_port_params`
else
    echo " Yes, it does."
fi
macAddr=`echo "$port" | awk '/mac_address/ { print $4; }'`
if [ -z ${macAddr+x} ] || [ "$macAddr" == "" ]; then
    echo "Failed to create $portName port, please check health of plugin and VSM then re-run this script."
    echo "Aborting!"
    exit 1
fi
portId=`echo "$port" | awk '/ id/ { print $4; }'`
# Replace any stale veth pair, then build a new one carrying the port's MAC.
hasVeth=`ip link show | awk '/'"$vethHostSideName"'/ { print $2; }'`
if [ "$hasVeth" != "" ]; then
    echo "Deleting existing $vethHostSideName device"
    sudo ip link del $vethHostSideName
    sudo ovs-vsctl -- --if-exists del-port $vethBridgeSideName
fi
echo "Creating and plugging $vethHostSideName device into $osnExtNwName network"
sudo ip link add $vethHostSideName address $macAddr type veth peer name $vethBridgeSideName
sudo ip link set $vethHostSideName up
sudo ip link set $vethBridgeSideName up
sudo ip -4 addr add $hostportIP/$osnExtNwLen dev $vethHostSideName
if [ "$plugin" == "ovs" ]; then
    extra_ovs_params="tag=$extNwVLAN"
fi
# Attach the bridge end to br-int with the external-ids OVS/neutron agents
# use to recognize the port as bound and active.
sudo ovs-vsctl -- --may-exist add-port br-int $vethBridgeSideName $extra_ovs_params -- set interface $vethBridgeSideName external-ids:iface-id=$portId -- set interface $vethBridgeSideName external-ids:attached-mac=$macAddr -- set interface $vethBridgeSideName external-ids:iface-status=active
View File

@ -1,70 +0,0 @@
#!/usr/bin/env bash
## Users, roles, tenants ##
# Idempotently create the L3 admin tenant and a demo "viewer" user, and
# grant the network service user admin rights in both the L3 admin tenant
# and the service tenant. Each check parses `openstack ... show` output,
# printing "No" when the resource is missing.
adminUser=${1:-neutron}
adminRole=admin
l3AdminTenant=L3AdminTenant
serviceTenant=service
# Below user is just for demos so that we don't see all logical instances.
regularUser=viewer
password=viewer
echo -n "Checking if $l3AdminTenant tenant exists ..."
tenantId=`openstack project show $l3AdminTenant 2>&1 | awk '/No|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
    echo " No, it does not. Creating it."
    tenantId=$(openstack project create $l3AdminTenant --domain="default" --or-show -f value -c id)
    echo $tenantId
else
    echo " Yes, it does."
fi
echo -n "Checking if $regularUser user exists ..."
userId=`openstack user show $regularUser 2>&1 | awk '/No user|id/ { if ($1 == "No") print "No"; else print $4; }'`
if [ "$userId" == "No" ]; then
    echo " No, it does not. Creating it."
    userId=$(openstack user create $regularUser --password $password --domain="default" --or-show -f value -c id)
    echo $userId
else
    echo " Yes, it does."
fi
echo -n "Checking if $adminUser user has admin privileges in $l3AdminTenant tenant ..."
isAdmin=`openstack --os-username $adminUser --os-project-name $l3AdminTenant user role list 2>&1 | awk 'BEGIN { res="No" } { if ($4 == "admin") res="Yes"; } END { print res; }'`
if [ "$isAdmin" == "No" ]; then
    echo " No, it does not. Giving it admin rights."
    admUserId=`openstack user show $adminUser | awk '{ if ($2 == "id") print $4 }'`
    admRoleId=`openstack role show $adminRole | awk '{ if ($2 == "id") print $4 }'`
    openstack role add $admRoleId --user $admUserId --project $tenantId
else
    echo " Yes, it has."
fi
# What follows can be removed once L3AdminTenant is used to lookup UUID of L3AdminTenant
echo -n "Determining UUID of $serviceTenant tenant ..."
tenantId=`openstack project show $serviceTenant 2>&1 | awk '/No tenant|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
    echo "Error: $serviceTenant tenant does not seem to exist. Aborting!"
    exit 1
else
    echo " Done."
fi
echo -n "Checking if $adminUser user has admin privileges in $serviceTenant tenant ..."
isAdmin=`openstack --os-username $adminUser --os-project-name $serviceTenant user role list 2>&1 | awk 'BEGIN { res="No" } { if ($4 == "admin") res="Yes"; } END { print res; }'`
if [ "$isAdmin" == "No" ]; then
    echo " No, it does not. Giving it admin rights."
    admUserId=`openstack user show $adminUser | awk '{ if ($2 == "id") print $4 }'`
    admRoleId=`openstack role show $adminRole | awk '{ if ($2 == "id") print $4 }'`
    openstack role add $admRoleId --user $admUserId --project $tenantId
else
    echo " Yes, it has."
fi

View File

@ -1,114 +0,0 @@
#!/usr/bin/env bash
# Default values
# --------------
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
# Plumb the cfg agent's management connectivity: requires the OVS physical
# bridge (from localrc) and the L3 admin tenant to already exist.
osn=${1:-neutron}
plugin=${2:-n1kv}
localrc=$3
if [[ ! -z $localrc && -f $localrc ]]; then
    eval $(grep ^OVS_PHYSICAL_BRIDGE= $localrc)
fi
if ! `sudo ovs-vsctl br-exists $OVS_PHYSICAL_BRIDGE`; then
    # Bug fix: error message read "EEROR!".
    echo "ERROR! Cannot find bridge $OVS_PHYSICAL_BRIDGE. Please create it and then rerun this script"
    exit 1
fi
adminUser=$osn
l3AdminTenant=L3AdminTenant
osnMgmtNwName=osn_mgmt_nw
osnMgmtNwLen=24
l3CfgAgentMgmtIP=${4:-10.0.100.2}
portName=l3CfgAgent1
n1kvPortPolicyProfileNames=(osn_mgmt_pp osn_t1_pp osn_t2_pp)
# veth pair joining the host namespace to the plugging bridge.
vethHostSideName=l3cfgagent_hs
vethBridgeSideName=l3cfgagent_bs
echo -n "Checking if $l3AdminTenant exists ..."
tenantId=`keystone tenant-get $l3AdminTenant 2>&1 | awk '/No tenant|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
    echo " No it does not, please create one using the setup_keystone... script then re-run this script."
    echo "Aborting!"
    exit 1
else
    echo " Yes, it does."
fi
# Poll the plugin until the N1kv port policy profile named $1 appears
# (published asynchronously by the VSM); retries up to 16 times at 5s
# intervals. Result is returned via the global pProfileId ("None" on
# timeout).
function get_port_profile_id() {
    local name=$1
    local c=0
    pProfileId=None
    while [ $c -le 15 ] && [ "$pProfileId" == "None" ]; do
        pProfileId=`$osn cisco-policy-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
        if [[ "$pProfileId" == "None" ]]; then
            let c+=1
            sleep 5
        fi
    done
}
# Plugin-specific parameters: N1kv needs the mgmt policy profile id;
# OVS/ML2 needs the mgmt network's VLAN tag for the bridge port.
if [ "$plugin" == "n1kv" ]; then
    get_port_profile_id ${n1kvPortPolicyProfileNames[0]}
    extra_port_params="--n1kv:profile_id $pProfileId"
elif [ "$plugin" == "ovs" ]; then
    nw=`$osn net-show $osnMgmtNwName`
    mgmtVLAN=`echo "$nw" | awk '/provider:segmentation_id/ { print $4; }'`
    if [ -z ${mgmtVLAN+x} ] || [ "$mgmtVLAN" == "" ]; then
        echo "Failed to lookup VLAN of $osnMgmtNwName network, please check health of ML2 plugin."
        echo "Aborting!"
        exit 1
    else
        echo "MgmtVAN is $mgmtVLAN"
    fi
fi
# Idempotently create the neutron port that represents the cfg agent on
# the management network.
echo -n "Checking if $portName port exists ..."
port=`$osn port-show $portName 2>&1`
hasPort=`echo $port | awk '/Unable to find|Value/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasPort" == "No" ]; then
    echo " No, it does not. Creating it."
    port=`$osn port-create --name $portName --tenant-id $tenantId --fixed-ip ip_address=$l3CfgAgentMgmtIP $osnMgmtNwName $extra_port_params`
else
    echo " Yes, it does."
fi
macAddr=`echo "$port" | awk '/mac_address/ { print $4; }'`
if [ -z ${macAddr+x} ] || [ "$macAddr" == "" ]; then
    echo "Failed to create $portName port, please check health of ML2 plugin."
    echo "Aborting!"
    exit 1
fi
echo "Mac address is $macAddr"
portId=`echo "$port" | awk '/ id/ { print $4; }'`
echo "Portid is $portId"
# Replace any stale veth pair, then build a new one with the port's MAC.
hasVeth=`ip link show | awk '/'"$vethHostSideName"'/ { print $2; }'`
if [ "$hasVeth" != "" ]; then
    echo "Deleting existing $vethHostSideName device"
    sudo ip link del $vethHostSideName
    sudo ovs-vsctl -- --if-exists del-port $vethBridgeSideName
fi
echo "Creating and plugging $vethHostSideName device into $osnMgmtNwName network"
sudo ip link add $vethHostSideName address $macAddr type veth peer name $vethBridgeSideName
sudo ip link set $vethHostSideName up
sudo ip link set $vethBridgeSideName up
sudo ip -4 addr add $l3CfgAgentMgmtIP/$osnMgmtNwLen dev $vethHostSideName
if [ "$plugin" == "n1kv" ]; then
    plugging_bridge=$OVS_BRIDGE
else # We are in ovs (with ml2)
    plugging_bridge=$OVS_PHYSICAL_BRIDGE
    echo "Plugging bridge: $plugging_bridge"
    extra_ovs_params="tag=$mgmtVLAN"
    echo "extra ovs params : $extra_ovs_params"
fi
# Attach the bridge end with the external-ids that mark the port bound/active.
sudo ovs-vsctl -- --may-exist add-port $plugging_bridge $vethBridgeSideName $extra_ovs_params -- set interface $vethBridgeSideName external-ids:iface-id=$portId -- set interface $vethBridgeSideName external-ids:attached-mac=$macAddr -- set interface $vethBridgeSideName external-ids:iface-status=active

View File

@ -1,280 +0,0 @@
#!/usr/bin/env bash
# Default values
# --------------
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
osn=${1:-neutron}
plugin=${2:-n1kv}
localrc=$3
TOP_DIR=$(cd $(dirname $localrc) && pwd)
net_cisco=${4:-networking-cisco}
# Pull VSM credentials and mgmt-subnet overrides from localrc when present.
if [[ ! -z $localrc && -f $localrc ]]; then
    eval $(grep ^Q_CISCO_PLUGIN_VSM_IP= $localrc)
    eval $(grep Q_CISCO_PLUGIN_VSM_USERNAME= $localrc)
    eval $(grep ^Q_CISCO_PLUGIN_VSM_PASSWORD= $localrc)
    eval $(grep ^Q_CISCO_MGMT_SUBNET= $localrc)
    eval $(grep ^Q_CISCO_MGMT_SUBNET_LENGTH= $localrc)
    eval $(grep ^Q_CISCO_MGMT_SUBNET_USAGE_RANGE_START= $localrc)
    eval $(grep ^Q_CISCO_MGMT_SUBNET_USAGE_RANGE_END= $localrc)
fi
adminUser=$osn
l3AdminTenant=L3AdminTenant
vsmIP=${Q_CISCO_PLUGIN_VSM_IP:-192.168.168.2}
vsmUsername=${Q_CISCO_PLUGIN_VSM_USERNAME:-admin}
vsmPassword=${Q_CISCO_PLUGIN_VSM_PASSWORD:-Sfish123}
# Location of the CSR1kv config-drive template shipped with the plugin.
base_dir=/opt/stack/data/$net_cisco/cisco
DIR_CISCO=/opt/stack/networking-cisco
templates_dir=$base_dir/templates
template_name=csr1kv_cfg_template
template_file=$templates_dir/$template_name
template_file_src=$DIR_CISCO/networking_cisco/plugins/cisco/device_manager/configdrive_templates/$template_name
osnMgmtNwName=osn_mgmt_nw
mgmtSecGrp=mgmt_sec_grp
mgmtProviderNwName=mgmt_net
mgmtProviderVlanId=100
osnMgmtSubnetName=osn_mgmt_subnet
# note that the size of this network sets the limit on number of CSR instances
osnMgmtNw=${Q_CISCO_MGMT_SUBNET:-10.0.100.0}
osnMgmtNwLen=${Q_CISCO_MGMT_SUBNET_LENGTH:-24}
osnMgmtSubnet=$osnMgmtNw/$osnMgmtNwLen
# the first 9 addresses are set aside for L3CfgAgents and similar
osnMgmtRangeStart=${Q_CISCO_MGMT_SUBNET_USAGE_RANGE_START:-10.0.100.10}
osnMgmtRangeEnd=${Q_CISCO_MGMT_SUBNET_USAGE_RANGE_END:-10.0.100.254}
# Items in the arrays below correspond to settings for
# the Mgmt, T1 (i.e., VLAN) and T2 (i.e., VXLAN) networks/ports.
# the N1kv only supports one physical network so far
n1kvPhyNwNames=(osn_phy_network osn_phy_network osn_phy_network)
n1kvNwProfileNames=(osn_mgmt_np osn_t1_np osn_t2_np)
n1kvNwProfileTypes=(vlan trunk trunk)
n1kvNwSubprofileTypes=(None vlan vlan)
n1kvNwProfileSegRange=($mgmtProviderVlanId-$mgmtProviderVlanId None None)
n1kvPortPolicyProfileNames=(osn_mgmt_pp osn_t1_pp osn_t2_pp sys-uplink)
n1kvPortPolicyProfileTypes=(vethernet vethernet vethernet ethernet)
# Create and publish a port profile on the N1kv VSM by driving its telnet
# CLI with an embedded expect script (parameters are passed to expect via
# the environment-variable prefix on the command line).
#   $1 VSM IP, $2 username, $3 password, $4 profile name,
#   $5 profile type (vethernet/ethernet; ethernet profiles get trunk mode).
function _configure_vsm_port_profiles() {
    # Package 'expect' must be installed for this function to work
    vsm_ip_addr=$1 user=$2 passwd=$3 profile_name=$4 ptype=$5 expect -c '
    spawn /usr/bin/telnet $env(vsm_ip_addr)
    expect {
        -re "Trying.*Connected.*Escape.*Nexus .*login: " {
            send "$env(user)\n"
            exp_continue
            #look for the password prompt
        }
        "*?assword:*" {
            send "$env(passwd)\n"
        }
    }
    expect -re ".*# "
    send "config te\n"
    expect -re ".*# "
    send "feature network-segmentation-manager\n"
    expect -re ".*# "
    send "port-profile type $env(ptype) $env(profile_name)\n"
    expect -re ".*# "
    if {$env(ptype) == "ethernet"} {
        send "switchport mode trunk\n"
        expect -re ".*# "
    }
    send "no shut\n"
    expect -re ".*# "
    send "state enabled\n"
    expect -re ".*# "
    send "publish port-profile\n"
    expect -re ".*# "
    send "end\n"
    expect -re ".*# "
    send "exit\n"
    '
}
# Look up an N1kv network profile by name, creating it if absent, then poll
# until the plugin reports it (up to 16 tries at 5s intervals). Result is
# returned via the global nProfileId ("None" on timeout).
#   $1 name, $2 physical network, $3 profile type, $4 sub-type or "None",
#   $5 segment range or "None". Uses the global $tenantId as owner.
function get_network_profile_id() {
    local name=$1
    local phyNet=$2
    local type=$3
    local subType=$4
    local segRange=$5
    local c=0
    local opt_param=
    nProfileId=`$osn cisco-network-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
    if [ "$nProfileId" == "None" ]; then
        echo " Network profile $name does not exist. Creating it."
        # Optional arguments are only valid for some profile types, so they
        # are added conditionally.
        if [ "$subType" != "None" ]; then
            opt_param="--sub_type $subType"
        fi
        if [ "$segRange" != "None" ]; then
            opt_param=$opt_param" --segment_range $segRange"
        fi
        echo $tenantId
        echo $phyNet
        echo $opt_param
        echo $name
        echo $type
        $osn cisco-network-profile-create --tenant-id $tenantId --physical_network $phyNet $opt_param $name $type
    fi
    while [ $c -le 15 ] && [ "$nProfileId" == "None" ]; do
        nProfileId=`$osn cisco-network-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
        let c+=1
        sleep 5
    done
}
# Look up an N1kv port policy profile by name, creating it on the VSM if
# absent, then poll until the plugin reports it (up to 16 tries at 5s
# intervals). Result is returned via the global pProfileId ("None" on
# timeout).
#   $1 profile name, $2 port type (vethernet/ethernet).
function get_port_profile_id() {
    local name=$1
    local porttype=$2
    local c=0
    pProfileId=`$osn cisco-policy-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
    if [ "$pProfileId" == "None" ]; then
        echo " Port policy profile $name does not exist. Creating it."
        _configure_vsm_port_profiles $vsmIP $vsmUsername $vsmPassword $name $porttype
    fi
    # Bug fix: this check previously read ${n1kvPortPolicyProfileNames[$i]},
    # silently depending on the caller's loop index $i; test the function's
    # own argument instead (identical result for existing callers).
    if [ "$name" == "sys-uplink" ]; then
        # The n1kv plugin does not list the above policies so we cannot verify them
        return
    fi
    while [ $c -le 15 ] && [ "$pProfileId" == "None" ]; do
        pProfileId=`$osn cisco-policy-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
        let c+=1
        sleep 5
    done
}
# Resolve the L3 admin tenant (must already exist), switch credentials to
# it, and stage the CSR1kv config-drive template.
tenantId=`keystone tenant-get $l3AdminTenant 2>&1 | awk '/No tenant|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
    echo "No $l3AdminTenant exists, please create one using the setup_keystone... script then re-run this script."
    echo "Aborting!"
    exit 1
fi
# Bug fix: was "$L3adminTenant" (wrong case, an undefined variable), which
# sourced openrc with an empty tenant argument.
source $TOP_DIR/openrc $adminUser $l3AdminTenant
echo -n "Checking if $templates_dir exists..."
if [ -d $templates_dir ]; then
    echo "Yes, it does."
else
    echo "No, it does not. Creating it."
    mkdir -p $templates_dir
fi
#Hareesh - Copying of template file everytime to cater for template file changes
echo "Copying base template in $template_file_src to $template_file ..."
cp $template_file_src $template_file
# For the N1kv plugin, make sure every required network profile and port
# policy profile exists (creating missing ones) before anything else.
if [ "$plugin" == "n1kv" ]; then
    echo "Verifying that required N1kv network profiles exist:"
    for (( i=0; i<${#n1kvNwProfileNames[@]}; i++ )); do
        echo " Checking ${n1kvNwProfileNames[$i]} ..."
        get_network_profile_id ${n1kvNwProfileNames[$i]} ${n1kvPhyNwNames[$i]} ${n1kvNwProfileTypes[$i]} ${n1kvNwSubprofileTypes[$i]} ${n1kvNwProfileSegRange[$i]}
        if [ $nProfileId == "None" ]; then
            echo " Failed to verify network profile ${n1kvNwProfileNames[$i]}, please check health of the N1kv plugin and the VSM."
            echo " Aborting!"
            exit 1
        else
            echo " Done"
        fi
    done
    echo "Verifying that required N1kv port policy profiles exist:"
    for (( i=0; i<${#n1kvPortPolicyProfileNames[@]}; i++ )); do
        echo " Checking ${n1kvPortPolicyProfileNames[$i]} ..."
        get_port_profile_id ${n1kvPortPolicyProfileNames[$i]} ${n1kvPortPolicyProfileTypes[$i]}
        # sys-uplink cannot be listed by the plugin, so it is exempt here.
        if [ $pProfileId == "None" ] && [ "${n1kvPortPolicyProfileNames[$i]}" != "sys-uplink" ]; then
            echo " Failed to verify port profile ${n1kvPortPolicyProfileNames[$i]}, please check health of the VSM then re-run this script."
            echo " Aborting!"
            exit 1
        else
            echo " Done"
        fi
    done
fi
echo -n ""
# Idempotently create the management network the CSR1kv routers attach to.
echo -n "Checking if $osnMgmtNwName network exists ..."
hasMgmtNetwork=`$osn net-show $osnMgmtNwName 2>&1 | awk '/Unable to find|enabled/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasMgmtNetwork" == "No" ]; then
    echo " No, it does not. Creating it."
    if [ "$plugin" == "n1kv" ]; then
        get_network_profile_id ${n1kvNwProfileNames[0]} ${n1kvPhyNwNames[0]} ${n1kvNwProfileTypes[0]} ${n1kvNwSubprofileTypes[0]} ${n1kvNwProfileSegRange[0]}
        $osn net-create --tenant-id $tenantId $osnMgmtNwName --n1kv:profile_id $nProfileId
    else
        $osn net-create --tenant-id $tenantId $osnMgmtNwName --provider:network_type vlan --provider:physical_network pvnet1 --provider:segmentation_id $mgmtProviderVlanId
    fi
else
    echo " Yes, it does."
fi
echo -n "Checking if $osnMgmtSubnetName subnet exists ..."
hasMgmtSubnet=`$osn subnet-show $osnMgmtSubnetName 2>&1 | awk '/Unable to find|Value/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasMgmtSubnet" == "No" ]; then
    echo " No, it does not. Creating it."
    # Disabling DHCP on mgmt subnet due to Nova bug #1220856 (https://bugs.launchpad.net/nova/+bug/1220856)
    $osn subnet-create --name $osnMgmtSubnetName --tenant-id $tenantId --allocation-pool start=$osnMgmtRangeStart,end=$osnMgmtRangeEnd $osnMgmtNwName $osnMgmtSubnet --disable-dhcp
else
    echo " Yes, it does."
fi
if [ "$plugin" == "n1kv" ]; then
    # security groups are not implemented by N1kv plugin so we stop here
    exit 0
fi
# ML2/OVS path: create the management security group with ICMP and SSH
# ingress so the cfg agent can ping and configure the router VMs.
echo -n "Checking if $mgmtSecGrp security group exists ..."
hasMgmtSecGrp=`$osn security-group-show $mgmtSecGrp 2>&1 | awk '/Unable to find|Value/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasMgmtSecGrp" == "No" ]; then
    echo " No, it does not. Creating it."
    $osn security-group-create --description "For CSR1kv management network" --tenant-id $tenantId $mgmtSecGrp
else
    echo " Yes, it does."
fi
proto="icmp"
echo -n "Checking if $mgmtSecGrp security group has $proto rule ..."
def=`$osn security-group-rule-list | awk -v grp=$mgmtSecGrp -v p=$proto '/'"$proto"'|protocol/ { if ($4 == grp && $8 == p && $10 == "0.0.0.0/0") n++; } END { if (n > 0) print "Yes"; else print "No"; }'`
if [ "$def" == "No" ]; then
    echo " No, it does not. Creating it."
    $osn security-group-rule-create --tenant-id $tenantId --protocol icmp --remote-ip-prefix 0.0.0.0/0 $mgmtSecGrp
else
    echo " Yes, it does."
fi
proto="tcp"
echo -n "Checking if $mgmtSecGrp security group has $proto rule ..."
def=`$osn security-group-rule-list | awk -v grp=$mgmtSecGrp -v p=$proto '/'"$proto"'|protocol/ { if ($4 == grp && $8 == p && $10 == "0.0.0.0/0") n++; } END { if (n > 0) print "Yes"; else print "No"; }'`
if [ "$def" == "No" ]; then
    echo " No, it does not. Creating it."
    $osn security-group-rule-create --tenant-id $tenantId --protocol tcp --port-range-min 22 --port-range-max 22 --remote-ip-prefix 0.0.0.0/0 $mgmtSecGrp
else
    echo " Yes, it does."
fi

View File

@ -1,165 +0,0 @@
#!/usr/bin/env bash
# Sets up the Nova resources (flavor, host aggregate, Glance image, quotas)
# needed to run CSR1kv router VMs.
# Positional arguments: admin user, plugin name, localrc path, mysql user,
# mysql password.
# Default values
# --------------
# adminUser is same as name of OpenStack network service,
# it should be 'neutron'.
adminUser=${1:-neutron}
osn=$adminUser
plugin=${2:-n1kv}
localrc=$3
TOP_DIR=$(cd $(dirname $localrc) && pwd)
mysql_user=$4
mysql_password=$5
if [[ -n $mysql_user && -n $mysql_password ]]; then
mysql_auth="-u $mysql_user -p$mysql_password"
fi
# Pull only the CSR1kv image path variable out of localrc, if present.
if [[ ! -z $localrc && -f $localrc ]]; then
eval $(grep ^Q_CISCO_CSR1KV_QCOW2_IMAGE= $localrc)
fi
l3AdminTenant="L3AdminTenant"
csr1kvFlavorName="csr1kv_router"
csr1kvFlavorId=621
# Aggregate/metadata used to mark computes that may host CSR1kv VMs.
networkHostsAggregateName="compute_network_hosts"
aggregateMetadataKey="aggregate_instance_extra_specs:network_host"
aggregateMetadataValue="True"
aggregateMetadata="$aggregateMetadataKey=$aggregateMetadataValue"
max_attempts=200
computeNetworkNodes=( $(hostname) )
csr1kvImageSrc=$Q_CISCO_CSR1KV_QCOW2_IMAGE
csr1kvImageName="csr1kv_openstack_img"
csr1kvDiskFormat="qcow2"
csr1kvContainerFormat="bare"
#csr1kvGlanceExtraParams="--property hw_vif_model=e1000 --property hw_disk_bus=ide --property hw_cdrom_bus=ide"
# We need to add hosts to aggregates in a separate process
# that can wait for nova compute to start as this script
# may run before nova compute has started
# Add every host named in an array to the $aggregateId aggregate, retrying
# (once per second, up to $max_attempts) hosts whose nova-compute has not
# registered yet. Intended to run in a background subshell.
# $1 - NAME of a bash array variable holding the hostnames (passed by name,
#      dereferenced via ${!...} indirection below).
function add_host_to_aggregate {
local host_array_name=$1[@]
local hosts=("${!host_array_name}")
local attempt=1
echo "Configuring compute nodes to act as network hosts ..."
while [ ${#hosts[@]} -gt 0 -a $attempt -le $max_attempts ]; do
for host in "${hosts[@]}"; do
# "ERROR" in the output means the host is not (yet) known to Nova.
host_exists=`nova host-describe $host 2>&1 | awk 'BEGIN { res = "Yes" } /ERROR/ { if ($1 == "ERROR") res = "No"; } END { print res; } '`
if [ "$host_exists" == "Yes" ]; then
host_added=`nova aggregate-details $aggregateId 2>&1 | awk -v host=$host 'BEGIN { res = "No" } { if (index($8, host) > 0) res = "Yes"; } END { print res }'`
if [ "$host_added" == "No" ]; then
echo " Adding host '$host' to '$networkHostsAggregateName' aggregate"
nova aggregate-add-host $aggregateId $host > /dev/null 2>&1
fi
else
# Host not registered yet; keep it for the next round.
remaining[${#remaining[@]}]=$host
fi
done
hosts=(${remaining[@]})
remaining=( )
attempt=$(($attempt+1))
sleep 1
done
# Terminates the background subshell this function runs in.
exit 0
}
# Look up the ID of the L3 admin tenant; abort if it does not exist yet
# (a separate setup_keystone... script is responsible for creating it).
tenantId=`keystone tenant-get $l3AdminTenant 2>&1 | awk '/No tenant|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
echo "No $l3AdminTenant exists, please create one using the setup_keystone... script then re-run this script."
echo "Aborting!"
exit 1
fi
# Switch credentials to the admin user scoped to the L3 admin tenant.
# Bug fix: this previously sourced openrc with $L3AdminTenant, an undefined
# variable (shell variables are case-sensitive; the variable defined above
# is $l3AdminTenant), so the tenant argument was silently empty.
source $TOP_DIR/openrc $adminUser $l3AdminTenant
# Ensure the CSR1kv flavor exists. The awk script distinguishes three cases:
# Nova unreachable ("endpoint" in output) -> "NO SERVER", flavor missing ->
# "No", otherwise the flavor id.
echo -n "Checking if flavor '$csr1kvFlavorName' exists ..."
flavorId=`nova flavor-show $csr1kvFlavorId 2>&1 | awk '/No flavor|id|endpoint/ {
if (index($0, "endpoint") > 0) {
print "NO SERVER"; nextfile;
}
else if (index($0, "No flavor") > 0)
print "No";
else
print $4;
}'`
if [ "$flavorId" == "No" ]; then
echo " No, it does not. Creating it."
# 4096 MB RAM, 0 GB disk, 4 vCPUs; private flavor with a fixed id.
flavorId=`nova flavor-create $csr1kvFlavorName $csr1kvFlavorId 4096 0 4 --is-public False | awk -v r=$csr1kvFlavorName '$0 ~ r { print $2 }'`
elif [ "$flavorId" == "NO SERVER" ]; then
echo " Nova does not seem to be running. Skipping!"
else
echo " Yes, it does."
fi
# We disable scheduling by aggregate metadata for now.
# (This whole block is intentionally dead code behind 'if false'; kept so it
# can be re-enabled by flipping the condition.)
if false; then
echo -n "Checking if flavor '$csr1kvFlavorName' has metadata '$aggregateMetadata' ..."
# Consistency fix: use $csr1kvFlavorId instead of the hard-coded 621 so the
# flavor id is defined in exactly one place (matches the flavor-key call below).
hasMetadata=`nova flavor-show $csr1kvFlavorId 2>&1 | awk -v key=$aggregateMetadataKey -v value=$aggregateMetadataValue '
BEGIN { res = "No" }
{
if ($2 == "extra_specs" && index($4, key) > 0 && index($5, value) > 0)
res = "Yes"
}
END { print res }'`
if [ "$hasMetadata" == "No" ]; then
echo " No, it does not. Adding it."
nova flavor-key $csr1kvFlavorId set $aggregateMetadata > /dev/null 2>&1
else
echo " Yes, it does."
fi
# Ensure the network-hosts aggregate exists and capture its id.
echo -n "Checking if aggregate '$networkHostsAggregateName' exists ..."
aggregateId=`nova aggregate-list 2>&1 | awk -v name=$networkHostsAggregateName -v r=$networkHostsAggregateName"|Id" '
BEGIN { res = "No" }
$0 ~ r {
if ($2 != "Id" && $4 == name)
res = $2;
}
END { print res; }'`
if [ "$aggregateId" == "No" ]; then
echo " No, it does not. Creating it."
aggregateId=`nova aggregate-create $networkHostsAggregateName 2>&1 | awk -v name=$networkHostsAggregateName -v r=$networkHostsAggregateName"|Id" 'BEGIN { res = "No" } $0 ~ r { if ($2 != "Id" && $4 == name) res = $2; } END { print res; }'`
else
echo " Yes, it does."
fi
echo "Setting metadata for aggregate '$networkHostsAggregateName'"
nova aggregate-set-metadata $aggregateId $aggregateMetadata > /dev/null 2>&1
# Add nodes to the aggregate in a separate process that can run until
# the nova compute has started on the hosts.
( add_host_to_aggregate computeNetworkNodes ) &
fi
# Lift quota limits for the L3 admin tenant so any number of router VMs
# can be spawned (skipped when Nova was unreachable above).
if [ "$flavorId" != "NO SERVER" ]; then
echo "Removing relevant quota limits ..."
nova quota-update --cores -1 --instances -1 --ram -1 $tenantId > /dev/null 2>&1
fi
# Ensure the CSR1kv image is registered in Glance. Same three-way awk
# classification as the flavor check: "NO SERVER" / "No" / "Yes".
echo -n "Checking if image '$csr1kvImageName' exists ..."
hasImage=`glance image-show $csr1kvImageName 2>&1 | awk '
/Property|No|endpoint/ {
if (index($0, "endpoint") > 0) {
print "NO SERVER"; nextfile;
}
else if (index($0, "No image") > 0)
print "No";
else
print "Yes";
}'`
if [ "$hasImage" == "No" ]; then
echo " No, it does not. Creating it."
# NOTE(review): $csr1kvGlanceExtraParams is commented out in the defaults
# above, so it normally expands to nothing here — confirm that is intended.
glance image-create --name $csr1kvImageName --disk-format $csr1kvDiskFormat --container-format $csr1kvContainerFormat --file $csr1kvImageSrc $csr1kvGlanceExtraParams
elif [ "$hasImage" == "NO SERVER" ]; then
echo " Glance does not seem to be running. Skipping!"
else
echo " Yes, it does."
fi

View File

@ -1,86 +0,0 @@
#!/usr/bin/env bash
# Creates N1kv network profiles (when the n1kv plugin is used) plus a set of
# test networks and subnets, including one external network.
# Default values
# --------------
# osn is the name of OpenStack network service, i.e.,
# it should be 'neutron'.
osn=${1:-neutron}
plugin=${2:-n1kv}
# Parallel arrays: index i of each array describes N1kv network profile i.
n1kvPhyNwNames=(osn_phy_network osn_phy_network)
n1kvNwProfileNames=(test_net_profile1 test_net_profile2)
n1kvNwProfileTypes=(vlan vlan)
n1kvNwSubprofileTypes=(None None)
n1kvNwProfileSegRange=(500-599 600-699)
# Parallel arrays: index i describes test network/subnet i; the last entry
# is an external network with DHCP disabled and a fixed allocation pool.
testNetworks=(test_net1 test_net2 test_net3 test_net4 test_net5 test_net6 test_extnet1)
testNetworkOpts=('' '' '' '' '' '' '--router:external=True')
testSubnetNames=(test_subnet1 test_subnet2 test_subnet3 test_subnet4 test_subnet5 test_subnet6 test_extsubnet1)
testSubnetCIDRs=('10.0.11.0/24' '10.0.12.0/24' '10.0.13.0/24' '10.0.14.0/24' '10.0.15.0/24' '10.0.16.0/24' '10.0.21.0/24')
testSubnetOpts=('' '' '' '' '' '' '--disable-dhcp --allocation-pool start=10.0.21.10,end=10.0.21.254')
# Ensure a N1kv network profile exists, creating it if needed, and record
# its ID in the global nProfileId array.
# $1 index    - slot in the nProfileId array to store the result in
# $2 name     - profile name
# $3 phyNet   - physical network the profile is bound to
# $4 type     - profile type (e.g. vlan)
# $5 subType  - sub type, or "None" to omit --sub_type
# $6 segRange - segment range, or "None" to omit --segment_range
# Result: nProfileId[$index] holds the profile ID, or "None" on failure.
function get_network_profile_id() {
index=$1
name=$2
phyNet=$3
type=$4
subType=$5
segRange=$6
local c=0
local opt_param=
nProfileId[$index]=`$osn cisco-network-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
if [ "${nProfileId[$index]}" == "None" ]; then
echo " Network profile $name does not exist. Creating it."
if [ "$subType" != "None" ]; then
opt_param="--sub_type $subType"
fi
if [ "$segRange" != "None" ]; then
opt_param=$opt_param" --segment_range $segRange"
fi
$osn cisco-network-profile-create --physical_network $phyNet $opt_param $name $type
fi
# Poll (up to 6 attempts) until the newly created profile shows up.
# Bug fix: the retry loop previously read and assigned the bare $nProfileId,
# which in bash is element 0 of the array, so for any index > 0 it polled
# the wrong slot and clobbered element 0. Index the array explicitly.
while [ $c -le 5 ] && [ "${nProfileId[$index]}" == "None" ]; do
nProfileId[$index]=`$osn cisco-network-profile-list | awk 'BEGIN { res="None"; } /'"$name"'/ { res=$2; } END { print res;}'`
let c+=1
done
}
# For the n1kv plugin, make sure every required network profile exists
# before creating networks.
if [ "$plugin" == "n1kv" ]; then
echo "Verifying that required N1kv network profiles exist:"
for (( i=0; i<${#n1kvNwProfileNames[@]}; i++ )); do
echo " Checking ${n1kvNwProfileNames[$i]} ..."
get_network_profile_id $i ${n1kvNwProfileNames[$i]} ${n1kvPhyNwNames[$i]} ${n1kvNwProfileTypes[$i]} ${n1kvNwSubprofileTypes[$i]} ${n1kvNwProfileSegRange[$i]}
# NOTE(review): $nProfileId without an index is element 0 of the array,
# so this check does not look at slot $i for i > 0 — confirm intent.
if [ $nProfileId == "None" ]; then
echo " Failed to verify network profile ${n1kvNwProfileNames[$i]}, please check health of the N1kv plugin and the VSM."
echo " Aborting!"
exit 1
else
echo " Done"
fi
done
# All networks below are created against the first profile.
profile_opt='--n1kv:profile_id='${nProfileId[0]}
fi
# Create each test network if it does not already exist.
for (( i=0; i<${#testNetworks[@]}; i++)); do
echo -n "Checking if ${testNetworks[$i]} network exists ..."
hasNw=`$osn net-show ${testNetworks[$i]} 2>&1 | awk '/Unable to find|enabled/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasNw" == "No" ]; then
echo " No it does not. Creating it."
$osn net-create $profile_opt ${testNetworkOpts[$i]} ${testNetworks[$i]}
else
echo " Yes, it does."
fi
done
# Create each test subnet if it does not already exist.
for (( i=0; i<${#testSubnetNames[@]}; i++)); do
echo -n "Checking if ${testSubnetNames[$i]} subnet exists ..."
hasSubNw=`$osn subnet-show ${testSubnetNames[$i]} 2>&1 | awk '/Unable to find|Value/ { if ($1 == "Unable") print "No"; else print "Yes"; }'`
if [ "$hasSubNw" == "No" ]; then
echo " No it does not. Creating it."
$osn subnet-create --name ${testSubnetNames[$i]} ${testSubnetOpts[$i]} ${testNetworks[$i]} ${testSubnetCIDRs[$i]}
else
echo " Yes, it does."
fi
done

View File

@ -1,89 +0,0 @@
# Cisco device-manager sample configuration: credentials, hosting-device
# templates, concrete hosting devices, and plugging-driver port mappings.
[hosting_device_credentials]
# Credential record referenced by templates/devices via *credentials_id.
[cisco_hosting_device_credential:1]
name="Universal credential"
description="Credential used for all hosting devices"
user_name=stack
password=cisco
type=
[hosting_devices_templates]
# Template 1: Linux network-namespace "device" (no-op drivers).
[cisco_hosting_device_template:1]
name=NetworkNode
enabled=True
host_category=Network_Node
service_types=router:FW:VPN
image=
flavor=
default_credentials_id=1
configuration_mechanism=
protocol_port=22
booting_time=360
slot_capacity=2000
desired_slots_free=0
tenant_bound=
device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.noop_hd_driver.NoopHostingDeviceDriver
plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.noop_plugging_driver.NoopPluggingDriver
# Template 2: CSR1kv VM-based router (booted from the Glance image below).
[cisco_hosting_device_template:2]
name="CSR1kv template"
enabled=True
host_category=Hardware
service_types=router:FW:VPN
image=csr1kv_openstack_img
flavor=621
default_credentials_id=1
configuration_mechanism=
protocol_port=22
booting_time=360
slot_capacity=2000
desired_slots_free=0
tenant_bound=
device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.n1kv_ml2_trunking_driver.N1kvML2TrunkingPlugDriver
# Template 3: physical ASR1k router (no image/flavor; pre-existing hardware).
[cisco_hosting_device_template:3]
name="ASR1k template"
enabled=True
host_category=Hardware
service_types=router:FW:VPN
image=
flavor=
default_credentials_id=1
configuration_mechanism=
protocol_port=22
booting_time=360
slot_capacity=4000
desired_slots_free=0
tenant_bound=
device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.noop_hd_driver.NoopHostingDeviceDriver
plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.hw_vlan_trunking_driver.HwVLANTrunkingPlugDriver
[hosting_devices]
# Two pre-registered ASR1k devices built from template 3; auto_delete=False
# keeps them from being removed automatically.
[cisco_hosting_device:3]
template_id=3
credentials_id=1
device_id=SN:abcd1234efgh
admin_state_up=True
management_ip_address=10.86.7.54
protocol_port=22
tenant_bound=
auto_delete=False
[cisco_hosting_device:4]
template_id=3
credentials_id=1
device_id=SN:efgh5678ijkl
admin_state_up=True
management_ip_address=10.86.7.55
protocol_port=22
tenant_bound=
auto_delete=False
[plugging_drivers]
# Per-device trunk interface mapping; '*' applies to any network.
[HwVLANTrunkingPlugDriver:3]
internal_net_interface_1=*:GigabitEthernet0/0/0
external_net_interface_1=*:GigabitEthernet0/0/0
[HwVLANTrunkingPlugDriver:4]
internal_net_interface_1=*:GigabitEthernet0/2/1
external_net_interface_1=*:GigabitEthernet0/2/1

View File

@ -1,37 +0,0 @@
# Cisco L3 routing sample configuration: router types, each tied (via
# template_id) to a hosting-device template defined in the device-manager
# configuration.
[router_types]
# Type 1: standard Neutron namespace router; no Cisco drivers involved.
[cisco_router_type:1]
name=Namespace_Neutron_router
description="Neutron router implemented in Linux network namespace"
template_id=1
ha_enabled_by_default=False
shared=True
slot_need=0
scheduler=
driver=
cfg_agent_service_helper=
cfg_agent_driver=
# Type 2: router hosted in a CSR1kv VM (template 2).
[cisco_router_type:2]
name=CSR1kv_router
description="Neutron router implemented in Cisco CSR1kv device"
template_id=2
ha_enabled_by_default=False
shared=True
slot_need=10
scheduler=networking_cisco.plugins.cisco.l3.schedulers.l3_router_hosting_device_scheduler.L3RouterHostingDeviceLongestRunningScheduler
driver=networking_cisco.plugins.cisco.l3.drivers.noop_routertype_driver.NoopL3RouterDriver
cfg_agent_service_helper=networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
cfg_agent_driver=networking_cisco.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
# Type 3: router hosted on a physical ASR1k (template 3); HA on by default.
[cisco_router_type:3]
name=ASR1k_router
description="Neutron router implemented in Cisco ASR1k device"
template_id=3
ha_enabled_by_default=True
shared=True
slot_need=1
scheduler=networking_cisco.plugins.cisco.l3.schedulers.l3_router_hosting_device_scheduler.L3RouterHostingDeviceHARandomScheduler
driver=networking_cisco.plugins.cisco.l3.drivers.asr1k.asr1k_routertype_driver.ASR1kL3RouterDriver
cfg_agent_service_helper=networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
cfg_agent_driver=networking_cisco.plugins.cisco.cfg_agent.device_drivers.asr1k.asr1k_routing_driver.ASR1kRoutingDriver

View File

@ -1,70 +0,0 @@
#!/usr/bin/env bash
# DevStack plugin entry point for networking-cisco. DevStack calls this
# script repeatedly with a phase in $1 ("source", "stack", "unstack",
# "clean") and, for "stack", a sub-phase in $2.
DIR_CISCO=$DEST/networking-cisco
if is_service_enabled net-cisco; then
if [[ "$1" == "source" ]]; then
:
fi
# Note the ':' after 'then' is a deliberate no-op placeholder.
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then :
if is_service_enabled cisco-saf; then
source $DIR_CISCO/devstack/saf/cisco_saf
echo "Setting up config for cisco-saf"
setup_saf_config $DIR_CISCO
fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
cd $DIR_CISCO
echo "Installing Networking-Cisco"
setup_develop $DIR_CISCO
if is_service_enabled cisco-fwaas; then
echo "Installing neutron-fwaas"
source $DIR_CISCO/devstack/csr1kv/cisco_fwaas
install_cisco_fwaas
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled q-ciscorouter && is_service_enabled ciscocfgagent; then
source $DIR_CISCO/devstack/csr1kv/cisco_neutron
if is_service_enabled cisco-fwaas; then
configure_cisco_fwaas
fi
configure_cisco_csr_router
fi
if is_service_enabled cisco-saf; then
echo "Adding cisco-saf configuration parameters"
configure_cisco_saf $DIR_CISCO
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
if is_service_enabled q-ciscorouter && is_service_enabled ciscocfgagent; then
if is_service_enabled cisco-fwaas; then
start_cisco_fwaas
fi
start_cisco_csr_router
fi
if is_service_enabled cisco-saf; then
echo "Starting cisco-saf processes"
start_cisco_saf_processes
fi
fi
# Teardown: stop neutron pieces and, when enabled, the SAF processes.
if [[ "$1" == "unstack" ]]; then
source $DIR_CISCO/devstack/csr1kv/cisco_neutron
net_stop_neutron
if is_service_enabled cisco-saf; then
source $DIR_CISCO/devstack/saf/cisco_saf
echo "Stop cisco-saf processes"
stop_cisco_saf_processes
fi
fi
if [[ "$1" == "clean" ]]; then
:
fi
fi

View File

@ -1,262 +0,0 @@
#!/usr/bin/env bash
#
# cisco-saf
#
# This file contains functions that are used by devstack plugins.sh when
# cisco-saf is enabled.
#
# Save trace setting
SAF_XTRACE=$(set +o | grep xtrace)
set +o xtrace
#
# Default settings
#
# DCNM access defaults.
ENABLER_DCNM_USER=${ENABLER_DCNM_USER:-root}
ENABLER_DCNM_AMQP_USER=${ENABLER_DCNM_AMQP_USER:-admin}
# Segmentation ID range handed to the fabric enabler.
ENABLER_SEG_ID_MIN=${ENABLER_SEG_ID_MIN:-10000}
ENABLER_SEG_ID_MAX=${ENABLER_SEG_ID_MAX:-20000}
# Logging defaults.
ENABLER_LOG_LEVEL=${ENABLER_LOG_LEVEL:-WARNING}
ENABLER_LOG_DIR=${ENABLER_LOG_DIR:-~/Logs}
ENABLER_LOG_FILE=${ENABLER_LOG_FILE:-fabric_enabler.log}
# Enabler database defaults (password inherited from devstack's MYSQL_PASSWORD).
ENABLER_DB_NAME=${ENABLER_DB_NAME:-cisco_dfa}
ENABLER_MYSQL_USER=${ENABLER_MYSQL_USER:-dfa}
ENABLER_MYSQL_HOST=${MYSQL_HOST}
ENABLER_DB_PASSWORD=${MYSQL_PASSWORD}
ENABLER_DCNM_DHCP=${ENABLER_DCNM_DHCP:-False}
NETWORKING_CISCO_ETC_SAF_DIR="etc/saf"
# Fabric type: 'evpn' (default) or 'dfa'; selects the profile sets below.
ENABLER_FABRIC_TYPE=${FABRIC_TYPE:-evpn}
# Firewall service IP pools and profile names, keyed by fabric type and
# firewall device kind (phy_asa = dynamic routing, native = static).
ENABLER_FIREWALL_IN_IP_START=${FIREWALL_IN_IP_START:-100.121.10.0/24}
ENABLER_FIREWALL_IN_IP_END=${FIREWALL_IN_IP_END:-100.121.200.0/24}
ENABLER_FIREWALL_OUT_IP_START=${FIREWALL_OUT_IP_START:-200.121.10.0/24}
ENABLER_FIREWALL_OUT_IP_END=${FIREWALL_OUT_IP_END:-200.121.200.0/24}
ENABLER_FIREWALL_DUMMY_SUBNET=${FIREWALL_DUMMY_SUBNET:-9.9.9.0/24}
ENABLER_FIREWALL_HOST_NETWORK_PROFILE_EVPN_DYNAMIC=${FIREWALL_HOST_NETWORK_PROFILE_EVPN_DYNAMIC:-'serviceNetworkDynamicRoutingESChainLBESEvpnProfile'}
ENABLER_FIREWALL_HOST_NETWORK_PROFILE_EVPN_STATIC=${FIREWALL_HOST_NETWORK_PROFILE_EVPN_STATIC:-'serviceNetworkEvpnProfile'}
ENABLER_FIREWALL_HOST_NETWORK_PROFILE_DFA_DYNAMIC=${FIREWALL_HOST_NETWORK_PROFILE_DFA_DYNAMIC:-'serviceNetworkUniversalDynamicRoutingESProfile'}
ENABLER_FIREWALL_HOST_NETWORK_PROFILE_DFA_STATIC=${FIREWALL_HOST_NETWORK_PROFILE_DFA_STATIC:-'serviceNetworkUniversalTfStaticRoutingProfile'}
ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_DYNAMIC=${FIREWALL_EXT_PART_VRF_PROFILE_EVPN_DYNAMIC:-'vrf-common-evpn-external-dynamic-ES'}
ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_STATIC=${FIREWALL_EXT_PART_VRF_PROFILE_EVPN_STATIC:-'vrf-common-universal-external-static'}
ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_DYNAMIC=${FIREWALL_EXT_PART_VRF_PROFILE_DFA_DYNAMIC:-'vrf-common-universal-external-dynamic-ES'}
ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_STATIC=${FIREWALL_EXT_PART_VRF_PROFILE_DFA_STATIC:-'vrf-common-universal-external-static'}
ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_DYNAMIC=${FIREWALL_EXT_NETWORK_PROFILE_EVPN_DYNAMIC:-'externalNetworkDynamicRoutingESEvpnProfile'}
ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_STATIC=${FIREWALL_EXT_NETWORK_PROFILE_EVPN_STATIC:-'externalNetworkESEvpnProfile'}
ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_DYNAMIC=${FIREWALL_EXT_NETWORK_PROFILE_DFA_DYNAMIC:-'externalNetworkUniversalDynamicRoutingESProfile'}
ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_STATIC=${FIREWALL_EXT_NETWORK_PROFILE_DFA_STATIC:-'externalNetworkUniversalTfStaticRoutingESProfile'}
# Setup configuration parameters in enabler_conf.ini
# Writes DCNM, RPC, MySQL, logging and (optionally) firewall settings into
# <repo>/etc/saf/enabler_conf.ini using devstack's iniset helper.
# $1 - root of the networking-cisco source tree.
function setup_saf_config {
CFG_DIR=$1/${NETWORKING_CISCO_ETC_SAF_DIR}
local cfgfile=${CFG_DIR}/enabler_conf.ini
# DCNM settings
iniset $cfgfile dcnm dcnm_ip $ENABLER_DCNM_IP_ADDR
iniset $cfgfile dcnm dcnm_user $ENABLER_DCNM_USER
iniset $cfgfile dcnm dcnm_amqp_user $ENABLER_DCNM_AMQP_USER
iniset $cfgfile dcnm dcnm_password $ENABLER_DCNM_PASSWORD
iniset $cfgfile dcnm segmentation_id_min $ENABLER_SEG_ID_MIN
iniset $cfgfile dcnm segmentation_id_max $ENABLER_SEG_ID_MAX
iniset $cfgfile dcnm dcnm_dhcp $ENABLER_DCNM_DHCP
# RPC setting
iniset $cfgfile dfa_rpc transport_url "rabbit://${RABBIT_USERID}:${RABBIT_PASSWORD}@${SERVICE_HOST}:5672//"
# MySQL setting
iniset $cfgfile dfa_mysql connection mysql://$ENABLER_MYSQL_USER:$ENABLER_DB_PASSWORD@$ENABLER_MYSQL_HOST/$ENABLER_DB_NAME?charset=utf8
# Log setting
iniset $cfgfile dfa_log log_dir $ENABLER_LOG_DIR
iniset $cfgfile dfa_log log_file $ENABLER_LOG_FILE
iniset $cfgfile dfa_log log_level $ENABLER_LOG_LEVEL
# Firewall settings apply only when FIREWALL_DEVICE is set; its first
# comma-separated element selects the device kind (phy_asa or native).
FW_DEV=`echo $FIREWALL_DEVICE | awk -F ',' '{print $1}'`
if [[ $FW_DEV != '' ]]; then
iniset $cfgfile firewall device "[$FIREWALL_DEVICE]"
iniset $cfgfile firewall fw_mgmt_ip $FIREWALL_MGMT_IP
# Credentials/interfaces are only meaningful for physical devices.
if [[ $FW_DEV != 'native' ]]; then
iniset $cfgfile firewall fw_username $FIREWALL_USERNAME
iniset $cfgfile firewall fw_password $FIREWALL_PASSWORD
iniset $cfgfile firewall fw_interface_in $FIREWALL_INTERFACE_IN
iniset $cfgfile firewall fw_interface_out $FIREWALL_INTERFACE_OUT
fi
iniset $cfgfile firewall fw_service_in_ip_start $ENABLER_FIREWALL_IN_IP_START
iniset $cfgfile firewall fw_service_in_ip_end $ENABLER_FIREWALL_IN_IP_END
iniset $cfgfile firewall fw_service_out_ip_start $ENABLER_FIREWALL_OUT_IP_START
iniset $cfgfile firewall fw_service_out_ip_end $ENABLER_FIREWALL_OUT_IP_END
iniset $cfgfile firewall fw_service_dummy_ip_subnet $ENABLER_FIREWALL_DUMMY_SUBNET
if [[ $ENABLER_FABRIC_TYPE == 'evpn' ]]; then
if [[ $FW_DEV == 'phy_asa' ]]; then
iniset $cfgfile firewall fw_service_host_profile $ENABLER_FIREWALL_HOST_NETWORK_PROFILE_EVPN_DYNAMIC
iniset $cfgfile firewall fw_service_part_vrf_profile $ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_DYNAMIC
iniset $cfgfile firewall fw_service_ext_profile $ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_DYNAMIC
elif [[ $FW_DEV == 'native' ]]; then
iniset $cfgfile firewall fw_service_host_profile $ENABLER_FIREWALL_HOST_NETWORK_PROFILE_EVPN_STATIC
iniset $cfgfile firewall fw_service_part_vrf_profile $ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_STATIC
iniset $cfgfile firewall fw_service_ext_profile $ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_STATIC
fi
# Consistency fix: test the defaulted $ENABLER_FABRIC_TYPE here, matching
# the 'evpn' branch above (previously this read the raw $FABRIC_TYPE;
# behavior is identical since ENABLER_FABRIC_TYPE=${FABRIC_TYPE:-evpn}).
elif [[ $ENABLER_FABRIC_TYPE == 'dfa' ]]; then
if [[ $FW_DEV == 'phy_asa' ]]; then
iniset $cfgfile firewall fw_service_host_profile $ENABLER_FIREWALL_HOST_NETWORK_PROFILE_DFA_DYNAMIC
iniset $cfgfile firewall fw_service_part_vrf_profile $ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_DYNAMIC
iniset $cfgfile firewall fw_service_ext_profile $ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_DYNAMIC
elif [[ $FW_DEV == 'native' ]]; then
iniset $cfgfile firewall fw_service_host_profile $ENABLER_FIREWALL_HOST_NETWORK_PROFILE_DFA_STATIC
iniset $cfgfile firewall fw_service_part_vrf_profile $ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_STATIC
iniset $cfgfile firewall fw_service_ext_profile $ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_STATIC
fi
fi
fi
}
# Install the Python packages listed in the SAF requirements file.
# $1 - root of the networking-cisco source tree.
# Exits with status 1 if the requirements file is missing.
function install_saf_requirements {
SAF_DIR=$1/networking_cisco/apps/saf
local req_file=$SAF_DIR/requirements.txt
# Guard clause: bail out early when there is nothing to install from.
if [[ ! -e $req_file ]]; then
echo "Requirement file ${SAF_DIR}/requirements.txt does not exist."
exit 1
fi
echo "Installing packages from ${SAF_DIR}/requirements.txt"
sudo -E pip install -r $req_file
}
# Create database for cisco-saf
# Creates the enabler database (idempotent) and, if the enabler MySQL user
# does not exist yet, creates it and grants it full privileges.
function _config_database {
# Returns a 'user <0|1>' string: 1 if the enabler user already exists.
mysqluser=$(mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -e "SELECT EXISTS(SELECT DISTINCT user FROM mysql.user WHERE user='$ENABLER_MYSQL_USER' AND host='$ENABLER_MYSQL_HOST')as user;")
echo "Create ${ENABLER_DB_NAME} database."
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE IF NOT EXISTS ${ENABLER_DB_NAME} CHARACTER SET utf8;"
# Strip the 'user' header so only the 0/1 flag remains.
val=(`echo $mysqluser | tr -d 'user '`)
if [[ $val == 0 ]]; then
# Mysql user does not exist. Create new one and set permission.
echo "Creating user ${ENABLER_MYSQL_USER} for ${ENABLER_DB_NAME}."
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h$MYSQL_HOST -e "CREATE USER '$ENABLER_MYSQL_USER'@'$ENABLER_MYSQL_HOST' IDENTIFIED BY '$ENABLER_DB_PASSWORD';"
# NOTE(review): this GRANT connects to 127.0.0.1 while the other calls use
# $MYSQL_HOST — confirm whether that asymmetry is intentional.
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$ENABLER_MYSQL_USER'@'$ENABLER_MYSQL_HOST';"
fi
}
# Install the enabler configuration file into /etc/saf.
# $1 - directory containing enabler_conf.ini.
function _copy_fabric_enabler_config_file {
local src_dir=$1
sudo mkdir -p /etc/saf
sudo cp $src_dir/enabler_conf.ini /etc/saf
}
# Copy the upstart files
# Installs the fabric-enabler service definitions for the init system:
# upstart .conf files on Ubuntu, systemd units on Fedora. The server unit
# is installed on q-svc nodes, the agent unit on q-agt nodes.
# $1 - directory containing the init files.
function _copy_fabric_enabler_upstart_files {
if is_ubuntu; then
if is_service_enabled q-svc; then
sudo cp $1/fabric-enabler-server.conf /etc/init/
fi
if is_service_enabled q-agt; then
sudo cp $1/fabric-enabler-agent.conf /etc/init/
fi
elif is_fedora; then
if is_service_enabled q-svc; then
sudo cp $1/fabric-enabler-server.service /usr/lib/systemd/system/
sudo systemctl enable fabric-enabler-server.service
fi
if is_service_enabled q-agt; then
sudo cp $1/fabric-enabler-agent.service /usr/lib/systemd/system/
sudo systemctl enable fabric-enabler-agent.service
fi
else
exit_distro_not_supported "list of packages"
fi
}
# Start fabric enabler processes
# Starts the enabler server (on q-svc nodes) and agent (on q-agt nodes)
# via upstart on Ubuntu or systemd on Fedora.
function _start_fabric_enabler_processes {
if is_ubuntu; then
if is_service_enabled q-svc; then
sudo start fabric-enabler-server
fi
if is_service_enabled q-agt; then
sudo start fabric-enabler-agent
fi
elif is_fedora; then
if is_service_enabled q-svc; then
sudo systemctl start fabric-enabler-server.service
fi
if is_service_enabled q-agt; then
sudo systemctl start fabric-enabler-agent.service
fi
else
exit_distro_not_supported "list of packages"
fi
}
# Stop fabric enabler processes
# Mirror of _start_fabric_enabler_processes: stops the server/agent via
# upstart on Ubuntu or systemd on Fedora.
function _stop_fabric_enabler_processes {
if is_ubuntu; then
if is_service_enabled q-svc; then
sudo stop fabric-enabler-server
fi
if is_service_enabled q-agt; then
sudo stop fabric-enabler-agent
fi
elif is_fedora; then
if is_service_enabled q-svc; then
sudo systemctl stop fabric-enabler-server.service
fi
if is_service_enabled q-agt; then
sudo systemctl stop fabric-enabler-agent.service
fi
else
exit_distro_not_supported "list of packages"
fi
}
# Set the configuration parameters in neutron, nova and keystone that
# are needed for cisco-saf service.
# Creates the enabler database (when mysql is enabled) and installs the
# init-system unit files and enabler_conf.ini.
# $1 - root of the networking-cisco source tree.
function configure_cisco_saf {
SAF_INIT_DIR=$1/${NETWORKING_CISCO_ETC_SAF_DIR}/init
ETC_SAF_DIR=$1/${NETWORKING_CISCO_ETC_SAF_DIR}
# Create database for cisco-saf
if is_service_enabled mysql; then
_config_database
fi
# Copy the upstart files
_copy_fabric_enabler_upstart_files ${SAF_INIT_DIR}
# Copy config file
_copy_fabric_enabler_config_file ${ETC_SAF_DIR}
}
# Start fabric enabler processes.
# Public entry point called from the devstack plugin's "extra" phase.
function start_cisco_saf_processes {
_start_fabric_enabler_processes
}
# Stop fabric enabler processes.
# Public entry point called from the devstack plugin's "unstack" phase.
function stop_cisco_saf_processes {
_stop_fabric_enabler_processes
}
# Clean up cisco-saf service
# Currently only stops the processes; config/install removal is left as a
# placeholder (see the comments below).
function cleanup_cisco_saf_service {
# Stop processes
stop_cisco_saf_processes
# Remove configurations
# Clean up installation
}
# Restore trace setting
$SAF_XTRACE

View File

@ -1,93 +0,0 @@
# This file contains setting for installing Cisco Standalone OpenStack
# fabric enabler using devstack.
#
# This local.conf file contains setting that is required on OpenStack
# compute nodes.
#
# NOTE: Modify the required parameters based on your setting in below,
# and then copy this file to the root DevStack directory as local.conf.
#
[[local|localrc]]
OFFLINE=False
RECLONE=yes
IP_VERSION=4
#
# Modify the following parameters based on your setup
#
HOST_IP=<ip address of the server>
SERVICE_HOST=<ip address of openstack controller node>
RABBIT_PASSWORD=<rabbitmq password>
RABBIT_USERID=<rabbitmq user id>
SERVICE_TOKEN=<token, usually password or uuid>
SERVICE_PASSWORD=<service password>
ADMIN_PASSWORD=<admin password>
#----------------------------------------------------
RABBIT_HOST=$SERVICE_HOST
# enabler_conf.ini settings
#----------------------------------------------------
# ENABLER_LOG_LEVEL : debug level for enabler process, default is WARNING
# Example:
# ENABLER_LOG_LEVEL=DEBUG
#
# ENABLER_LOG_LEVEL=<debug level>
# Enable/Disable services
disable_all_services
enable_service q-agt
enable_service neutron
enable_service n-novnc
enable_service n-cpu
# Add networking-cisco Repository
enable_plugin networking-cisco https://github.com/openstack/networking-cisco.git master
enable_service net-cisco
# Enable Cisco SAF
enable_service cisco-saf
# Log
VERBOSE=True
DEBUG=True
USE_SCREEN=True
SCREEN_LOGDIR=/opt/stack/logs
LOGFILE=${SCREEN_LOGDIR}/stack.sh.log
# VNC
NOVA_VNC_ENABLED=True
VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
VNCSERVER_LISTEN=0.0.0.0
# Github base url
GIT_BASE=https://github.com
# Installation path
DEST=/opt/stack
DATA_DIR=$DEST/stack/data
# ML2 plugin and agent settings
Q_PLUGIN=ml2
Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch
ENABLE_TENANT_TUNNELS=False
Q_ML2_TENANT_NETWORK_TYPE=local
Q_ML2_PLUGIN_TYPE_DRIVERS=local
OVS_ENABLE_TUNNELING=False
PHYSICAL_NETWORK=ethd
OVS_PHYSICAL_BRIDGE=br-ethd
ENABLE_TENANT_VLANS=False
# Post Config parameters
#
# ml2_conf.ini
[[post-config|/$Q_PLUGIN_CONF_FILE]]
[agent]
arp_responder = False
prevent_arp_spoofing = False

View File

@ -1,323 +0,0 @@
# This file contains setting for installing Cisco Standalone OpenStack
# fabric enabler using devstack.
#
# This local.conf file contains setting that is required on a OpenStack
# controller node.
#
# NOTE: Modify the required parameters based on your setting in below,
# and then copy this file to the root DevStack directory as local.conf.
#
#
[[local|localrc]]
OFFLINE=False
RECLONE=yes
IP_VERSION=4
#----------------------------------------------------
#
# Modify the following parameters based on your setup
#
HOST_IP=<ip address of the server>
RABBIT_PASSWORD=<rabbitmq password>
RABBIT_USERID=<rabbitmq userid>
SERVICE_TOKEN=<token, usually password or uuid>
SERVICE_PASSWORD=<services password>
ADMIN_PASSWORD=<admin password>
MYSQL_PASSWORD=<mysql password>
#
#enabler_conf.ini settings
#
# The following parameters can be used as default. The default values can be
# changed if they are not desirable.
#
# DCNM credentials:
# ENABLER_DCNM_USER : DCNM user id, default is root
# ENABLER_DCNM_AMQP_USER: DCNM rabbitmq user id, default is admin
# ENABLER_DCNM_DHCP : Use dcnm dhcp server or not, default is False
# Segmentation ID range:
# ENABLER_SEG_ID_MIN : segmentation ID minimum value, default is 10000
# ENABLER_SEG_ID_MAX : segmentation ID maximum value, default is 20000
# Debug settings:
# ENABLER_LOG_LEVEL : debug level for enabler process, default is WARNING
# ENABLER_LOG_DIR : debug log directory, default is ~/Logs
# ENABLER_LOG_FILE : debug log file name, default is fabric_enabler.log
# Database settings:
# ENABLER_DB_NAME : Enabler database name, default is cisco_dfa
# ENABLER_MYSQL_USER : Enabler database user name, default is dfa
# ENABLER_DB_PASSWORD : Enabler database password, default is MYSQL_PASSWORD
ENABLER_LOG_LEVEL=<debug level>
ENABLER_DCNM_IP_ADDR=<DCNM IP address>
ENABLER_DCNM_PASSWORD=<DCNM password>
#FWaaS Settings
# FABRIC_TYPE : Specifies if the fabric is EVPN or FabricPath(DFA)
# Default is EVPN. For DFA, set this as:
# FABRIC_TYPE='dfa'
# For explicitly setting to EVPN, do:
# FABRIC_TYPE='evpn'
#
# Firewall Device Specific Configs
#
# FIREWALL_DEVICE : Specifies the type of firewall. Currently
# supported devices are Physical ASA and Native
# Openstack FW using IPTables.
# More than one ASA for FW load balancing is also
# supported.
# For Native, set this as FIREWALL_DEVICE=native
# For Phy ASA, set this as FIREWALL_DEVICE=phy_asa
# For more than one Phy ASA device, set this as:
# FIREWALL_DEVICE=[phy_asa, phy_asa]
# FIREWALL_MGMT_IP : Specifies the Management IP address of FW device.
# Set this to a random value for Native FW.
# Example when there are two ASA FW devices with
#                          Management IP address of 1.1.1.1 and 2.2.2.2:
# FIREWALL_MGMT_IP=[1.1.1.1,2.2.2.2]
# The IP addresses are delimited with a comma.
# For Native FW, you will set this like:
# FIREWALL_MGMT_IP=[2.3.4.5]
# The IP address for native FW value is ignored, it
# can be set to any value for Native FW. This line
# is currently needed to work.
# FIREWALL_USERNAME : Specifies the Firewall username for Physical ASA.
# This line is not needed for Native Firewall. For,
# a single ASA device with username as admin:
# FIREWALL_USERNAME=[admin]
# For two ASA devices:
# FIREWALL_USERNAME=[admin,admin]
# The first value corresponds to the username for
# the first device, second value to the second
# device and so on. It's again delimited with a
# comma.
# FIREWALL_PASSWORD     : Specifies the Firewall password for Physical ASA.
# This line is not needed for Native Firewall. Use
# the same semantics as that of USERNAME above to
# set the values.
# FIREWALL_INTERFACE_IN : Specifies the interface in the physical firewall
# through which traffic will be ingressing to the
# firewall from the protected host network.
# This line is not needed for Native Firewall. Use
# the same semantics as that of USERNAME above to
# set the values. As an example:
# FIREWALL_INTERFACE_IN=[Gi0/0,Gi2/0]
# The above lines specifies Gi0/0 and Gi2/0 as the
# 'in' interfaces for the first and second firewall
# respectively.
# FIREWALL_INTERFACE_OUT: Specifies the interface in the physical firewall
# through which traffic will be egressing out of the
# firewall to the external network.
# This line is not needed for Native Firewall. Use
#                          the same semantics as that of FIREWALL_INTERFACE_IN
#                          to set the values.
#
# Common Firewall Configs
#
# FIREWALL_IN_IP_START : Specifies the start of the subnet pool for the
# 'in' service network.
# This is optional to set.
# Default of 100.121.10.0/24 will be used,
# if this value is not set. As an example:
# FIREWALL_IN_IP_START=100.122.10.0/24
# In this example, for a 24 subnet, the first
# service IN network will have 100.122.10.x, and
# subsequent ones will have 100.122.11.x and so on.
# FIREWALL_IN_IP_END : Specifies the end of the subnet pool for the
# 'in' service network.
# This is optional to set.
# Default of 100.121.200.0/24 will be used,
# if this value is not set. As an example:
# FIREWALL_IN_IP_END=100.122.200.0/24
# FIREWALL_OUT_IP_START : Specifies the start of the subnet pool for the
# 'out' service network.
# This is optional to set.
# Default of 200.121.10.0/24 will be used,
# if this value is not set. As an example:
# FIREWALL_OUT_IP_START=200.122.10.0/24
# In this example, for a 24 subnet, the first
# service OUT network will have 200.122.10.x, and
# subsequent ones will have 200.122.11.x and so on.
# FIREWALL_OUT_IP_END   : Specifies the end of the subnet pool for the
# 'out' service network.
# This is optional to set.
# Default of 200.121.200.0/24 will be used,
# if this value is not set. As an example:
# FIREWALL_OUT_IP_END=200.122.200.0/24
# FIREWALL_DUMMY_SUBNET : This specifies the subnet for the dummy
# interface. Openstack needs a router with an
# interface for the FW rules to be applied. So,
# enabler needs to create a router with a dummy
# interface so that FW can get activated.
# This is optional to set.
# Default of 9.9.9.0/24 will be used if this value
# is not set.
#
# For expert usage:
#
# The below parameters are optional, the default values will be filled
# based on the fabric type and firewall.
# All the below parameters are optional and is not recommended to be set
# unless one understands the profiles and SAF functionality very well.
#
# FIREWALL_HOST_NETWORK_PROFILE_EVPN_DYNAMIC : This specifies the profile
# of the 'in' service network
# for EVPN fabric where the
# firewall can run a routing
# protocol (e.g ASA)
# FIREWALL_HOST_NETWORK_PROFILE_EVPN_STATIC : This specifies the profile
# of the 'in' service network
# for EVPN fabric where the
# firewall cannot run a routing
# protocol (e.g native)
# FIREWALL_HOST_NETWORK_PROFILE_DFA_DYNAMIC : This specifies the profile
# of the 'in' service network
# for DFA fabric where the
# firewall can run a routing
# protocol (e.g ASA)
# FIREWALL_HOST_NETWORK_PROFILE_DFA_STATIC : This specifies the profile
# of the 'in' service network
# for DFA fabric where the
# firewall cannot run a routing
# protocol (e.g native)
#
# ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_DYNAMIC : This specifies the
# profile for out
# service partition for EVPN
# fabric where the firewall
# can run a routing
# protocol.
# ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_EVPN_STATIC : This specifies the
# profile for out
# service partition for EVPN
# fabric where the firewall
# cannot run a routing
# protocol.
# ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_DYNAMIC : This specifies the
# profile for out
# service partition for DFA
# fabric where the firewall
# can run a routing
# protocol.
# ENABLER_FIREWALL_EXT_PART_VRF_PROFILE_DFA_STATIC : This specifies the
# profile for out
# service partition for DFA
# fabric where the firewall
# cannot run a routing
# protocol.
#
# ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_DYNAMIC : This specifies the
# profile of the 'out' service
# network for EVPN fabric
# where the firewall can run a
# routing protocol.
# ENABLER_FIREWALL_EXT_NETWORK_PROFILE_EVPN_STATIC : This specifies the
# profile of the 'out' service
# network for EVPN fabric
# where the firewall cannot
# run a routing protocol.
# ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_DYNAMIC : This specifies the
# profile of the 'out' service
# network for DFA fabric
# where the firewall can run a
# routing protocol.
# ENABLER_FIREWALL_EXT_NETWORK_PROFILE_DFA_STATIC : This specifies the
# profile of the 'out' service
# network for DFA fabric
# where the firewall cannot
# run a routing protocol.
#----------------------------------------------------
DATABASE_TYPE=mysql
SERVICE_HOST=$HOST_IP
MYSQL_HOST=$SERVICE_HOST
# Enable/Disable services
enable_service n-sch
disable_service n-net
enable_service q-svc
enable_service q-agt
enable_service neutron
enable_service q-dhcp
disable_service q-l3
# Add networking-cisco Repository
enable_plugin networking-cisco https://github.com/openstack/networking-cisco.git master
enable_service net-cisco
# Enable Cisco SAF
enable_service cisco-saf
# Log
VERBOSE=True
DEBUG=True
USE_SCREEN=True
SCREEN_LOGDIR=/opt/stack/logs
LOGFILE=${SCREEN_LOGDIR}/stack.sh.log
# VNC
VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
VNCSERVER_LISTEN=0.0.0.0
# Github base url
GIT_BASE=https://github.com
# Installation path
DEST=/opt/stack
DATA_DIR=$DEST/stack/data
# ML2 plugin and agent settings
Q_PLUGIN=ml2
Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch
ENABLE_TENANT_TUNNELS=False
Q_ML2_TENANT_NETWORK_TYPE=local
Q_ML2_PLUGIN_TYPE_DRIVERS=local
OVS_ENABLE_TUNNELING=False
PHYSICAL_NETWORK=ethd
OVS_PHYSICAL_BRIDGE=br-ethd
ENABLE_TENANT_VLANS=False
#----------------------------------------------------
# Post Config parameters
#
# neutron.conf
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
notification_driver=messaging
notification_topics=cisco_dfa_neutron_notify
rpc_backend=rabbit
[keystone_authtoken]
auth_host=$SERVICE_HOST
admin_tenant_name=service
admin_user=neutron
admin_password=$ADMIN_PASSWORD
# ml2_config.ini
[[post-config|/$Q_PLUGIN_CONF_FILE]]
[agent]
arp_responder = False
prevent_arp_spoofing = False
# nova.conf
[[post-config|$NOVA_CONF]]
[keystone_authtoken]
auth_host=$SERVICE_HOST
admin_tenant_name=service
admin_user=nova
admin_password=$ADMIN_PASSWORD
# keystone.conf
[[post-config|$KEYSTONE_CONF]]
[DEFAULT]
notification_driver=messaging
notification_topics=cisco_dfa_keystone_notify
rpc_backend=rabbit
admin_endpoint=http://$SERVICE_HOST:%(admin_port)s/
#----------------------------------------------------

View File

View File

@ -1,133 +0,0 @@
===================================
Cisco Prime Network Registrar (PNR)
===================================
1. General
----------
This is an installation guide for enabling
Cisco Prime Network Registrar (PNR) support on OpenStack.
Please refer to PNR installation guide
(http://www.cisco.com/c/en/us/support/cloud-systems-management/prime-network-registrar/tsd-products-support-series-home.html)
for how to install and bring up the PNR.
The Neutron DHCP agent in the OpenStack environment needs to be setup
to communicate with the PNR DHCP server and with the PNR DNS server.
The PNR DHCP server performs leasing operations and PNR DNS server
resolves DNS queries, these two servers replace dnsmasq.
This guide does not cover OpenStack installation.
2. Prerequisites
----------------
The prerequisites for installing the PNR OpenStack enabler are the
following:
- Install PNR with required DNS and DHCP licenses.
- Disable dnsmasq or other DNS/DHCP services.
3. PNR plugin Installation
--------------------------
:3.1 Using devstack:
In this scenario, the PNR plugin will be installed along with OpenStack
using devstack.
1. Clone devstack.
2. Add this repo as an external repository:
::
> cat local.conf
[[local|localrc]]
enable_plugin networking-cisco https://git.openstack.org/openstack/networking-cisco.git
enable_service net-cisco
3. :command:`./stack.sh`
:3.2 On a setup with OpenStack already installed:
In this scenario, the PNR plugin will be installed on a setup which has
OpenStack installed already:
1. Clone networking-cisco_.
.. _networking-cisco: https://github.com/openstack/networking-cisco
2. :command:`cd networking-cisco`
3. :command:`sudo python networking_cisco/setup.py install`
4. The following modifications are needed in the ``dhcp_agent.ini``
file.
Change the DHCP driver from dnsmasq to PNR.
.. code-block:: ini
[DEFAULT]
#dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_driver = networking_cisco.plugins.cisco.cpnr.dhcp_driver.CpnrDriver
Add the following new section to the ``dhcp_agent.ini`` file
with the details for contacting the PNR local server.
.. code-block:: ini
[cisco_pnr]
http_server = http://<pnr_localcluster_ipaddress>:8080
http_username = <pnr_localcluster_username>
http_password = <pnr_localcluster_password>
external_interface = eth0
dhcp_server_addr = <pnr_localcluster_ipaddress>
dhcp_server_port = 67
dns_server_addr = <pnr_localcluster_ipaddress>
dns_server_port = 53
Change the <pnr_localcluster_ipaddress> to the IP
address of the local PNR VM.
Change the <pnr_localcluster_username> and
<pnr_localcluster_password> to the same username
and password provided during PNR installation.
If you are using HTTPS with a valid SSL certificate,
change the scheme in http_server config variable to
'https' and the port number in the address to the
appropriate port (default 8443).
If you do not want to verify SSL certificates, add a
config variable to ``dhcp_agent.ini`` file.
.. code-block:: ini
[cisco_pnr]
insecure = True
Note that using the ``insecure`` variable is NOT recommended in
production.
5. After changing ``dhcp_agent.ini``, restart the DHCP agent.
On Red Hat based server:
:command:`systemctl restart neutron-dhcp-agent`
On Ubuntu based server:
:command:`service restart neutron-dhcp-agent`
6. Start the dhcp and dns relay from command line as a detached
background process. The relay files exist in
networking_cisco/plugins/cisco/cpnr.
:command:`nohup python dhcp_relay.py --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-relay.log &`
:command:`nohup python dns_relay.py --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dns-relay.log &`

View File

@ -1,12 +0,0 @@
============
Applications
============
Contents:
.. toctree::
:maxdepth: 1
saf
cpnr

View File

@ -1,140 +0,0 @@
===================================
Cisco Nexus StandAlone Fabric (SAF)
===================================
1. General
----------
This is an installation guide for enabling nexus fabric support on OpenStack
Please refer to nexus fabric system configuration for how to bring up
the fabric using a spine and leaf topology with DCNM as the fabric manager.
The compute node in an OpenStack setup should be connected to the nexus
fabric leaf switch. This link on the compute node/server is often
referred as uplink in this note.
This guide does not cover OpenStack installation.
2. Prerequisites
----------------
The prerequisites for installing Nexus fabric OpenStack enabler are the
following:
- Install LLDPad
- Install OVS (version 2.3.x)
3. Fabric Enabler Installation
------------------------------
:3.1 Using devstack:
In this scenario, SAF will be installed along with openstack using devstack
1. Clone devstack.
2. Use ``networking-cisco/devstack/saf/local.conf.compute.saf`` and ``networking-cisco/devstack/saf/local.conf.control.saf`` as and example to create local.conf for control and compute nodes and set the required parameters in the local.conf based on the your setup.
3. Run ./stack.sh
:3.2 On a setup with OpenStack already installed:
In this scenario, SAF will be installed on a setup which has already OpenStack installed:
1. Clone networking-cisco_.
.. _networking-cisco: https://github.com/openstack/networking-cisco
2. The following modifications are needed in:
::
2.1 /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = local
mechanism_drivers = openvswitch
[ovs]
bridge_mappings = ethd:br-ethd
[agent]
tunnel_types =
Following sections should remain empty:
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
L3 agent - must be disabled
DHCP service - must be disabled
2.2 neutron.conf:
[DEFAULT]
notification_driver = messaging
notification_topics = cisco_dfa_neutron_notify
rpc_backend = rabbit
[keystone_authtoken]
...
auth_host = <ip address of controller>
auth_port = 35357
admin_tenant_name = service
admin_user = neutron
admin_password = <admin password>
...
2.3 nova.conf:
[keystone_authtoken]
...
admin_password = <admin password>
admin_user = nova
admin_tenant_name = service
auth_uri = http://<ip address of controller>:5000/v2.0
auth_host = <ip address of controller>
...
2.4 keystone.conf:
[DEFAULT]
notification_driver = messaging
notification_topics = cisco_dfa_keystone_notify
admin_endpoint = http://<services ip address>:%(admin_port)s/
rpc_backend = rabbit
4. ``cd networking-cisco``
5. Edit ``networking-cisco/etc/saf/enabler_conf.ini``
Set the parameters in each section of the enabler_conf.ini based on your setup
6. Run ``python tools/saf_prepare_setup.py``
7. Run ``sudo python setup.py install``
8. On controller node run:
- On ubuntu based server:
``sudo start fabric-enabler-server``
- On Red Hat based server:
``sudo systemctl start fabric-enabler-server``
9. On compute node run:
- On ubuntu based server:
``sudo start fabric-enabler-agent``
- On Red Hat based server:
``sudo systemctl start fabric-enabler-agent``

View File

@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx configuration for building the networking-cisco documentation.
# Sphinx imports this module and reads the module-level names below
# (extensions, source_suffix, master_doc, project, ...) as build settings.
import os
import sys
# Prepend the repository root (two levels up from doc/source) so that
# sphinx.ext.autodoc can import the networking_cisco package.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'networking-cisco'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
# e.g. 'networking-ciscodoc' for this project.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../CONTRIBUTING.rst

View File

@ -1,26 +0,0 @@
.. networking-cisco documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to networking-cisco's documentation!
========================================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
apps/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,12 +0,0 @@
============
Installation
============
At the command line::
$ pip install networking-cisco
Or, if you have virtualenvwrapper installed::
$ mkvirtualenv networking-cisco
$ pip install networking-cisco

View File

@ -1 +0,0 @@
.. include:: ../../README.rst

View File

@ -1,7 +0,0 @@
========
Usage
========
To use networking-cisco in a project::
import networking_cisco

View File

@ -1,10 +0,0 @@
[cisco_pnr]
#http_server = http://<CPNR_IP_ADDRESS:8080>
#http_username = <CPNR WebUI User Name>
#http_password = <CPNR WebUI User Password>
#external_interface = <Name of the Interface...Example eth0, ens0...>
#dhcp_server_addr = <IPv4 address of the CPNR DHCP server>
#dhcp_server_port = <Port number where the CPNR DHCP is running>
#dns_server_addr = <IPv4 address of the CPNR DNS server>
#dns_server_port = <Port number where the CPNR DNS is running>
#insecure = <True to allow http communication, False when only https communication is allowed>

View File

@ -1,34 +0,0 @@
# Configuration for cpnr-rootwrap
# This file should be owned by (and only-writable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/cpnr/rootwrap.d,/usr/share/cpnr/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
[xenapi]
# XenAPI configuration is only required by the L2 agent if it is to
# target a XenServer/XCP compute host's dom0.
xenapi_connection_url=<None>
xenapi_connection_username=root
xenapi_connection_password=<None>

View File

@ -1,40 +0,0 @@
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
description "CPNR DHCP Relay Conf"
start on runlevel [2345]
stop on runlevel [!2345]
env DST="/usr/local/bin/"
pre-start script
logger -t cpnr-dhcp-relay.conf "Pre-Starting CPNR DHCP Relay Agent"
test -r $DST/cpnr-dhcp-relay-agent || { stop; exit 0; }
logger -t cpnr-dhcp-relay.conf "Starting CPNR DHCP Relay Agent"
end script
pre-stop script
logger -t cpnr-dhcp-relay.conf "Stopping CPNR DHCP Relay Agent"
end script
script
logger -t cpnr-dhcp-relay.conf "Starting NOW CPNR DHCP Relay Agent"
exec $DST/cpnr-dhcp-relay-agent --config-file /etc/neutron/dhcp_agent.ini
--log-file /var/log/cpnr/cpnr-dhcp-relay-agent.log
end script

View File

@ -1,40 +0,0 @@
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
description "CPNR DNS Relay Conf"
start on runlevel [2345]
stop on runlevel [!2345]
env DST="/usr/local/bin/"
pre-start script
logger -t cpnr-dns-relay.conf "Pre-Starting CPNR DNS Relay Agent"
test -r $DST/cpnr-dns-relay-agent || { stop; exit 0; }
logger -t cpnr-dns-relay.conf "Starting CPNR DNS Relay Agent"
end script
pre-stop script
logger -t cpnr-dns-relay.conf "Stopping CPNR DNS Relay Agent"
end script
script
logger -t cpnr-dns-relay.conf "Starting NOW CPNR DNS Relay Agent"
exec $DST/cpnr-dns-relay-agent --config-file /etc/neutron/dhcp_agent.ini
--log-file /var/log/cpnr/cpnr-dns-relay-agent.log
end script

View File

@ -1,27 +0,0 @@
[cfg_agent]
# (IntOpt) Interval in seconds for processing of service updates.
# That is when the config agent's process_services() loop executes
# and it lets each service helper to process its service resources.
# rpc_loop_interval = 10
# (StrOpt) Period-separated module path to the routing service helper class.
# routing_svc_helper_class = networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
# (IntOpt) Timeout value in seconds for connecting to a hosting device.
# device_connection_timeout = 30
# (IntOpt) The time in seconds until a backlogged hosting device is
# presumed dead or booted to an error state.
# hosting_device_dead_timeout = 300
# (IntOpt) Interval in seconds when the config agent sents a report to the
# plugin. This is used to keep tab on the liveliness of the cfg agent.
# This value should be more than 0, otherwise cfg agent will be considered
# as dead.
# keepalive_interval = 10
# (IntOpt)The iteration where the config agent sends a full status report to
# the plugin. The default is every 6th iteration of the keep alive interval.
# This means with default value of keepalive_interval (10sec), a full report
# is sent once every 6*10 = 60 seconds.
# report_iteration = 6

View File

@ -1,243 +0,0 @@
[general]
# Name of the L3 admin tenant
# l3_admin_tenant = L3AdminTenant
# Name of management network for hosting device configuration
# management_network = osn_mgmt_nw
# Default security group applied on management port
# default_security_group = mgmt_sec_grp
# Maximal time (in seconds) between checks of config agent status
# cfg_agent_monitoring_interval = 20
# Seconds of no status update until a cfg agent is considered down
# cfg_agent_down_time = 30
# Driver to use for scheduling hosting device to a Cisco configuration agent
# configuration_agent_scheduler_driver = networking_cisco.plugins.cisco.device_manager.scheduler.hosting_device_cfg_agent_scheduler.HostingDeviceCfgAgentScheduler
# Path to templates for hosting devices
# templates_path = /opt/stack/data/neutron/cisco/templates
# Path to config drive files for service VM instances
# service_vm_config_path = /opt/stack/data/neutron/cisco/config_drive
# Ensure that Nova is running before attempting to create any CSR1kv VM
# ensure_nova_running = True
# IP address of primary domain name server for hosting devices
# domain_name_server_1 = 8.8.8.8
# IP address of secondary domain name server for hosting devices
# domain_name_server_2 = 8.8.4.4
[hosting_device_credentials]
# Cisco hosting device credentials specifications.
# Credentials for hosting devices must be defined here.
# NOTE! All fields must be included (even if left empty).
# Hosting device credential format.
# [cisco_hosting_device_credential:<UUID of hosting device credential>] (1)
# name=<name of credential> (2)
# description=<description of credential> (3)
# user_name=<username string>
# password=<password string>
# type=<type of credential> (4)
# (1) The UUID can be specified as an integer.
# (2),(3),(4): currently ignored. Can be left empty.
# Example:
# [cisco_hosting_device_credential:1]
# name="Universal credential"
# description="Credential used for all hosting devices"
# user_name=device_administrator
# password=fE#m%%92
# type=
[hosting_devices_templates]
# Cisco hosting device template definitions.
# In addition to defining templates using the neutron client,
# templates can be defined here to be immediately available
# when Neutron is started.
# NOTE! All fields must be included (even if left empty).
# Hosting device template format.
# [cisco_hosting_device_template:<UUID of hosting device template>] (1)
# name=<name given to hosting devices created using this template>
# enabled=<template enabled if True>
# host_category=<can be 'VM', 'Hardware', or 'Network_Node'> (2)
# service_types=<list of service types this template supports> (3)
# image=<the image name or UUD in Glance> (4)
# flavor=<the VM flavor or UUID in Nova> (5)
# default_credentials_id=<UUID of default credentials> (6)
# configuration_mechanism=<indicates how configurations are made> (7)
# protocol_port=<udp/tcp port of hosting device>
# booting_time=<Typical booting time (in seconds)>
# slot_capacity=<abstract metric specifying capacity to host logical resources>
# desired_slots_free=<desired number of slots to keep available at all times>
# tenant_bound=<list of tenant UUIDs to which template is available> (8)
# device_driver=<module to be used as hosting device driver>
# plugging_driver=<module to be used as plugging driver >
# (1) The UUID can be specified as an integer.
# (2) Specify 'VM' for virtual machine appliances, 'Hardware' for hardware
# appliances, and 'Network_Node' for traditional Neutron network nodes.
# (3) Write as string of ':' separated service type names. Can be left empty
# for now.
# (4) Leave empty for hardware appliances and network nodes.
# (5) Leave empty for hardware appliances and network nodes.
# (6) UUID of credential. Can be specified as an integer.
# (7) Currently ignored. Can be left empty for now.
# (8) A (possibly empty) string of ':'-separated tenant UUIDs representing the
# only tenants allowed to own/place resources on hosting devices created
# using this template. If string is empty all tenants are allowed.
# Example:
# [cisco_hosting_device_template:1]
# name=NetworkNode
# enabled=True
# host_category=Network_Node
# service_types=router:FW:VPN
# image=
# flavor=
# default_credentials_id=1
# configuration_mechanism=
# protocol_port=22
# booting_time=360
# slot_capacity=2000
# desired_slots_free=0
# tenant_bound=
# device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.noop_hd_driver.NoopHostingDeviceDriver
# plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.noop_plugging_driver.NoopPluggingDriver
# [cisco_hosting_device_template:2]
# name="CSR1kv template"
# enabled=True
# host_category=VM
# service_types=router:FW:VPN
# image=csr1kv_openstack_img
# flavor=621
# default_credentials_id=1
# configuration_mechanism=
# protocol_port=22
# booting_time=360
# slot_capacity=2000
# desired_slots_free=0
# tenant_bound=
# device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.csr1kv_hd_driver.CSR1kvHostingDeviceDriver
# Use this plugging driver for ML2 N1kv driver with VLAN trunking
# plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.n1kv_ml2_trunking_driver.N1kvML2TrunkingPlugDriver
# Use this plugging driver for VIF hot-plug (with plugins like ML2)
# plugging_driver=networking_cisco.plugins.cisco.l3.plugging_drivers.vif_hotplug_plugging_driver.VIFHotPlugPluggingDriver
# [cisco_hosting_device_template:3]
# name="ASR1kv template"
# enabled=True
# host_category=Hardware
# service_types=router:FW:VPN
# image=
# flavor=
# default_credentials_id=1
# configuration_mechanism=
# protocol_port=22
# booting_time=360
# slot_capacity=2000
# desired_slots_free=0
# tenant_bound=
# device_driver=networking_cisco.plugins.cisco.device_manager.hosting_device_drivers.noop_hd_driver.NoopHostingDeviceDriver
# plugging_driver=networking_cisco.plugins.cisco.device_manager.plugging_drivers.hw_vlan_trunking_driver.HwVLANTrunkingPlugDriver
[hosting_devices]
# Cisco hosting device specifications.
# In addition to specifying hosting devices using the neutron client,
# devices can be specified here to be immediately available when Neutron is
# started.
# NOTE! All fields must be included (even if left empty).
# Hosting device format.
# [cisco_hosting_device:<UUID of hosting device>] (1)
# template_id=<UUID of hosting device template for this hosting device>
# credentials_id=<UUID of credentials for this hosting device>
# name=<name of device, e.g., its name in DNS>
# description=<arbitrary description of the device>
# device_id=<manufacturer id of the device, e.g., its serial number>
# admin_state_up=<True if device is active, False otherwise>
# management_ip_address=<IP address of device's management network interface>
# protocol_port=<udp/tcp port of hosting device's management process>
# tenant_bound=<Tenant UUID or empty string> (2)
# auto_delete=<True or False> (3)
# (1) The UUID can be specified as an integer.
# (2) UUID of the only tenant allowed to own/place resources on this hosting
# device. If empty any tenant can place resources on it.
# (3) If True, a VM-based hosting device is subject to deletion as part of
# hosting device pool management and in case of VM failures. If set to
# False, the hosting device must be manually unregistered in the device
# manager and any corresponding VM must be deleted in Nova.
# Example:
# [cisco_hosting_device:3]
# template_id=1
# credentials_id=1
# name=dragon
# description=Main ASR1k serving region 1
# device_id=SN:abcd1234efgh
# admin_state_up=True
# management_ip_address=10.0.100.5
# protocol_port=22
# tenant_bound=
# auto_delete=False
[plugging_drivers]
# Cisco plugging driver configurations.
# Plugging driver specific settings are made here.
# For the hw_vlan_trunking_driver.HwVLANTrunkingPlugDriver plugging driver
# it is expected that for each hosting device the network interfaces to be used
# to reach different Neutron networks are specified.
# Specifically the format for this plugging driver is as follows
# [HwVLANTrunkingPlugDriver:<UUID of hosting device>] (1)
# internal_net_interface_<int number>=<network_uuid_spec>:<interface_name> (2)
# external_net_interface_<int number>=<network_uuid_spec>:<interface_name> (3)
# [zero or more additional internal or external specifications] ...
# (1) The UUID can be specified as an integer.
# (2),(3) <network_uuid_spec> can be '*' or a UUID, or a comma separated list
# of UUIDs.
# Example:
# [HwVLANTrunkingPlugDriver:3]
# internal_net_interface_1=*:GigabitEthernet1
# external_net_interface_1=*:GigabitEthernet2
# [HwVLANTrunkingPlugDriver:4]
# internal_net_interface_1=*:GigabitEthernet1
# external_net_interface_1=d7b2eac2-1ade-444e-edc5-81fd4267f53a:GigabitEthernet2
# external_net_interface_2=a36b533a-fae6-b78c-fe11-34aa82b12e3a,45c624b-ebf5-c67b-df22-43bb73c21f4e:GigabitEthernet3
[csr1kv_hosting_devices]
# Settings for CSR1kv hosting devices
# -----------------------------------
# CSR1kv default template file name
# configdrive_template = csr1kv_cfg_template
[n1kv]
# Settings coupled to N1kv plugin
# -------------------------------
# Name of N1kv port profile for management ports
# management_port_profile = osn_mgmt_pp
# Name of N1kv port profile for T1 ports
# t1_port_profile = osn_t1_pp
# Name of N1kv port profile for T2 ports
# t2_port_profile = osn_t2_pp
# Name of N1kv network profile for T1 networks
# t1_network_profile = osn_t1_np
# Name of N1kv network profile for T2 networks
# t2_network_profile = osn_t2_np

View File

@ -1,110 +0,0 @@
[routing]
# Name of default router type to create. Must be a unique name.
default_router_type = ASR1k_router
# Name of router type for Linux network namespace-based routers
# namespace_router_type_name = NetworkNamespace_router
# Time in seconds between renewed scheduling attempts of non-scheduled routers
# backlog_processing_interval = 10
# Driver to use for routertype-aware scheduling of router to a default L3 agent
# router_type_aware_scheduler_driver = networking_cisco.plugins.cisco.l3.schedulers.l3_routertype_aware_agent_scheduler.L3RouterTypeAwareScheduler
# Set 'auto_schedule' to True if routers are to be automatically scheduled by default
# auto_schedule = True
# Set 'share_hosting_device' to True if routers can share hosts with routers owned by other tenants by default
# share_hosting_device = True
[router_types]
# Cisco router type definitions.
# In addition to defining router types using the neutron client,
# router types can be defined here to be immediately available
# when Neutron is started.
# NOTE! All fields must be included (even if left empty).
# Cisco router type format.
# [cisco_router_type:<UUID of router type>]
# name=<router type name, should preferably be unique>
# description=<description of router type>
# template_id=<template to use to create hosting devices for this router type>
# ha_enabled_by_default=<True if HA should be enabled by default>
# shared=<True if routertype is available to all tenants, False otherwise>
# slot_need=<Number of slots this router type consume in hosting device>
# scheduler=<module to be used as scheduler for router of this type> (1)
# driver=<module to be used by router plugin as router type driver> (2)
# cfg_agent_service_helper=<module to be used by configuration agent
# as service helper driver (3)
# cfg_agent_driver=<module to be used by configuration agent for
# device configurations> (4)
# (1) --(4): Leave empty for routers implemented in network nodes
# Example:
# [cisco_router_type:1]
# name=Namespace_Neutron_router
# description="Neutron router implemented in Linux network namespace"
# template_id=1
# shared=True
# slot_need=0
# scheduler=
# driver=
# cfg_agent_service_helper=
# cfg_agent_driver=
# [cisco_router_type:2]
# name=CSR1kv_router
# description="Neutron router implemented in Cisco CSR1kv device"
# template_id=2
# ha_enabled_by_default=False
# shared=True
# slot_need=10
# scheduler=networking_cisco.plugins.cisco.l3.schedulers.l3_router_hosting_device_scheduler.L3RouterHostingDeviceLongestRunningScheduler
# driver=networking_cisco.plugins.cisco.l3.drivers.noop_routertype_driver.NoopL3RouterDriver
# cfg_agent_service_helper=networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
# Use this cfg agent driver for N1kv VLAN trunking
# cfg_agent_driver=networking_cisco.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver
# Use this cfg agent driver for VIF hot-plug (with plugins like ML2)
# cfg_agent_driver=networking_cisco.plugins.cisco.cfg_agent.device_drivers.csr1kv.csr1kv_hotplug_routing_driver.CSR1kvHotPlugRoutingDriver
# [cisco_router_type:3]
# name=Hardware_Neutron_router
# description="Neutron router implemented in Cisco ASR1k device"
# template_id=3
# ha_enabled_by_default=True
# shared=True
# slot_need=1
# scheduler=networking_cisco.plugins.cisco.l3.schedulers.l3_router_hosting_device_scheduler.L3RouterHostingDeviceHARandomScheduler
# driver=networking_cisco.plugins.cisco.l3.drivers.asr1k.asr1k_routertype_driver.ASR1kL3RouterDriver
# cfg_agent_service_helper=networking_cisco.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper.RoutingServiceHelper
# cfg_agent_driver=networking_cisco.plugins.cisco.cfg_agent.device_drivers.asr1k.asr1k_routing_driver.ASR1kRoutingDriver
[ha]
# Enables high-availability support for routing service
# ha_support_enabled = True
# Default number of routers added for redundancy when high-availability
# by VRRP, HSRP, or GLBP is used (maximum is 4)
# default_ha_redundancy_level = 1
# Default mechanism used to implement high-availability. Can be one of HSRP,
# VRRP, or GLBP
# default_ha_mechanism = HSRP
# List of administratively disabled high-availability mechanisms (one or
# several of VRRP, HSRP, GLBP)
# disabled_ha_mechanisms = []
# Enables connectivity probing for high-availability even if (admin) user does
# not explicitly request it
# connectivity_probing_enabled_by_default = False
# Host that will be probe target for high-availability connectivity probing
# if (admin) user does not specify it
# default_probe_target = None
# Time (in seconds) between probes for high-availability connectivity probing
# if user does not specify it
# default_ping_interval = 5

View File

@ -1,26 +0,0 @@
[cisco_csr_ipsec]
# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
# status_check_interval = 60
# Cisco CSR management port information for REST access used by VPNaaS
# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
#
# Format is:
# [cisco_csr_rest:<public IP>]
# rest_mgmt = <mgmt port IP>
# tunnel_ip = <tunnel IP>
# username = <user>
# password = <password>
# timeout = <timeout>
# host = <hostname>
# tunnel_if = <tunnel I/F>
#
# where:
# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
# mgmt port IP -- IP address of CSR for REST API access
# user ---------- Username for REST management port access to Cisco CSR
# password ------ Password for REST management port access to Cisco CSR
# timeout ------- REST request timeout to Cisco CSR (optional)
# hostname ------ Name of host where CSR is running as a VM
# tunnel I/F ---- CSR port name used for tunnels' IP address

View File

@ -1,278 +0,0 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
# The total length allowed for the prefix name and VLAN is 32 characters,
# the prefix will be truncated if the total length is greater than 32.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
# (BoolOpt) A flag to enable strict hostkey checks when connecting to
# Nexus switches. Defaults to False (No hostkey checks)
# This will be deprecated along with nexus_driver since this is
# associated to the ncclient driver which is going away.
# host_key_checks = False
# (ListOp) A choice of driver methods to configure Nexus devices.
# The default choice has changed to 'restapi' which replaces the
# original 'ncclient' driver. The RESTAPI driver has better
# performance with less Nexus session limits. Additionally,
# new feature development is applied only to restapi driver.
# Plans are to remove ncclient driver in Cisco 7.0.0 release.
# To use the restapi driver, the Nexus 9K image version must be
# 7.0(3)I5(2) or greater. For short term, the original driver can be
# used by setting the nexus_driver to 'ncclient'. The default is
# set to:
# nexus_driver = restapi
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by OpenStack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
# nve_src_intf=<loopback number> (5)
# physnet=<physical network> (6)
# vpc_pool=<start,end> (7)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# Valid intf_type's are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
# (5) Only valid if VXLAN overlay is configured and vxlan_global_config is
# set to True.
# The NVE source interface is a loopback interface that is configured on
# the switch with valid /32 IP address. This /32 IP address must be known
# by the transient devices in the transport network and the remote VTEPs.
# This is accomplished by advertising it through a dynamic routing protocol
# in the transport network. (NB: If no nve_src_intf is defined then a
# default setting of 0 (creates "loopback0") will be used.)
# (6) Only valid if VXLAN overlay is configured.
# The physical network name defined in the network_vlan_ranges variable
# (defined under the ml2_type_vlan section) that this switch is controlling.
# The configured 'physnet' is the physical network domain that is connected
# to this switch. The vlan ranges defined in network_vlan_ranges for a
# physical network are allocated dynamically and are unique per physical
# network. These dynamic vlans may be reused across physical networks.
# (7) VPC pool is valid for Baremetal configurations.
# When there is a list of ethernet interfaces provided by Ironic to neutron
# in a port binding transaction, these are assumed to be port-channel type
# configurations. Ironic only learns ethernet interfaces so it is up to
# the Nexus ML2 Driver to either learn the port channel if the channel-group
# is already applied to the ethernet interfaces OR create a new port-channel
# and apply the channel-group to the ethernet interface. This pool is the
# reserved port-channel IDs available for allocation by the Nexus ML2 driver
# for each switch. Existing port-channel interfaces preconfigured on
# ethernet interfaces in use will be reused by the driver.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
# nve_src_intf=1
# physnet=physnet1
# vpc_pool=1001,1025
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
# The total length allowed for the prefix name and VLAN is 32 characters,
# the prefix will be truncated if the total length is greater than 32.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-
# (BoolOpt) A flag indicating whether OpenStack networking should manage the
# creation and removal of VLANs for provider networks on the Nexus
# switches. If the flag is set to False then OpenStack will not create or
# remove VLANs for provider networks, and the administrator needs to
# manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True
# (BoolOpt) A flag indicating whether OpenStack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then OpenStack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True
# (BoolOpt) A flag indicating whether OpenStack networking should manage the
# creating and removing of the Nexus switch VXLAN global settings of 'feature
# nv overlay', 'feature vn-segment-vlan-based', 'interface nve 1' and the NVE
# subcommand 'source-interface loopback #'. If the flag is set to False
# (default) then OpenStack will not add or remove these VXLAN settings, and
# the administrator needs to manage these operations manually or by external
# orchestration.
#
# vxlan_global_config = True
# (BoolOpt) To make Nexus device persistent by running the Nexus
# CLI 'copy run start' after applying successful configurations.
# (default) This flag defaults to False to keep consistent with
# existing functionality.
# This will be deprecated along with nexus_driver since this is
# associated to the ncclient driver which is going away.
#
# persistent_switch_config = False
# (BoolOpt) Prevent caching ssh connections to a Nexus switch.
# Set this to True when there are multiple neutron controllers
# and/or when there may be non-neutron ssh connections to the
# same Nexus device. Nexus devices have a limit of 8 such
# connections. When a single neutron controller has more than
# 8 processes, caching is automatically disabled without
# regard to this option.
# (default) This flag defaults to False which indicates that ssh
# connections to a Nexus switch are cached when the neutron
# controller has fewer than 8 processes.
# This will be deprecated along with nexus_driver since this is
# associated to the ncclient driver which is going away.
#
# never_cache_ssh_connection = False
# (IntOpt) Time interval to check the state of the Nexus device.
# (default) This value defaults to 0 seconds which disables this
# functionality. When enabled, 30 seconds is suggested.
#
# switch_heartbeat_time = 0
[ml2_type_nexus_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN Network IDs that are available for tenant network allocation.
#
# vni_ranges =
# Example: 100:1000,2000:6000
#
# (ListOpt) Multicast groups for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. Comma separated
# list of min:max ranges of multicast IP's.
# NOTE: must be a valid multicast IP, invalid IP's will be discarded
#
# mcast_ranges =
# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1.3
[ml2_cisco_n1kv]
# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
# default_policy_profile = default-pp
# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
# poll_duration = 60
# (IntOpt) Timeout duration in seconds for the http request
# http_timeout = 15
# (IntOpt) Time duration in seconds between consecutive neutron-VSM syncs
# sync_interval = 300
# (BoolOpt) Specify whether tenants are restricted from accessing all the
# policy profiles.
# Default value: False, indicating all tenants can access all policy profiles.
#
# restrict_policy_profiles = False
# Describe Cisco N1KV VSM connectivity
# In this section you can specify connectivity details in order for plugin
# to connect to N1KV Virtual Supervisor Module (VSM).
#
# n1kv_vsm_ips =<vsm1_ip>,<vsm2_ip>,....
# username = <username>
# password = <password>
#
# An example would be:
# n1kv_vsm_ips = 1.1.1.1,1.1.1.2
# username = user
# password = password
# (IntOpt) Maximum number of retry attempts for VSM REST API.
# max_vsm_retries = 2
# (BoolOpt) Specify whether tenants are restricted from accessing all the
# network profiles.
# Default value: False, indicating all tenants can access all network profiles.
#
# restrict_network_profiles = False
[ml2_cisco_ucsm]
# Configuration for Single UCSM Support
# Cisco UCS Manager IP address
# ucsm_ip=1.1.1.1
# Username to connect to UCS Manager
# ucsm_username=user
# Password to connect to UCS Manager
# ucsm_password=password
# Hostname to Service profile mapping for UCS Manager
# controlled compute hosts
# ucsm_host_list=Hostname1:Serviceprofile1, Hostname2:Serviceprofile2
# SR-IOV and VM-FEX vendors supported by this plugin
# xxxx:yyyy represents vendor_id:product_id
# This config is optional.
# supported_pci_devs=['2222:3333', '4444:5555']
# Ethernet port names to be used for virtio ports
# ucsm_virtio_eth_ports = eth0, eth1
# UCSM information for multi-UCSM support.
# The following section can be repeated for the number of UCS Managers in
# the cloud.
# UCSM information format:
# [ml2_cisco_ucsm_ip:1.1.1.1]
# ucsm_username = username
# ucsm_password = password
# ucsm_virtio_eth_ports = eth0, eth1
# Hostname to Service Profile mapping for Compute hosts managed by
# this UCS Manager. This config should be specified for hosts configured
# with only Service Profiles and not Service Profile Templates.
# ucsm_host_list=Hostname1:Serviceprofile1, Hostname2:Serviceprofile2
# Service Profile Template config per UCSM. This is a mapping of Service Profile
# Template to the list of UCS Servers controlled by this template.
# sp_template_list = SP_Template1_path:SP_Template1:S1,S2 SP_Template2_path:SP_Template2:S3,S4
# VNIC Profile Template config per UCSM.
# vnic_template_list = physnet1:vnic_template_path1:vt11,vt12 physnet2:vnic_template_path2:vt21,vt22
# Pre-defined QoS policy name
# sriov_qos_policy=Test
# SR-IOV Multi-VLAN trunk config section
# [sriov_multivlan_trunk]
# Neutron network name=<comma separated list of VLAN-ids or VLAN-id ranges>
# For example:
# test_network1=5,7-9
# test_network2=500,701 - 709

View File

@ -1,28 +0,0 @@
# Defines configuration options specific to the Cisco NCS Mechanism Driver
[ml2_ncs]
# (StrOpt) Cisco NCS HTTP endpoint for REST access to the OpenStack
# subtree.
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://ncs/api/running/services/openstack
# (StrOpt) Username for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout =
# Example: timeout = 15

View File

@ -1,253 +0,0 @@
{
"context_is_admin": "role:admin",
"owner": "tenant_id:%(tenant_id)s",
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"shared_routertypes": "field:routertypes:shared=True",
"shared_firewall_policies": "field:firewall_policies:shared=True",
"shared_subnetpools": "field:subnetpools:shared=True",
"shared_address_scopes": "field:address_scopes:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_subnetpool": "",
"create_subnetpool:shared": "rule:admin_only",
"get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
"update_subnetpool": "rule:admin_or_owner",
"delete_subnetpool": "rule:admin_or_owner",
"create_address_scope": "",
"create_address_scope:shared": "rule:admin_only",
"get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
"update_address_scope": "rule:admin_or_owner",
"update_address_scope:shared": "rule:admin_only",
"delete_address_scope": "rule:admin_or_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"get_router:cisco_ha:enabled": "rule:admin_or_owner",
"get_router:cisco_ha:details": "rule:admin_only",
"get_router:routerhost:hosting_device": "rule:admin_only",
"get_router:routerrole:role": "rule:admin_only",
"get_router:routertype-aware-scheduler:auto_schedule": "rule:admin_only",
"get_router:routertype-aware-scheduler:share_hosting_device": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"create_router:cisco_ha:enabled": "rule:admin_or_owner",
"create_router:cisco_ha:details": "rule:admin_only",
"create_router:routertype-aware-scheduler:auto_schedule": "rule:admin_only",
"create_router:routertype-aware-scheduler:share_hosting_device": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"update_router:cisco_ha:enabled": "rule:admin_or_owner",
"update_router:cisco_ha:details": "rule:admin_only",
"update_router:routertype-aware-scheduler:auto_schedule": "rule:admin_only",
"update_router:routertype-aware-scheduler:share_hosting_device": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"insert_rule": "rule:admin_or_owner",
"remove_rule": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"get_agent-loadbalancers": "rule:admin_only",
"get_loadbalancer-hosting-agent": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only",
"create_flavor": "rule:admin_only",
"update_flavor": "rule:admin_only",
"delete_flavor": "rule:admin_only",
"get_flavors": "rule:regular_user",
"get_flavor": "rule:regular_user",
"create_service_profile": "rule:admin_only",
"update_service_profile": "rule:admin_only",
"delete_service_profile": "rule:admin_only",
"get_service_profiles": "rule:admin_only",
"get_service_profile": "rule:admin_only",
"get_policy": "rule:regular_user",
"create_policy": "rule:admin_only",
"update_policy": "rule:admin_only",
"delete_policy": "rule:admin_only",
"get_policy_bandwidth_limit_rule": "rule:regular_user",
"create_policy_bandwidth_limit_rule": "rule:admin_only",
"delete_policy_bandwidth_limit_rule": "rule:admin_only",
"update_policy_bandwidth_limit_rule": "rule:admin_only",
"get_rule_type": "rule:regular_user",
"restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
"create_rbac_policy": "",
"create_rbac_policy:target_tenant": "rule:restrict_wildcard",
"update_rbac_policy": "rule:admin_or_owner",
"update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
"get_rbac_policy": "rule:admin_or_owner",
"delete_rbac_policy": "rule:admin_or_owner",
"create_hosting_device": "rule:admin_only",
"get_hosting_device": "rule:admin_only",
"get_hosting_devices": "rule:admin_only",
"update_hosting_device": "rule:admin_only",
"delete_hosting_device": "rule:admin_only",
"get_hosting_device_config": "rule:admin_or_owner",
"create_hosting_device_template": "rule:admin_only",
"get_hosting_device_template": "rule:admin_only",
"get_hosting_device_templates": "rule:admin_only",
"update_hosting_device_template": "rule:admin_only",
"delete_hosting_device_template": "rule:admin_only",
"create_routertype": "rule:admin_only",
"get_routertype": "rule:admin_or_owner or rule:shared_routertypes",
"get_routertype:template_id": "rule:admin_only",
"get_routertype:shared": "rule:admin_only",
"get_routertype:slot_need": "rule:admin_only",
"get_routertype:scheduler": "rule:admin_only",
"get_routertype:driver": "rule:admin_only",
"get_routertype:cfg_agent_service_helper": "rule:admin_only",
"get_routertype:cfg_agent_driver": "rule:admin_only",
"update_routertype": "rule:admin_only",
"delete_routertype": "rule:admin_only",
"create_hosting-device-l3-router": "rule:admin_only",
"delete_hosting-device-l3-router": "rule:admin_only",
"get_hosting-device-l3-routers": "rule:admin_only",
"get_l3-router-hosting-devices": "rule:admin_only",
"create_cfg-agent-hosting-device": "rule:admin_only",
"delete_cfg-agent-hosting-device": "rule:admin_only",
"get_cfg-agent-hosting-devices": "rule:admin_only",
"get_hosting-device-cfg-agents": "rule:admin_only"
}

View File

@ -1,214 +0,0 @@
# The following are for keystoneclient authentication
[keystone_authtoken]
#project_name = admin
#username = admin
#password = yourpasswd
#auth_url = http://controllerip/identity
#user_domain_name = default
#project_domain_name = default
# The following are for neutronclient authentication.
[neutron]
#project_name = service
#username = neutron
#password = yourpasswd
#auth_url = http://controllerip/identity
#user_domain_name = default
#project_domain_name = default
# The following are for novaclient authentication
[nova]
#project_name = service
#username = nova
#password = yourpasswd
#auth_url = http://controllerip/identity
#user_domain_name = default
#project_domain_name = default
#api_version = 2.1
#region_name = RegionOne
[general]
# If uplink detection is not needed, enter the hostname(s) in node field, which
# is comma separated list of hosts for which static uplink is configured.
# One way to find the node name is from /etc/hostname
# The node_uplink field has the comma separated uplink ports in server which
# is connected to the leaf. In example below, host1 has eth5 connected to leaf
# and host2 has eth4 connected to leaf.
#
# node = host1.example.com, host2.example.com
# node_uplink = eth5, eth4
[dfa_agent]
# OVS Neutron Agent related configuration. Ensure that this is the same as
# what is configured for OVS Neutron Agent.
# The defaults are given below for convenience.
# integration_bridge = br-int
# external_dfa_bridge = br-ethd
[dcnm]
# IP address of the DCNM. It should be reachable from openstack
# controller node.
#
# dcnm_ip = 3.3.3.3
# DCNM server login and rabbitmq messaging credentials
#
# dcnm_user = dcnm-login-username
# dcnm_amqp_user = dcnm-amqp-username
# dcnm_password = dcnm-password
# Gateway MAC address. It should be the same as configured on the leaf nodes
#
# gateway_mac = 20:20:00:00:00:AA
# Orchestrator ID
# Orchestrator id used for registering the segmentation id range on DCNM
# If there are multiple setups using the same DCNM, please ensure
# different orchestrator IDs are used. no space is allowed
#
# orchestrator_id = 'Openstack'
# Segmentation ID range.
# The seg-id-min and seg-id-max are 20-bit integer values
#
# segmentation_id_min = seg-id-min
# segmentation_id_max = seg-id-max
# The suffix of a network name when it is created by DCNM.
#
# dcnm_net_ext = '(DCNM)'
# The lease file name of DHCP server on the DCNM.
#
# dcnm_dhcp_leases = '/var/lib/dhcpd/dhcpd.leases'
# Default configuration profile when creating a network in DCNM.
#
# The default is 'defaultNetworkIpv4EfProfile'.
# default_cfg_profile = defaultNetworkIpv4EfProfile
# Default vrf profile name for a partition in DCNM.
#
# default_vrf_profile = vrf-common-universal
# Default dhcp server is to use native dhcp
#dcnm_dhcp = false
[dfa_rpc]
# Transport URL parameter for RPC.
# The credentials should be set based on setup.
#
# transport_url = 'rabbit://username:password@rabbitmq-server-ip:5672//'
[dfa_mysql]
# MYSQL DB connection option
# The credentials should be set based on the setup.
#
# connection = mysql://username:password@mysql-server-ip/cisco_dfa?charset=utf8
[dfa_notify]
# Notification queue name for DFA enabler.
# service_name: keystone and neutron
#
# cisco_dfa_notify_queue = cisco_dfa_%(service_name)s_notify
[dfa_log]
# Log file name.
# If log file name and directory is not specified, the default
# is the standard output.
#
# log_file = fabric_enabler.log
# The directory name for the log file.
#
# log_dir = /home/localadmin/Logs
# Enabler debugging output level. Default is WARNING.
# Set to DEBUG to see the debugging output
#
# log_level = DEBUG
# Enable syslog. Debugging messages go to syslog
#
# use_syslog = False
# syslog_log_facility = LOG_USER
# Output format of log messages.
#
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# If need to save pid of fabric-enabler-server and fabric-enabler-agent
# the location of pid files should be set by the following options.
#
# pid_dir =
# pid_server_file =
# pid_agent_file =
[vdp]
# VDP default options
#
# mgrid2 = 0
# typeid = 0
# typeidver = 0
# vsiidfrmt = 5
# hints = none
# filter = 4
# vdp_sync_timeout = 30
[sys]
# Default root_helper
#
# root_helper = 'sudo'
[firewall]
# Firewall Default Parameters. The defaults are provided in the comment below.
#
#Currently supports phy_asa and Openstack native Firewall.
#Enter phy_asa or native
#device = native
#This is the Mgmt IP address for the physical Firewall device. For native,
#leave this line commented out.
#fw_mgmt_ip = [1.1.1.1]
#Uncomment the below and fill up the right values for phy_asa, not needed for
#native FW.
#fw_username = [admin]
#fw_password = [cisco123]
#This is the 'in' network interface of Tenant Edge Firewall.
#fw_interface_in = [Gi0/0]
#This is the 'out' network interface of Tenant Edge Firewall.
#fw_interface_out = [Gi0/1]
# The below configs are needed, fill this based on FP or EVPN fabric
# and further if it's based on Native or ASA FW
# This is the network profile for 'in' service network.
#fw_service_host_profile = 'serviceNetworkUniversalTfStaticRoutingProfile'
# This is the profile of the 'out' service partition.
#fw_service_part_vrf_profile = 'vrf-common-universal-external-static'
# This is the network profile for 'out' service network.
#fw_service_ext_profile = 'externalNetworkUniversalTfStaticRoutingESProfile'
#The following lines specifies the starting and ending subnets for 'in' and
#'out' service networks. In the example below, for a 24 subnet, the first
#service IN network will have 100.120.2.x, and subsequent ones will have
#100.120.3.x and so on.
#fw_service_in_ip_start = 100.120.2.0/24
#fw_service_in_ip_end = 100.120.20.0/24
#fw_service_out_ip_start = 200.220.2.0/24
#fw_service_out_ip_end = 200.220.20.0/24
# This specifies the subnet for the dummy interface. Openstack needs a router
# with an interface for the FW rules to be applied. So, enabler needs to
# create a router with a dummy interface so that FW can get activated.
#fw_service_dummy_ip_subnet = '9.9.9.0/24'
[loadbalance]
#lb_enabled = False
#lb_native = True

View File

@ -1,42 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
description "Fabric Enabler Agent Conf"
start on runlevel [2345]
stop on runlevel [!2345]
env DST="/usr/local/bin/"
pre-start script
logger -t fabric-enabler-agent.conf "Pre-Starting Fabric Enabler Agent"
test -r /etc/saf/enabler_conf.ini || { stop; exit 0; }
test -r $DST/fabric-enabler-agent || { stop; exit 0; }
logger -t fabric-enabler-agent.conf "Starting Fabric Enabler Agent"
end script
pre-stop script
logger -t fabric-enabler-agent.conf "Stopping Fabric Enabler Agent"
end script
script
logger -t fabric-enabler-agent.conf "Starting NOW 1 Fabric Enabler Agent"
exec $DST/fabric-enabler-agent --config-file /etc/saf/enabler_conf.ini
end script
#respawn
#respawn limit 10 5

View File

@ -1,32 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
Description=Nexus Fabric Enabler Agent.
After=syslog.target network.target
[Service]
Type=simple
Environment="OPTS1=--config-file"
Environment="OPTS2=/etc/saf/enabler_conf.ini"
ExecStart=/usr/bin/fabric-enabler-agent $OPTS1 $OPTS2
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@ -1,47 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
description "Fabric Enabler Server Conf"
start on runlevel [2345]
stop on runlevel [!2345]
env DST="/usr/local/bin/"
#setuid localadmin
#setgid localadmin
#chdir /var/run
pre-start script
logger -t fabric-enabler-server "Pre-Start Fabric Enabler Server Start"
test -r /etc/saf/enabler_conf.ini || { stop; exit 0; }
test -r $DST/fabric-enabler-server || { stop; exit 0; }
#mkdir -p /var/run/localadmin
#chown localadmin:root /var/run/localadmin
logger -t fabric-enabler-server "Pre-Starting Fabric Enabler Server Complete"
end script
pre-stop script
logger -t fabric-enabler-server "Stopping Fabric Enabler Server"
end script
script
logger -t fabric-enabler-server "Starting 1 Fabric Enabler Server"
exec $DST/fabric-enabler-server --config-file /etc/saf/enabler_conf.ini
end script
#respawn
#respawn limit 10 5

View File

@ -1,32 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
Description=Nexus Fabric Enabler Server.
After=syslog.target network.target
[Service]
Type=simple
Environment="OPTS1=--config-file"
Environment="OPTS2=/etc/saf/enabler_conf.ini"
ExecStart=/usr/bin/fabric-enabler-server $OPTS1 $OPTS2
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@ -1,35 +0,0 @@
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
[Unit]
Description=CPNR DHCP Relay Agent
After=syslog.target network.target
[Service]
Type=simple
User=cpnr
Environment="OPTS1=--config-file"
Environment="OPTS2=/etc/cpnr/cisco_pnr.ini"
Environment="OPTS3=--log-file"
Environment="OPTS4=/var/log/cpnr/cpnr-dhcp-relay-agent.log"
ExecStart=/usr/bin/cpnr-dhcp-relay $OPTS1 $OPTS2 $OPTS3 $OPTS4
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@ -1,34 +0,0 @@
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
[Unit]
Description=CPNR DNS Relay Agent
After=syslog.target network.target
[Service]
Type=simple
User=cpnr
Environment="OPTS1=--config-file"
Environment="OPTS2=/etc/cpnr/cisco_pnr.ini"
Environment="OPTS3=--log-file"
Environment="OPTS4=/var/log/cpnr/cpnr-dns-relay-agent.log"
ExecStart=/usr/bin/cpnr-dns-relay $OPTS1 $OPTS2 $OPTS3 $OPTS4
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import six
import pbr.version

# Package version string, derived from package metadata by pbr.
__version__ = pbr.version.VersionInfo(
    'networking_cisco').version_string()

# Install the _() translation builtin for the 'neutron' domain.
# The 'unicode' keyword exists (and is needed) only under Python 2.
if six.PY2:
    gettext.install('neutron', unicode=1)
else:
    gettext.install('neutron')

View File

@ -1,45 +0,0 @@
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n

# Translation domain for all networking-cisco messages.
DOMAIN = "networking-cisco"

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form

# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical


def get_available_languages():
    """Return the list of languages with translations for DOMAIN."""
    return oslo_i18n.get_available_languages(DOMAIN)

View File

@ -1,117 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
import sys
import time
from networking_cisco.apps.saf.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
uplink_file_path = '/tmp/uplink'
detect_uplink_file_path = '/tmp/uplink_detected'
def run_cmd_line(cmd_str, stderr=None, shell=True,
                 echo_cmd=False, check_result=False):
    """Run a command line and return an (output, returncode) pair.

    When the command fails, output is None.  With check_result set, a
    failure is logged and terminates the process with the command's
    return code instead of returning.
    """
    if echo_cmd:
        LOG.debug(cmd_str)
    # A shell invocation takes the raw string; a direct exec takes argv.
    cmd_args = cmd_str if shell else cmd_str.split()
    result = None
    rc = 0
    try:
        result = subprocess.check_output(cmd_args, shell=shell,
                                         stderr=stderr)
    except subprocess.CalledProcessError as exc:
        if check_result:
            LOG.debug(exc)
            sys.exit(exc.returncode)
        rc = exc.returncode
    return result, rc
def read_file(file_name):
    """Return the contents of file_name with all newlines removed.

    :param file_name: path of the file to read.
    :returns: the file contents as a single line, or None when
        file_name does not exist or is not a regular file.
    """
    if not os.path.isfile(file_name):
        return None
    # Context manager guarantees the handle is closed even if read()
    # raises (the original code leaked the handle on error).
    with open(file_name, "r") as filep:
        return filep.read().replace("\n", "")
def find_uplink():
    """Probe candidate interfaces via LLDP to locate the fabric uplink.

    Scans interfaces that are UP but have no IP address, briefly
    enables LLDP on each, and returns the first one whose peer
    advertises EVB bridge mode.  Returns None when no uplink is found.
    """
    list_up_intfs = ("ip link |grep 'state UP' | awk '{print $2}' "
                     "| sed 's/://'|grep ^[epb]")
    show_inet = "ifconfig %s | grep 'inet addr'"
    lldp_enable = ('sudo /usr/sbin/lldptool -i %s -g "ncb" -L adminStatus=rxtx')
    lldp_disable = ('sudo /usr/sbin/lldptool -i %s -g "ncb" -L '
                    'adminStatus=disabled')
    lldp_evb_bridge = ('sudo /usr/sbin/lldptool -i %s -g "ncb" -t -n -V evb | '
                       'grep "mode:bridge"')
    up_intfs, _unused = run_cmd_line(list_up_intfs)
    for candidate in up_intfs.split():
        addr_out, _unused = run_cmd_line(show_inet % candidate)
        if addr_out is not None:
            # Interface already carries an address; not the uplink.
            continue
        run_cmd_line(lldp_enable % candidate)
        # Give LLDP time to exchange EVB TLVs with the peer.
        time.sleep(40)
        evb_out, _unused = run_cmd_line(lldp_evb_bridge % candidate)
        run_cmd_line(lldp_disable % candidate)
        if evb_out:
            return candidate
def detect_uplink_non_auto(input_string):
    """Return the statically configured uplink state.

    When no interface name is given, the state is taken from the
    static uplink file; otherwise the link is reported as "normal".
    """
    if input_string is None:
        return read_file(uplink_file_path)
    return "normal"
def detect_uplink_auto(input_string):
    """Determine the uplink state via LLDP EVB probing.

    Without an interface name, scan all candidates for the uplink.
    With one, report "normal" when the peer still advertises EVB
    bridge mode and "down" otherwise.
    """
    if input_string is None:
        state = find_uplink()
    else:
        evb_query = ('sudo /usr/sbin/lldptool -i %s -g "ncb" -t -n -V evb | '
                     'grep "mode:bridge"') % input_string
        _unused, rc = run_cmd_line(evb_query,
                                   check_result=False)
        state = "normal" if rc == 0 else "down"
    LOG.debug('return_str=%s', state)
    return state
def detect_uplink(input_string=None):
    """Detect the uplink interface state.

    Uses the static configuration file when it exists, otherwise falls
    back to LLDP-based auto detection.

    :param input_string: optional interface name to check.
    :returns: the detected uplink (interface name or state string),
        possibly None when nothing was detected.
    """
    if os.path.isfile(uplink_file_path):
        auto_detect = False
        detected_uplink = detect_uplink_non_auto(input_string)
    else:
        auto_detect = True
        detected_uplink = detect_uplink_auto(input_string)
    # Lazy %-style logging args: the message is only formatted when
    # debug logging is enabled (the original built the string eagerly).
    LOG.debug("auto detect = %s, input string %s, detected uplink is %s.",
              auto_detect, input_string, detected_uplink)
    return detected_uplink

View File

@ -1,266 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import ast
import os
import platform
import sys
import time

import eventlet
eventlet.monkey_patch()

from oslo_serialization import jsonutils

from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.agent import iptables_driver as iptd
from networking_cisco.apps.saf.agent.vdp import dfa_vdp_mgr as vdpm
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import rpc
from networking_cisco.apps.saf.common import utils
LOG = logging.getLogger(__name__)
thishost = platform.node()
class RpcCallBacks(object):

    """RPC callback methods invoked on messages from the DFA server."""

    def __init__(self, vdpd, ipt_drvr):
        """Store the VDP manager and the (optional) iptables driver.

        :param vdpd: VdpMgr instance used to relay VM/uplink events.
        :param ipt_drvr: IptablesDriver instance, or None when native
            DHCP is in use and no iptables updates are needed.
        """
        self._vdpd = vdpd
        self._iptd = ipt_drvr

    def _enqueue_event(self, vm):
        """Queue an iptables rule update for a VM with a real address.

        Skips VMs that have no OUI info or a 0.0.0.0 placeholder IP.
        """
        oui = vm.get('oui')
        if oui and oui.get('ip_addr') != '0.0.0.0' and self._iptd:
            rule_info = dict(mac=vm.get('vm_mac'),
                             ip=oui.get('ip_addr'),
                             port=vm.get('port_uuid'),
                             status=vm.get('status'))
            self._iptd.enqueue_event(rule_info)

    def send_vm_info(self, context, msg):
        """Process VM info (a dict or list of dicts) from the server.

        The payload is a Python-literal encoded string; ast.literal_eval
        parses it safely, unlike the original eval() which would execute
        arbitrary expressions arriving over RPC.
        """
        vm_info = ast.literal_eval(msg)
        LOG.debug('Received vm_info: %s', vm_info)
        # Call VDP/LLDPad API to send the info.
        self._vdpd.vdp_vm_event(vm_info)
        # Enqueue the vm info for updating iptables.
        if isinstance(vm_info, list):
            for vm in vm_info:
                self._enqueue_event(vm)
        else:
            self._enqueue_event(vm_info)

    def update_ip_rule(self, context, msg):
        """Update the iptables spoofing rule for a single host."""
        # Safe literal parsing instead of eval() on the RPC payload.
        rule_info = ast.literal_eval(msg)
        LOG.debug('RX Info : %s', rule_info)
        # Update the iptables for this rule.
        if self._iptd:
            self._iptd.enqueue_event(rule_info)

    def send_msg_to_agent(self, context, msg):
        """Dispatch a typed server message (e.g. uplink name) to the agent."""
        msg_type = context.get('type')
        uplink = jsonutils.loads(msg)
        LOG.debug("Received %(context)s and %(msg)s", (
            {'context': context, 'msg': uplink}))
        if msg_type == constants.UPLINK_NAME:
            LOG.debug("uplink is %(uplink)s", uplink)
            self._vdpd.dfa_uplink_restart(uplink)
class DfaAgent(object):

    """DFA agent.

    Runs on each compute node: sends heartbeats to the DFA server,
    relays VM events to LLDPad/VDP, and (when DCNM provides DHCP)
    keeps iptables spoofing rules in sync with VM IP addresses.
    """

    def __init__(self, host, rpc_qn):
        # host: this node's hostname (platform.node()).
        # rpc_qn: base name of the agent RPC queue; the host id is
        # appended to make it unique per node.
        self._host_name = host
        self._cfg = config.CiscoDFAConfig('neutron').cfg
        # Prefer an explicitly configured host id; otherwise derive it
        # from the hostname.
        self._my_host = self._cfg.DEFAULT.host if self._cfg.DEFAULT.host else (
            utils.find_agent_host_id(host))
        self._qn = '_'.join((rpc_qn, self._my_host))
        LOG.debug('Starting DFA Agent on %s', self._my_host)

        # List of task in the agent
        self.agent_task_list = []

        # This flag indicates the agent started for the first time.
        self._need_uplink_info = True

        # Initialize iptables driver. This will be used to update the ip
        # rules in iptables, after launching an instance.  Only needed
        # when DCNM provides DHCP; with native DHCP no rules are kept.
        if (self._cfg.dcnm.dcnm_dhcp.lower() == 'true'):
            self._iptd = iptd.IptablesDriver(self._cfg)
        else:
            self._iptd = None
            LOG.debug("Using native dhcp, iptable driver is not needed")

        # Setup RPC client for sending heartbeat to controller
        self._url = self._cfg.dfa_rpc.transport_url
        self.setup_client_rpc()

        # Initialize VPD manager.
        br_int = self._cfg.dfa_agent.integration_bridge
        br_ext = self._cfg.dfa_agent.external_dfa_bridge
        config_dict = {'integration_bridge': br_int,
                       'external_bridge': br_ext,
                       'host_id': self._my_host,
                       'root_helper': self._cfg.sys.root_helper,
                       'node_list': self._cfg.general.node,
                       'node_uplink_list': self._cfg.general.node_uplink}
        self._vdpm = vdpm.VdpMgr(config_dict, self.clnt, self._host_name)
        self.pool = eventlet.GreenPool()
        self.setup_rpc()

    def setup_client_rpc(self):
        """Setup RPC client for dfa agent."""
        # Setup RPC client.
        self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE,
                                     exchange=constants.DFA_EXCHANGE)

    def send_heartbeat(self):
        """Send a timestamped heartbeat to the server (fire-and-forget cast)."""
        context = {}
        args = jsonutils.dumps(dict(when=time.ctime(), agent=thishost))
        msg = self.clnt.make_msg('heartbeat', context, msg=args)
        resp = self.clnt.cast(msg)
        LOG.debug("send_heartbeat: resp = %s", resp)

    def request_uplink_info(self):
        """Ask the server for this node's uplink information.

        The blocking reply updates _need_uplink_info; an RPC timeout is
        logged and implicitly retried on the next heartbeat cycle.
        """
        context = {}
        msg = self.clnt.make_msg('request_uplink_info',
                                 context, agent=self._my_host)
        try:
            resp = self.clnt.call(msg)
            LOG.debug("request_uplink_info: resp = %s", resp)
            self._need_uplink_info = resp
        except rpc.MessagingTimeout:
            LOG.error(_LE("RPC timeout: Request for uplink info failed."))

    def is_uplink_received(self):
        """Finds if uplink information is received and processed. """
        return self._vdpm.is_uplink_received()

    def setup_rpc(self):
        """Setup RPC server for dfa agent."""
        endpoints = RpcCallBacks(self._vdpm, self._iptd)
        self.server = rpc.DfaRpcServer(self._qn, self._my_host, self._url,
                                       endpoints,
                                       exchange=constants.DFA_EXCHANGE)

    def start_rpc(self):
        # Blocks until the server is stopped; run inside its own thread.
        self.server.start()
        LOG.debug('starting RPC server on the agent.')
        self.server.wait()

    def stop_rpc(self):
        # Stop the RPC server loop started by start_rpc().
        self.server.stop()

    def start_rpc_task(self):
        # Run the RPC server in its own event-processing thread.
        thrd = utils.EventProcessingThread('Agent_RPC_Server',
                                           self, 'start_rpc')
        thrd.start()
        return thrd

    def start_iptables_task(self):
        # Run the iptables rule processor in its own thread.
        thrd = self._iptd.create_thread()
        thrd.start()
        return thrd

    def start_tasks(self):
        # Start all agent threads and remember them so main() can check
        # their liveness on each heartbeat.
        rpc_thrd = self.start_rpc_task()
        self.agent_task_list.append(rpc_thrd)
        if (self._iptd):
            ipt_thrd = self.start_iptables_task()
            self.agent_task_list.append(ipt_thrd)
def save_my_pid(cfg):
    """Write this process's pid to the configured pid file.

    A no-op unless both pid_dir and pid_agent_file are configured.
    """
    agent_pid = os.getpid()
    pid_dir = cfg.dfa_log.pid_dir
    pid_name = cfg.dfa_log.pid_agent_file
    if not (pid_dir and pid_name):
        return
    try:
        if not os.path.exists(pid_dir):
            os.makedirs(pid_dir)
    except OSError:
        LOG.error(_LE('Fail to create %s'), pid_dir)
        return
    LOG.debug('dfa_agent pid=%s', agent_pid)
    with open(os.path.join(pid_dir, pid_name), 'w') as fn:
        fn.write(str(agent_pid))
def main():
    """Entry point for the DFA enabler agent process.

    Sets up logging, records the pid, starts the agent threads and
    then loops forever sending heartbeats and checking thread health.
    """
    # Setup logger
    cfg = config.CiscoDFAConfig().cfg
    logging.setup_logger('dfa_enabler', cfg)
    # Get pid of the process and save it.
    save_my_pid(cfg)
    dfa_agent = None
    try:
        # Create DFA agent object (inside the try so a constructor
        # failure is logged and exits cleanly too).
        dfa_agent = DfaAgent(thishost, constants.DFA_AGENT_QUEUE)
        LOG.debug('Starting tasks in agent...')
        # Start all task in the agent.
        dfa_agent.start_tasks()
        # Endless loop
        while True:
            start = time.time()
            # Send heartbeat to controller, data includes:
            # - timestamp
            # - host name
            dfa_agent.send_heartbeat()
            # If the agent comes up for the first time (could be after
            # a crash), ask for the uplink info.
            if dfa_agent._need_uplink_info or (
                    not dfa_agent.is_uplink_received()):
                dfa_agent.request_uplink_info()
            for trd in dfa_agent.agent_task_list:
                if not trd.am_i_active:
                    LOG.info(_LI("Thread %s is not active."), trd.name)
            delta = time.time() - start
            # Clamp at zero: the loop body may overrun the heartbeat
            # interval, and a negative sleep would be invalid.
            eventlet.sleep(max(0, constants.HB_INTERVAL - delta))
    except Exception as e:
        # The agent may have failed before construction completed, in
        # which case there is no RPC server to stop (the original code
        # raised NameError here).
        if dfa_agent is not None:
            dfa_agent.stop_rpc()
        LOG.exception(_LE('Exception %s is received'), str(e))
        sys.exit("ERROR: %s" % str(e))


if __name__ == '__main__':
    sys.exit(main())

View File

@ -1,208 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from six.moves import queue
import time
from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as dsl
from networking_cisco.apps.saf.common import utils
LOG = logging.getLogger(__name__)
class IpMacPort(object):

    """Holds the (ip, mac, port) triple for one host spoofing rule.

    The MAC is normalized to lower case, and the iptables chain name
    is derived from the neutron port id.
    """

    def __init__(self, ip, mac, port):
        self.ip = ip
        # Normalize to lower case; preserve None/empty values as-is.
        self.mac = mac.lower() if mac else mac
        self.port = port
        # Neutron names the per-port spoofing chain after the first
        # ten characters of the port id.
        self.chain = 'neutron-openvswi-s' + port[:10]
class IptablesDriver(object):

    """This class provides API to update iptables rule.

    Keeps an in-memory list of (ip, mac, port) rules fed by RPC events
    and periodically rewrites the neutron anti-spoofing iptables rules
    so each VM's rule matches its current IP address.
    """

    def __init__(self, cfg):
        # Prefix (e.g. 'sudo') used when invoking iptables commands.
        self._root_helper = cfg.sys.root_helper
        # List that contains VM info: ip, mac and port.
        self.rule_info = []
        # Queue to keep messages from server
        self._iptq = queue.Queue()

    def update_rule_entry(self, rule_info):
        """Update the rule_info list."""
        if rule_info.get('status') == 'up':
            self.add_rule_entry(rule_info)
        if rule_info.get('status') == 'down':
            self.remove_rule_entry(rule_info)

    def add_rule_entry(self, rule_info):
        """Add host data object to the rule_info list."""
        new_rule = IpMacPort(rule_info.get('ip'), rule_info.get('mac'),
                             rule_info.get('port'))
        LOG.debug('Added rule info %s to the list', rule_info)
        self.rule_info.append(new_rule)

    def remove_rule_entry(self, rule_info):
        """Remove host data object from rule_info list."""
        # Iterate over a copy so removal during iteration is safe.
        temp_list = list(self.rule_info)
        for rule in temp_list:
            if (rule.ip == rule_info.get('ip') and
                    rule.mac == rule_info.get('mac') and
                    rule.port == rule_info.get('port')):
                LOG.debug('Removed rule info %s from the list', rule_info)
                self.rule_info.remove(rule)

    def _find_chain_name(self, mac):
        """Find a rule associated with a given mac.

        Returns the chain name, or None when no rule mentions the mac.
        """
        ipt_cmd = ['iptables', '-t', 'filter', '-S']
        cmdo = dsl.execute(ipt_cmd, root_helper=self._root_helper,
                           log_output=False)
        for o in cmdo.split('\n'):
            if mac in o.lower():
                # Chain name is the second token of '-A <chain> ...'.
                chain = o.split()[1]
                LOG.info(_LI('Find %(chain)s for %(mac)s.'),
                         {'chain': chain, 'mac': mac})
                return chain

    def _find_rule_no(self, mac):
        """Find rule number associated with a given mac.

        Returns the line number, or None when no rule mentions the mac.
        """
        ipt_cmd = ['iptables', '-L', '--line-numbers']
        cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)
        for o in cmdo.split('\n'):
            if mac in o.lower():
                # With --line-numbers, the number is the first token.
                rule_no = o.split()[0]
                LOG.info(_LI('Found rule %(rule)s for %(mac)s.'),
                         {'rule': rule_no, 'mac': mac})
                return rule_no

    def update_ip_rule(self, ip, mac):
        """Update a rule associated with given ip and mac."""
        rule_no = self._find_rule_no(mac)
        chain = self._find_chain_name(mac)
        if not rule_no or not chain:
            LOG.error(_LE('Failed to update ip rule for %(ip)s %(mac)s'),
                      {'ip': ip, 'mac': mac})
            return
        # Replace (-R) the rule in place so the anti-spoofing entry
        # points at the VM's current IP address.
        update_cmd = ['iptables', '-R', '%s' % chain, '%s' % rule_no,
                      '-s', '%s/32' % ip, '-m', 'mac', '--mac-source',
                      '%s' % mac, '-j', 'RETURN']
        LOG.debug('Execute command: %s', update_cmd)
        dsl.execute(update_cmd, self._root_helper, log_output=False)

    def enqueue_event(self, event):
        """Enqueue the given event.

        The event contains host data (ip, mac, port) which will be used to
        update the spoofing rule for the host in the iptables.
        """
        LOG.debug('Enqueue iptable event %s.', event)
        if event.get('status') == 'up':
            for rule in self.rule_info:
                if (rule.mac == event.get('mac').lower() and
                        rule.port == event.get('port')):
                    # Entry already exist in the list.
                    if rule.ip != event.get('ip'):
                        LOG.debug('enqueue_event: Only updating IP from %s'
                                  ' to %s.' % (rule.ip, event.get('ip')))
                        # Only update the IP address if it is different.
                        rule.ip = event.get('ip')
                    return
        self._iptq.put(event)

    def create_thread(self):
        """Create a task to process event for updating iptables."""
        ipt_thrd = utils.EventProcessingThread('iptables', self,
                                               'process_rule_info')
        return ipt_thrd

    def _is_ip_in_rule(self, ip, rule):
        # rule is a tokenized iptables line; the source address is the
        # token following '-s' (as 'a.b.c.d/len').
        try:
            ip_loc = rule.index('-s') + 1
            rule_ip = rule[ip_loc].split('/')[0]
            return ip == rule_ip
        except Exception:
            return False

    def update_iptables(self):
        """Update iptables based on information in the rule_info."""
        # Read the iptables
        iptables_cmds = ['iptables-save', '-c']
        all_rules = dsl.execute(iptables_cmds, root_helper=self._root_helper,
                                log_output=False)
        # For each rule in rule_info update the rule if necessary.
        new_rules = []
        is_modified = False
        for line in all_rules.split('\n'):
            new_line = line
            line_content = line.split()
            # The spoofing rule which includes mac and ip should have
            # -s cidr/32 option for ip address. Otherwise no rule
            # will be modified.
            if '-s' in line_content:
                tmp_rule_info = list(self.rule_info)
                for rule in tmp_rule_info:
                    if (rule.mac in line.lower() and
                            rule.chain.lower() in line.lower() and
                            not self._is_ip_in_rule(rule.ip, line_content)):
                        ip_loc = line_content.index('-s') + 1
                        line_content[ip_loc] = rule.ip + '/32'
                        new_line = ' '.join(line_content)
                        LOG.debug('Modified %(old_rule)s. '
                                  'New rule is %(new_rule)s.' % (
                                      {'old_rule': line,
                                       'new_rule': new_line}))
                        is_modified = True
            new_rules.append(new_line)
        if is_modified and new_rules:
            # Updated all the rules. Now commit the new rules.
            iptables_cmds = ['iptables-restore', '-c']
            dsl.execute(iptables_cmds, process_input='\n'.join(new_rules),
                        root_helper=self._root_helper, log_output=False)

    def process_rule_info(self):
        """Task responsible for processing event queue."""
        while True:
            try:
                event = self._iptq.get(block=False)
                LOG.debug('Dequeue event: %s.', event)
                self.update_rule_entry(event)
            except queue.Empty:
                # No pending events: reconcile iptables, then back off.
                self.update_iptables()
                time.sleep(1)
            except Exception:
                LOG.exception(_LE('ERROR: failed to process queue'))

View File

@ -1,166 +0,0 @@
# Copyright 2017 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This file contains the public API's for interacting with LLDPAD. """
from networking_cisco._i18n import _LE
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as utils
LOG = logging.getLogger(__name__)
class LldpApi(object):
"""LLDP API Class. """
def __init__(self, root_helper):
self.root_helper = root_helper
def enable_lldp(self, port_name, is_ncb=True, is_nb=False):
"""Function to enable LLDP on the interface. """
reply = None
if is_ncb:
reply = self.run_lldptool(["-L", "-i", port_name, "-g", "ncb",
"adminStatus=rxtx"])
elif is_nb:
reply = self.run_lldptool(["-L", "-i", port_name, "-g", "nb",
"adminStatus=rxtx"])
else:
LOG.error(_LE("Both NCB and NB are not selected to "
"enable LLDP"))
return False
if reply is None:
return False
exp_str = "adminstatus=rxtx"
if exp_str in reply.replace(" ", "").lower():
return True
else:
return False
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False):
"""Function to Query LLDP TLV on the interface. """
reply = None
if is_ncb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "ncb"])
elif is_nb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "nb"])
else:
LOG.error(_LE("Both NCB and NB are not selected to "
"query LLDP"))
return reply
def run_lldptool(self, args):
"""Function for invoking the lldptool utility. """
full_args = ['lldptool'] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as exc:
LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': str(exc)})
def _check_common_tlv_format(self, tlv_complete_data, tlv_data_pattern,
tlv_string):
"""Check for the common TLV format. """
if tlv_complete_data is None:
return False, None
tlv_string_split = tlv_complete_data.split(tlv_string)
if len(tlv_string_split) < 2:
return False, None
next_tlv_list = tlv_string_split[1].split('TLV')[0]
tlv_val_set = next_tlv_list.split(tlv_data_pattern)
if len(tlv_val_set) < 2:
return False, None
return True, tlv_val_set
def get_remote_evb_cfgd(self, tlv_data):
"""Returns IF EVB TLV is present in the TLV. """
return self._check_common_tlv_format(
tlv_data, "mode:", "EVB Configuration TLV")[0]
def get_remote_evb_mode(self, tlv_data):
"""Returns the EVB mode in the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "mode:", "EVB Configuration TLV")
if not ret:
return None
mode_val = parsed_val[1].split()[0].strip()
return mode_val
def get_remote_mgmt_addr(self, tlv_data):
"""Returns Remote Mgmt Addr from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "IPv4:", "Management Address TLV")
if not ret:
return None
addr_fam = 'IPv4:'
addr = parsed_val[1].split('\n')[0].strip()
return addr_fam + addr
def get_remote_sys_desc(self, tlv_data):
    """Return the peer's System Description string, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Description TLV")
    return fields[1].strip() if found else None
def get_remote_sys_name(self, tlv_data):
    """Return the peer's System Name string, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "System Name TLV")
    return fields[1].strip() if found else None
def get_remote_port(self, tlv_data):
    """Return the peer's Port Description string, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "\n", "Port Description TLV")
    return fields[1].strip() if found else None
def get_remote_chassis_id_mac(self, tlv_data):
    """Return the peer's Chassis-ID MAC address, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "MAC:", "Chassis ID TLV")
    if not found:
        return None
    first_line = fields[1].split('\n')[0]
    return first_line.strip()
def get_remote_port_id_mac(self, tlv_data):
    """Return the peer's Port-ID MAC address, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "MAC:", "Port ID TLV")
    if not found:
        return None
    first_line = fields[1].split('\n')[0]
    return first_line.strip()
def get_remote_port_id_local(self, tlv_data):
    """Return the peer's locally-assigned Port ID, or None when absent."""
    found, fields = self._check_common_tlv_format(
        tlv_data, "Local:", "Port ID TLV")
    if not found:
        return None
    first_line = fields[1].split('\n')[0]
    return first_line.strip()

View File

@ -1,354 +0,0 @@
# Copyright 2017 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
This file contains the implementation of Topology Discovery of servers and
their associated leaf switches using Open source implementation of LLDP.
www.open-lldp.org
"""
from networking_cisco._i18n import _LE
from networking_cisco.apps.saf.agent.topo_disc import (
topo_disc_constants as constants)
from networking_cisco.apps.saf.agent.topo_disc import pub_lldp_api as pub_lldp
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as sys_utils
from networking_cisco.apps.saf.common import utils
LOG = logging.getLogger(__name__)
class TopoIntfAttr(object):
    """Stores the LLDP/topology attributes of one interface.

    Tracks the LLDP configuration status, the remote (peer) TLV values
    last seen on the interface, bond membership, and bookkeeping for
    RPC retries and periodic topology-update sends.
    """

    def __init__(self, protocol_interface, phy_interface):
        """Initialize the attribute set for an interface pair."""
        self.init_params(protocol_interface, phy_interface)

    def init_params(self, protocol_interface, phy_interface):
        """(Re)initialize all tracked parameters to their defaults."""
        # True once LLDP has been successfully enabled on the interface.
        self.lldp_cfgd = False
        self.local_intf = protocol_interface
        self.phy_interface = phy_interface
        # Remote (peer) TLV attributes, filled from periodic LLDP queries.
        self.remote_evb_cfgd = False
        self.remote_evb_mode = None
        self.remote_mgmt_addr = None
        self.remote_system_desc = None
        self.remote_system_name = None
        self.remote_port = None
        self.remote_chassis_id_mac = None
        self.remote_port_id_mac = None
        # Local attributes (placeholders; not filled by the visible code).
        self.local_evb_cfgd = False
        self.local_evb_mode = None
        self.local_mgmt_address = None
        self.local_system_desc = None
        self.local_system_name = None
        self.local_port = None
        self.local_chassis_id_mac = None
        self.local_port_id_mac = None
        # True when the last RPC to the server failed and must be retried.
        self.db_retry_status = False
        # Periodic cycles elapsed since a topology update was last sent.
        self.topo_send_cnt = 0
        self.bond_interface = None
        self.bond_member_ports = None

    def _store_if_changed(self, attr, value):
        """Store value in attribute *attr*; return True when it differed.

        Common helper for all the ``*_uneq_store`` methods below, which
        previously duplicated this compare-and-store logic eight times.
        """
        if getattr(self, attr) != value:
            setattr(self, attr, value)
            return True
        return False

    def update_lldp_status(self, status):
        """Update the LLDP cfg status. """
        self.lldp_cfgd = status

    def cmp_update_bond_intf(self, bond_interface):
        """Update the bond interface and its members.

        Update the bond interface, if this interface is a part of bond.
        Return True if there's a change.
        """
        if bond_interface != self.bond_interface:
            self.bond_interface = bond_interface
            self.bond_member_ports = sys_utils.get_member_ports(bond_interface)
            return True
        return False

    def get_lldp_status(self):
        """Retrieve the LLDP cfg status. """
        return self.lldp_cfgd

    def get_db_retry_status(self):
        """Retrieve the RPC retry status.

        This retrieves whether the last RPC to the server needs a retry.
        """
        return self.db_retry_status

    def get_phy_interface(self):
        """Retrieves the physical interface. """
        return self.phy_interface

    def store_db_retry_status(self, status):
        """Store whether the RPC to the server must be retried. """
        self.db_retry_status = status

    def get_topo_disc_send_cnt(self):
        """Retrieve the topology status send count for this interface. """
        return self.topo_send_cnt

    def incr_topo_disc_send_cnt(self):
        """Increment the topology status send count for this interface. """
        self.topo_send_cnt += 1

    def reset_topo_disc_send_cnt(self):
        """Reset the topology status send count for this interface. """
        self.topo_send_cnt = 0

    def remote_evb_mode_uneq_store(self, remote_evb_mode):
        """Saves the EVB mode, if it is not the same as stored. """
        return self._store_if_changed('remote_evb_mode', remote_evb_mode)

    def remote_evb_cfgd_uneq_store(self, remote_evb_cfgd):
        """This saves the EVB cfg, if it is not the same as stored. """
        return self._store_if_changed('remote_evb_cfgd', remote_evb_cfgd)

    def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr):
        """This function saves the MGMT address, if different from stored. """
        return self._store_if_changed('remote_mgmt_addr', remote_mgmt_addr)

    def remote_sys_desc_uneq_store(self, remote_system_desc):
        """This function saves the system desc, if different from stored. """
        return self._store_if_changed('remote_system_desc',
                                      remote_system_desc)

    def remote_sys_name_uneq_store(self, remote_system_name):
        """This function saves the system name, if different from stored. """
        return self._store_if_changed('remote_system_name',
                                      remote_system_name)

    def remote_port_uneq_store(self, remote_port):
        """This function saves the port, if different from stored. """
        return self._store_if_changed('remote_port', remote_port)

    def remote_chassis_id_mac_uneq_store(self, remote_chassis_id_mac):
        """This function saves the Chassis MAC, if different from stored. """
        return self._store_if_changed('remote_chassis_id_mac',
                                      remote_chassis_id_mac)

    def remote_port_id_mac_uneq_store(self, remote_port_id_mac):
        """This function saves the port MAC, if different from stored. """
        return self._store_if_changed('remote_port_id_mac',
                                      remote_port_id_mac)
class TopoDiscPubApi(object):
    """Class-level public API exposing per-interface topology state."""

    # Maps interface name -> its TopoIntfAttr object (shared by subclasses).
    topo_intf_obj_dict = {}

    @classmethod
    def store_obj(cls, intf, obj):
        """Remember the topology attribute object for an interface."""
        cls.topo_intf_obj_dict[intf] = obj

    @classmethod
    def get_lldp_status(cls, intf):
        """Return the LLDP configuration status of an interface.

        Interfaces that were never stored are logged and reported as
        not configured.
        """
        if intf not in cls.topo_intf_obj_dict:
            LOG.error(_LE("Interface %s not configured at all"), intf)
            return False
        return cls.topo_intf_obj_dict[intf].get_lldp_status()
class TopoDisc(TopoDiscPubApi):
    """Topology Discovery top-level class.

    Enables LLDP on the monitored interfaces and periodically compares
    the received TLVs against the stored ones, invoking the registered
    callback on any change.
    """

    def __init__(self, cb, root_helper, intf_list=None, all_intf=True):
        """Initialization routine, to configure interface.

        Also create the periodic task.
        cb => Callback in case any of the interface TLV changes.
        intf_list => List of interfaces to be LLDP enabled and monitored.
        all_intf => Boolean that signifies if all physical interfaces are to
        be monitored. intf_list will be None, if this variable is True.
        """
        self.pub_lldp = pub_lldp.LldpApi(root_helper)
        self._init_cfg_interfaces(cb, intf_list, all_intf)
        # NOTE: construction starts the periodic polling task immediately.
        per_task = utils.PeriodicTask(constants.PERIODIC_TASK_INTERVAL,
                                      self.periodic_discovery_task)
        per_task.run()

    def _init_cfg_interfaces(self, cb, intf_list=None, all_intf=True):
        """Configure the interfaces during init time. """
        if not all_intf:
            self.intf_list = intf_list
        else:
            # Monitor every running physical interface on the host.
            self.intf_list = sys_utils.get_all_run_phy_intf()
        self.cb = cb
        # Maps protocol interface name -> TopoIntfAttr.
        self.intf_attr = {}
        self.cfg_lldp_interface_list(self.intf_list)

    def cfg_intf(self, protocol_interface, phy_interface=None):
        """Called by application to add an interface to the list. """
        self.intf_list.append(protocol_interface)
        self.cfg_lldp_interface(protocol_interface, phy_interface)

    def uncfg_intf(self, intf):
        """Called by application to remove an interface to the list.

        From an applications perspective, it makes sense to have this function.
        But, here no action can be taken for the following reasons, but just
        having it as a place-holder for tomorrow.
        => Can't remove interface from the list since DB in server may appear
        stale.
        self.intf_list.remove(intf)
        => One can just remove the interface DB, but need to retry that till
        it succeeds, so it has to be in periodic loop.
        => So, currently leaving it as is, since LLDP frames won't be obtained
        over the bridge, the periodic handler will automatically remove the
        DB for this interface from server
        """
        pass

    def create_attr_obj(self, protocol_interface, phy_interface):
        """Creates the local interface attribute object and stores it. """
        self.intf_attr[protocol_interface] = TopoIntfAttr(
            protocol_interface, phy_interface)
        # Also publish it through the class-level TopoDiscPubApi dict.
        self.store_obj(protocol_interface, self.intf_attr[protocol_interface])

    def get_attr_obj(self, intf):
        """Retrieve the interface object. """
        return self.intf_attr[intf]

    def cmp_store_tlv_params(self, intf, tlv_data):
        """Compare and store the received TLV.

        Compares the received TLV with stored TLV. Store the new TLV if it is
        different. Returns True when any of the tracked TLV attributes
        changed.
        """
        flag = False
        attr_obj = self.get_attr_obj(intf)
        remote_evb_mode = self.pub_lldp.get_remote_evb_mode(tlv_data)
        if attr_obj.remote_evb_mode_uneq_store(remote_evb_mode):
            flag = True
        remote_evb_cfgd = self.pub_lldp.get_remote_evb_cfgd(tlv_data)
        if attr_obj.remote_evb_cfgd_uneq_store(remote_evb_cfgd):
            flag = True
        remote_mgmt_addr = self.pub_lldp.get_remote_mgmt_addr(tlv_data)
        if attr_obj.remote_mgmt_addr_uneq_store(remote_mgmt_addr):
            flag = True
        remote_sys_desc = self.pub_lldp.get_remote_sys_desc(tlv_data)
        if attr_obj.remote_sys_desc_uneq_store(remote_sys_desc):
            flag = True
        remote_sys_name = self.pub_lldp.get_remote_sys_name(tlv_data)
        if attr_obj.remote_sys_name_uneq_store(remote_sys_name):
            flag = True
        remote_port = self.pub_lldp.get_remote_port(tlv_data)
        if attr_obj.remote_port_uneq_store(remote_port):
            flag = True
        remote_chassis_id_mac = self.pub_lldp.\
            get_remote_chassis_id_mac(tlv_data)
        if attr_obj.remote_chassis_id_mac_uneq_store(remote_chassis_id_mac):
            flag = True
        remote_port_id_mac = self.pub_lldp.get_remote_port_id_mac(tlv_data)
        if attr_obj.remote_port_id_mac_uneq_store(remote_port_id_mac):
            flag = True
        return flag

    def cfg_lldp_interface(self, protocol_interface, phy_interface=None):
        """Cfg LLDP on interface and create object. """
        if phy_interface is None:
            phy_interface = protocol_interface
        self.create_attr_obj(protocol_interface, phy_interface)
        ret = self.pub_lldp.enable_lldp(protocol_interface)
        attr_obj = self.get_attr_obj(protocol_interface)
        attr_obj.update_lldp_status(ret)

    def cfg_lldp_interface_list(self, intf_list):
        """This routine configures LLDP on the given interfaces list. """
        for intf in intf_list:
            self.cfg_lldp_interface(intf)

    def periodic_discovery_task(self):
        """Periodic task that checks the interface TLV attributes. """
        try:
            self._periodic_task_int()
        except Exception as exc:
            # Keep the periodic task alive; just log the failure.
            LOG.error(_LE("Exception caught in periodic discovery task %s"),
                      str(exc))

    def _check_bond_interface_change(self, phy_interface, attr_obj):
        """Check if there's any change in bond interface.

        First check if the interface passed itself is a bond-interface and then
        retrieve the member list and compare.
        Next, check if the interface passed is a part of the bond interface and
        then retrieve the member list and compare.
        """
        bond_phy = sys_utils.get_bond_intf(phy_interface)
        if sys_utils.is_intf_bond(phy_interface):
            bond_intf = phy_interface
        else:
            bond_intf = bond_phy
        # This can be an addition or removal of the interface to a bond.
        bond_intf_change = attr_obj.cmp_update_bond_intf(bond_intf)
        return bond_intf_change

    def _periodic_task_int(self):
        """Internal periodic discovery task routine to check TLV attributes.

        This routine retrieves the LLDP TLV's on all its configured
        interfaces. If the retrieved TLV is different than the stored TLV,
        it invokes the callback.
        """
        for intf in self.intf_list:
            attr_obj = self.get_attr_obj(intf)
            status = attr_obj.get_lldp_status()
            if not status:
                # LLDP not yet enabled on this interface; retry and move on.
                ret = self.pub_lldp.enable_lldp(intf)
                attr_obj.update_lldp_status(ret)
                continue
            bond_intf_change = self._check_bond_interface_change(
                attr_obj.get_phy_interface(), attr_obj)
            tlv_data = self.pub_lldp.get_lldp_tlv(intf)
            # Fire the callback when the TLVs changed, a previous RPC needs a
            # retry, bond membership changed, or the send threshold expired.
            if self.cmp_store_tlv_params(intf, tlv_data) or (
                attr_obj.get_db_retry_status() or bond_intf_change or (
                    attr_obj.get_topo_disc_send_cnt() > (
                        constants.TOPO_DISC_SEND_THRESHOLD))):
                # Passing the interface attribute object to CB
                ret = self.cb(intf, attr_obj)
                # A falsy callback result marks the interface for retry.
                status = not ret
                attr_obj.store_db_retry_status(status)
                attr_obj.reset_topo_disc_send_cnt()
            else:
                attr_obj.incr_topo_disc_send_cnt()

View File

@ -1,22 +0,0 @@
# Copyright 2017 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Topology Discovery constants:
# Query LLDP Daemon every 15 seconds (seconds between periodic task runs).
PERIODIC_TASK_INTERVAL = 15
# Threshold in periodic cycles, not seconds: a topology update message will
# be sent after every minute (15*4), even if there's no change in the
# parameters.
TOPO_DISC_SEND_THRESHOLD = 4

View File

@ -1,731 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from six.moves import queue
import time
from oslo_serialization import jsonutils
from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.agent import detect_uplink as uplink_det
from networking_cisco.apps.saf.agent.topo_disc import topo_disc
from networking_cisco.apps.saf.agent.vdp import ovs_vdp
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as sys_utils
from networking_cisco.apps.saf.common import rpc
from networking_cisco.apps.saf.common import utils
LOG = logging.getLogger(__name__)
class VdpMsgPriQue(object):
    """Priority queue of VDP messages (lower number = higher priority)."""

    def __init__(self):
        self._queue = queue.PriorityQueue()

    def enqueue(self, priority, msg):
        """Queue msg at the given priority."""
        self._queue.put((priority, msg))

    def dequeue(self):
        """Block until a message is available; return (priority, msg)."""
        priority, msg = self._queue.get()
        return priority, msg

    def dequeue_nonblock(self):
        """Return (priority, msg) without blocking; raises when empty."""
        priority, msg = self._queue.get_nowait()
        return priority, msg

    def is_not_empty(self):
        """Return True when at least one message is queued."""
        return not self._queue.empty()
class VdpQueMsg(object):
    """A VDP message together with its payload dictionary.

    The constructor fills ``msg_dict`` according to the message type:
    a single VM event, an uplink event, or a bulk VM sync.
    """

    def __init__(self, msg_type, port_uuid=None, vm_mac=None, oui=None,
                 net_uuid=None, segmentation_id=None, status=None,
                 vm_bulk_list=None, phy_uplink=None, br_int=None, br_ex=None,
                 root_helper=None):
        self.msg_dict = {}
        self.msg_type = msg_type
        if msg_type == constants.VM_MSG_TYPE:
            self.construct_vm_msg(port_uuid, vm_mac, net_uuid,
                                  segmentation_id, status, oui, phy_uplink)
        elif msg_type == constants.UPLINK_MSG_TYPE:
            self.construct_uplink_msg(status, phy_uplink, br_int, br_ex,
                                      root_helper)
        elif msg_type == constants.VM_BULK_SYNC_MSG_TYPE:
            self.construct_vm_bulk_sync_msg(vm_bulk_list, phy_uplink)

    def construct_vm_msg(self, port_uuid, vm_mac, net_uuid,
                         segmentation_id, status, oui, phy_uplink):
        """Fill the payload of a single-VM event."""
        self.msg_dict.update({'port_uuid': port_uuid,
                              'vm_mac': vm_mac,
                              'net_uuid': net_uuid,
                              'segmentation_id': segmentation_id,
                              'status': status,
                              'oui': oui,
                              'phy_uplink': phy_uplink})

    def construct_vm_bulk_sync_msg(self, vm_bulk_list, phy_uplink):
        """Fill the payload of a bulk VM sync message."""
        self.msg_dict.update({'phy_uplink': phy_uplink,
                              'vm_bulk_list': vm_bulk_list})

    def construct_uplink_msg(self, status, phy_uplink, br_int, br_ex,
                             root_helper):
        """Fill the payload of an uplink up/down event."""
        self.msg_dict.update({'status': status,
                              'phy_uplink': phy_uplink,
                              'br_int': br_int,
                              'br_ex': br_ex,
                              'root_helper': root_helper})

    # --- payload accessors -------------------------------------------------

    def get_oui(self):
        return self.msg_dict['oui']

    def get_uplink(self):
        return self.msg_dict['phy_uplink']

    def get_mac(self):
        return self.msg_dict['vm_mac']

    def get_status(self):
        return self.msg_dict['status']

    def get_segmentation_id(self):
        return self.msg_dict['segmentation_id']

    def get_net_uuid(self):
        return self.msg_dict['net_uuid']

    def get_port_uuid(self):
        return self.msg_dict['port_uuid']

    def get_integ_br(self):
        return self.msg_dict['br_int']

    def get_ext_br(self):
        return self.msg_dict['br_ex']

    def get_root_helper(self):
        return self.msg_dict['root_helper']

    def set_uplink(self, uplink):
        self.msg_dict['phy_uplink'] = uplink
class VdpMgr(object):
"""Responsible for Handling VM/Uplink requests. """
def __init__(self, config_dict, rpc_client, hostname):
    """Initialize the VDP manager.

    config_dict: agent configuration (bridge names, root helper, host
    id and the static uplink node/port lists).
    rpc_client: RPC client used to report results to the server.
    hostname: this agent's host name, used for static uplink lookup.

    NOTE: construction has side effects -- start() spawns the message
    processing thread and periodic tasks, and TopoDisc starts its own
    periodic task.
    """
    self.br_integ = config_dict.get('integration_bridge')
    self.br_ex = config_dict.get('external_bridge')
    self.root_helper = config_dict.get('root_helper')
    self.host_id = config_dict.get('host_id')
    self.node_list = config_dict['node_list']
    self.node_uplink_list = config_dict['node_uplink_list']
    # Check for error?? fixme(padkrish)
    self.que = VdpMsgPriQue()
    self.err_que = VdpMsgPriQue()
    self.phy_uplink = None
    self.veth_intf = None
    self.restart_uplink_called = False
    # Maps uplink name -> OVSNeutronVdp object.
    self.ovs_vdp_obj_dict = {}
    self.rpc_clnt = rpc_client
    self.host_name = hostname
    self.uplink_det_compl = False
    self.process_uplink_ongoing = False
    self.uplink_down_cnt = 0
    self.is_os_run = False
    self.static_uplink = False
    self.static_uplink_port = None
    self.static_uplink_first = True
    self.bulk_vm_rcvd_flag = False
    self.bulk_vm_check_cnt = 0
    self.vdp_mgr_lock = utils.lock()
    self.read_static_uplink()
    self.start()
    self.topo_disc = topo_disc.TopoDisc(self.topo_disc_cb,
                                        self.root_helper)
def read_static_uplink(self):
    """Pick up a statically configured uplink for this host, if any.

    ``node_list`` and ``node_uplink_list`` are parallel comma-separated
    strings of host names and uplink ports; when this agent's host name
    is found, static uplink mode is enabled with the matching port.
    """
    if self.node_list is None or self.node_uplink_list is None:
        return
    nodes = self.node_list.split(',')
    uplinks = self.node_uplink_list.split(',')
    for node, port in zip(nodes, uplinks):
        if node.strip() == self.host_name:
            self.static_uplink = True
            self.static_uplink_port = port.strip()
            return
def topo_disc_cb(self, intf, topo_disc_obj):
    """Topology-discovery callback: forward the update to the server."""
    return self.save_topo_disc_params(intf, topo_disc_obj)
def update_vm_result(self, port_uuid, result, lvid=None,
                     vdp_vlan=None, fail_reason=None):
    """Report a VM port-event result to the server over RPC.

    The VLAN pair is only included when both lvid and vdp_vlan are
    known.  Returns the RPC response, or None on RPC timeout (which is
    only logged).
    """
    context = {'agent': self.host_id}
    if lvid is None or vdp_vlan is None:
        args = jsonutils.dumps({'port_uuid': port_uuid, 'result': result,
                                'fail_reason': fail_reason})
    else:
        args = jsonutils.dumps({'port_uuid': port_uuid, 'local_vlan': lvid,
                                'vdp_vlan': vdp_vlan, 'result': result,
                                'fail_reason': fail_reason})
    msg = self.rpc_clnt.make_msg('update_vm_result', context, msg=args)
    try:
        resp = self.rpc_clnt.call(msg)
        return resp
    except rpc.MessagingTimeout:
        LOG.error(_LE("RPC timeout: Failed to update VM result on the"
                      " server"))
def vdp_vlan_change_cb(self, port_uuid, lvid, vdp_vlan, fail_reason):
    """Callback function for updating the VDP VLAN in DB. """
    LOG.info(_LI("Vlan change CB lvid %(lvid)s VDP %(vdp)s"),
             {'lvid': lvid, 'vdp': vdp_vlan})
    # Report the new VLAN pair to the server as a successful result.
    self.update_vm_result(port_uuid, constants.RESULT_SUCCESS,
                          lvid=lvid, vdp_vlan=vdp_vlan,
                          fail_reason=fail_reason)
def process_vm_event(self, msg, phy_uplink):
    """Handle a single VM port up/down event.

    Sends a VDP port event through the uplink's OVS VDP object and
    reports success (with the VLAN pair) or failure to the server.
    """
    LOG.info(_LI("In processing VM Event status %(status)s for MAC "
                 "%(mac)s UUID %(uuid)s oui %(oui)s"),
             {'status': msg.get_status(), 'mac': msg.get_mac(),
              'uuid': msg.get_port_uuid(), 'oui': msg.get_oui()})
    # NOTE(review): unconditional 10s delay before every VM event --
    # presumably to let uplink processing settle; confirm necessity.
    time.sleep(10)
    if msg.get_status() == 'up':
        res_fail = constants.CREATE_FAIL
    else:
        res_fail = constants.DELETE_FAIL
    if (not self.uplink_det_compl or
            phy_uplink not in self.ovs_vdp_obj_dict):
        # Cannot process VM events until the uplink has been detected.
        LOG.error(_LE("Uplink Port Event not received yet"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    port_event_reply = ovs_vdp_obj.send_vdp_port_event(
        msg.get_port_uuid(), msg.get_mac(), msg.get_net_uuid(),
        msg.get_segmentation_id(), msg.get_status(), msg.get_oui())
    if not port_event_reply.get('result'):
        LOG.error(_LE("Error in VDP port event, Err Queue enq"))
        self.update_vm_result(
            msg.get_port_uuid(), res_fail,
            fail_reason=port_event_reply.get('fail_reason'))
    else:
        LOG.info(_LI("Success in VDP port event"))
        lvid, vdp_vlan = ovs_vdp_obj.get_lvid_vdp_vlan(msg.get_net_uuid(),
                                                       msg.get_port_uuid())
        self.update_vm_result(
            msg.get_port_uuid(), constants.RESULT_SUCCESS,
            lvid=lvid, vdp_vlan=vdp_vlan,
            fail_reason=port_event_reply.get('fail_reason'))
def process_bulk_vm_event(self, msg, phy_uplink):
    """Process the VM bulk event usually after a restart. """
    LOG.info("In processing Bulk VM Event status %s", msg)
    # NOTE(review): fixed 3s settle delay -- confirm necessity.
    time.sleep(3)
    if (not self.uplink_det_compl or
            phy_uplink not in self.ovs_vdp_obj_dict):
        # This condition shouldn't be hit as only when uplink is obtained,
        # save_uplink is called and that in turns calls this process_bulk.
        LOG.error(_LE("Uplink Port Event not received,"
                      "yet in bulk process"))
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    for vm_dict in msg.msg_dict.get('vm_bulk_list'):
        if vm_dict['status'] == 'down':
            # 'down' VMs first get their cached VLAN state removed.
            ovs_vdp_obj.pop_local_cache(vm_dict['port_uuid'],
                                        vm_dict['vm_mac'],
                                        vm_dict['net_uuid'],
                                        vm_dict['local_vlan'],
                                        vm_dict['vdp_vlan'],
                                        vm_dict['segmentation_id'])
        # Replay every VM through the regular single-VM event path.
        vm_msg = VdpQueMsg(constants.VM_MSG_TYPE,
                           port_uuid=vm_dict['port_uuid'],
                           vm_mac=vm_dict['vm_mac'],
                           net_uuid=vm_dict['net_uuid'],
                           segmentation_id=vm_dict['segmentation_id'],
                           status=vm_dict['status'],
                           oui=vm_dict['oui'],
                           phy_uplink=phy_uplink)
        self.process_vm_event(vm_msg, phy_uplink)
def process_uplink_event(self, msg, phy_uplink):
    """Handle an uplink up/down event.

    'up': create the OVS VDP object for the uplink, save the uplink and
    its veth on the server, and re-point topology discovery at the
    veth.  On failure the event is re-queued on the error queue for
    retry.
    'down': tear down the OVS VDP state/flows, clear the saved uplink
    and point topology discovery back at the physical interface.
    """
    LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
             {'msg': msg.get_status(), 'uplink': phy_uplink})
    if msg.get_status() == 'up':
        ovs_exc_raised = False
        ovs_exc_reason = ""
        try:
            self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                msg.get_root_helper(), self.vdp_vlan_change_cb)
        except Exception as exc:
            ovs_exc_reason = str(exc)
            LOG.error(_LE("OVS VDP Object creation failed %s"),
                      str(ovs_exc_reason))
            ovs_exc_raised = True
        if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                is_lldpad_setup_done()):
            # Is there a way to delete the object??
            if not ovs_exc_reason:
                uplink_fail_reason = (self.ovs_vdp_obj_dict[phy_uplink].
                                      get_uplink_fail_reason())
            else:
                uplink_fail_reason = ovs_exc_reason
            LOG.error(_LE("UP Event Processing NOT Complete"))
            # Re-queue so the periodic error handler retries the event.
            self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
            self.save_uplink(uplink=self.phy_uplink,
                             fail_reason=uplink_fail_reason)
        else:
            self.uplink_det_compl = True
            veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                         get_lldp_local_bridge_port())
            LOG.info(_LI("UP Event Processing Complete Saving uplink "
                         "%(ul)s and veth %(veth)s"),
                     {'ul': self.phy_uplink, 'veth': veth_intf})
            self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
            # LLDP frames now arrive on the veth, so monitor that
            # instead of the physical uplink.
            self.topo_disc.uncfg_intf(self.phy_uplink)
            self.topo_disc.cfg_intf(veth_intf,
                                    phy_interface=self.phy_uplink)
    elif msg.get_status() == 'down':
        # Free the object fixme(padkrish)
        if phy_uplink in self.ovs_vdp_obj_dict:
            self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
        else:
            ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                            phy_uplink)
        self.save_uplink()
        # Revert topology discovery to the physical interface.
        self.topo_disc.uncfg_intf(self.veth_intf)
        self.topo_disc.cfg_intf(phy_uplink)
def process_queue(self):
    """Main message loop: dequeue and dispatch VDP messages forever.

    Runs in its own thread (spawned from start()).  Dispatches
    single-VM events, bulk VM sync events and uplink events; any
    unexpected error is logged and the loop keeps running.
    """
    LOG.info(_LI("Entered process_q"))
    while True:
        prio, msg = self.que.dequeue()
        msg_type = msg.msg_type
        phy_uplink = msg.get_uplink()
        LOG.info(_LI("Msg dequeued type is %d"), msg_type)
        try:
            if msg_type == constants.VM_MSG_TYPE:
                self.process_vm_event(msg, phy_uplink)
            elif msg_type == constants.VM_BULK_SYNC_MSG_TYPE:
                self.process_bulk_vm_event(msg, phy_uplink)
            elif msg_type == constants.UPLINK_MSG_TYPE:
                try:
                    self.process_uplink_event(msg, phy_uplink)
                except Exception as eu:
                    LOG.exception(_LE("Exception caught in process_uplink"
                                      " %s"), str(eu))
                # Always clear the in-progress flag so uplink detection
                # can resume even after a failure.
                self.process_uplink_ongoing = False
        except Exception as e:
            # Bug fix: was LOG.exceptin (a typo) -- that would raise
            # AttributeError inside the handler and kill the loop.
            LOG.exception(_LE("Exception caught in process_q %s"), str(e))
def process_err_queue(self):
    """Drain the error queue, re-queueing uplink messages for retry.

    Called periodically from start(); uplink events that previously
    failed are moved back onto the main queue for reprocessing.
    """
    LOG.info(_LI("Entered Err process_q"))
    try:
        while self.err_que.is_not_empty():
            prio, msg = self.err_que.dequeue_nonblock()
            msg_type = msg.msg_type
            LOG.info(_LI("Msg dequeued from err queue type is %d"),
                     msg_type)
            if msg_type == constants.UPLINK_MSG_TYPE:
                self.que.enqueue(constants.Q_UPL_PRIO, msg)
    except Exception as e:
        # Bug fix: was LOG.exceptin (a typo) -- that would raise
        # AttributeError and hide the original error.
        LOG.exception(_LE("Exception caught in proc_err_que %s "), str(e))
def start(self):
    """Spawn the message-processing thread and the periodic tasks."""
    # Spawn the thread
    # Pass the Que as last argument so that in case of exception, the
    # daemon can exit gracefully. fixme(padkrish)
    thr_q = utils.EventProcessingThread("VDP_Mgr", self, 'process_queue')
    thr_q.start()
    # Periodically drain the error queue (retries failed uplink events).
    task_err_proc = utils.PeriodicTask(constants.ERR_PROC_INTERVAL,
                                       self.process_err_queue)
    task_err_proc.run()
    # Periodically (re)detect the uplink interface.
    task_uplink = utils.PeriodicTask(constants.UPLINK_DET_INTERVAL,
                                     self.vdp_uplink_proc_top)
    task_uplink.run()
def is_openstack_running(self):
    """Heuristic OpenStack liveness check.

    Currently just verifies that both the external and integration OVS
    bridges exist; any error during the check is logged and treated as
    "not running".
    """
    try:
        return bool(
            ovs_vdp.is_bridge_present(self.br_ex, self.root_helper) and
            ovs_vdp.is_bridge_present(self.br_integ, self.root_helper))
    except Exception as e:
        LOG.error(_LE("Exception in is_openstack_running %s"), str(e))
        return False
def vdp_uplink_proc_top(self):
    """Top-level wrapper for uplink detection.

    Never lets an exception escape into the periodic task machinery;
    failures are logged and the next cycle retries.
    """
    try:
        self.vdp_uplink_proc()
    except Exception as e:
        LOG.error(_LE("VDP uplink proc exception %s"), e)
def save_uplink(self, uplink="", veth_intf="", fail_reason=""):
    """Persist the detected uplink (and its veth) on the server via RPC.

    Called with default empty strings to clear the stored uplink.
    Returns the RPC response, or None on timeout (which is only
    logged).
    """
    context = {}
    # If uplink physical interface is a part of bond, then this function
    # will be called with uplink=bond0, as an example
    memb_port_list = sys_utils.get_member_ports(uplink)
    args = jsonutils.dumps({'agent': self.host_id, 'uplink': uplink,
                            'veth_intf': veth_intf,
                            'memb_port_list': memb_port_list,
                            'fail_reason': fail_reason})
    msg = self.rpc_clnt.make_msg('save_uplink', context, msg=args)
    try:
        resp = self.rpc_clnt.call(msg)
        return resp
    except rpc.MessagingTimeout:
        LOG.error(_LE("RPC timeout: Failed to save link name on the "
                      "server"))
def _fill_topology_cfg(self, topo_dict):
"""Fills the extra configurations in the topology. """
cfg_dict = {}
if topo_dict.bond_member_ports is not None:
cfg_dict.update({'bond_member_ports':
topo_dict.bond_member_ports})
if topo_dict.bond_interface is not None:
cfg_dict.update({'bond_interface':
topo_dict.bond_interface})
return cfg_dict
def save_topo_disc_params(self, intf, topo_disc_obj):
    """Send this interface's topology-discovery attributes to the server.

    Serializes the remote LLDP TLV attributes plus bond configuration
    and a heartbeat timestamp; returns the RPC response, or None on
    timeout (logged only).
    """
    context = {}
    topo_cfg = self._fill_topology_cfg(topo_disc_obj)
    args = jsonutils.dumps(
        {'host': self.host_id, 'protocol_interface': intf,
         'heartbeat': time.ctime(),
         'phy_interface': topo_disc_obj.phy_interface,
         'remote_evb_cfgd': topo_disc_obj.remote_evb_cfgd,
         'remote_evb_mode': topo_disc_obj.remote_evb_mode,
         'remote_mgmt_addr': topo_disc_obj.remote_mgmt_addr,
         'remote_system_desc': topo_disc_obj.remote_system_desc,
         'remote_system_name': topo_disc_obj.remote_system_name,
         'remote_port': topo_disc_obj.remote_port,
         'remote_chassis_id_mac': topo_disc_obj.remote_chassis_id_mac,
         'remote_port_id_mac': topo_disc_obj.remote_port_id_mac,
         'configurations': jsonutils.dumps(topo_cfg)})
    msg = self.rpc_clnt.make_msg('save_topo_disc_params', context,
                                 msg=args)
    try:
        resp = self.rpc_clnt.call(msg)
        return resp
    except rpc.MessagingTimeout:
        LOG.error("RPC timeout: Failed to send topo disc on the server")
def uplink_bond_intf_process(self):
    """Process the case when uplink interface becomes part of a bond.

    This is called to check if the phy interface became a part of the
    bond. If the below condition is True, this means, a physical
    interface that was not a part of a bond was earlier discovered as
    uplink and now that interface became part of the bond.
    Usually, this doesn't happen as LLDP and in turn this function will
    first detect a 'down' followed by an 'up'. When regular interface
    becomes part of bond, it's rare for it to hit this 'normal' case.
    But, still providing the functionality if it happens.
    The following is done :
    a. Bring down the physical interface by sending a 'down' event
    b. Add the bond interface by sending an 'up' event
    Consquently, when bond is added that will be assigned to
    self.phy_uplink. Then, the below condition will be False. i.e..
    'get_bond_intf' will return False, when the argument is 'bond0'.
    Returns True when the down/up transition was initiated.
    """
    bond_intf = sys_utils.get_bond_intf(self.phy_uplink)
    if bond_intf is None:
        return False
    # Step (a): clear the stored uplink and enqueue a 'down' for the
    # old physical interface.
    self.save_uplink(
        fail_reason=constants.port_transition_bond_down_reason)
    self.process_uplink_ongoing = True
    upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='down',
                        phy_uplink=self.phy_uplink,
                        br_int=self.br_integ, br_ex=self.br_ex,
                        root_helper=self.root_helper)
    self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
    self.phy_uplink = None
    self.veth_intf = None
    self.uplink_det_compl = False
    # Step (b): save and enqueue an 'up' for the bond interface.
    # No veth interface
    self.save_uplink(
        uplink=bond_intf,
        fail_reason=constants.port_transition_bond_up_reason)
    self.phy_uplink = bond_intf
    self.process_uplink_ongoing = True
    upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE, status='up',
                        phy_uplink=self.phy_uplink,
                        br_int=self.br_integ, br_ex=self.br_ex,
                        root_helper=self.root_helper)
    self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
    return True
def check_periodic_bulk_vm_notif_rcvd(self):
    """Request the bulk VM list again when it never arrived.

    Called from the 'normal' stage of periodic uplink detection.  The
    server sends a bulk VM event whenever save_uplink is done; if that
    event has not been seen after a couple of cycles, save_uplink is
    repeated to request it.  Not mutex-protected: worst case the bulk
    VM event is sent twice, which is harmless.
    """
    if self.bulk_vm_rcvd_flag:
        return
    if self.bulk_vm_check_cnt >= 1:
        self.bulk_vm_check_cnt = 0
        self.save_uplink(uplink=self.phy_uplink,
                         veth_intf=self.veth_intf)
        LOG.info(_LI("Doing save_uplink again to request "
                     "Bulk VM's"))
    else:
        LOG.info(_LI("Bulk VM not received, incrementing count"))
        self.bulk_vm_check_cnt += 1
def static_uplink_detect(self, veth):
    """Return the uplink-detection verdict in static-uplink mode.

    On the very first call after (re)start: if a different uplink was
    previously stored, return 'down' so the stale uplink is cleared.
    Otherwise return the configured static uplink port when no veth
    exists yet, or 'normal' once uplink processing has produced a veth.
    """
    LOG.info(_LI("In static_uplink_detect %(veth)s"), {'veth': veth})
    if self.static_uplink_first:
        self.static_uplink_first = False
        if self.phy_uplink is not None and (
                self.phy_uplink != self.static_uplink_port):
            # Configured uplink changed across restart: clear old one.
            return 'down'
    return self.static_uplink_port if veth is None else 'normal'
def vdp_uplink_proc(self):
"""Periodic handler to detect the uplink interface to the switch.
-> restart_uplink_called: should be called by agent initially to set
the stored uplink and veth from DB
-> process_uplink_ongoing: Will be set when uplink message is enqueue
and reset when dequeued and processed completely
-> uplink_det_compl: Will be set to True when a valid uplink is
detected and object created. Will be reset when uplink is down
-> phy_uplink: Is the uplink interface
-> veth_intf : Signifies the veth interface.
"""
LOG.info(_LI("In Periodic Uplink Task"))
if not self.is_os_run:
if not self.is_openstack_running():
LOG.info(_LI("OpenStack is not running"))
return
else:
self.is_os_run = True
if not self.restart_uplink_called or self.process_uplink_ongoing:
LOG.info(_LI("Uplink before restart not refreshed yet..states "
"%(ruc)d %(puo)d"),
{'ruc': self.restart_uplink_called,
'puo': self.process_uplink_ongoing})
return
if self.phy_uplink is not None:
if (self.uplink_det_compl and (
self.phy_uplink not in self.ovs_vdp_obj_dict)):
LOG.error(_LE("Not Initialized for phy %s"), self.phy_uplink)
return
if self.phy_uplink in self.ovs_vdp_obj_dict:
self.veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
get_lldp_local_bridge_port())
# The below logic has a bug when agent is started
# and openstack is not running fixme(padkrish)
else:
if self.veth_intf is None:
LOG.error(_LE("Incorrect state, Bug"))
return
if self.static_uplink:
ret = self.static_uplink_detect(self.veth_intf)
else:
ret = uplink_det.detect_uplink(self.veth_intf)
if ret is 'down':
if self.phy_uplink is None:
LOG.error(_LE("Wrong status down"))
return
# Call API to set the uplink as "" DOWN event
self.uplink_down_cnt = self.uplink_down_cnt + 1
if not self.static_uplink and (
self.uplink_down_cnt < constants.UPLINK_DOWN_THRES):
return
self.process_uplink_ongoing = True
upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE,
status='down',
phy_uplink=self.phy_uplink,
br_int=self.br_integ, br_ex=self.br_ex,
root_helper=self.root_helper)
self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
self.phy_uplink = None
self.veth_intf = None
self.uplink_det_compl = False
self.uplink_down_cnt = 0
elif ret is None:
if self.veth_intf is not None:
LOG.error(_LE("Wrong status None"))
return
# Call API to set the uplink as "" Uplink not discovered yet
self.save_uplink(fail_reason=constants.uplink_undiscovered_reason)
elif ret is 'normal':
if self.veth_intf is None:
LOG.error(_LE("Wrong status Normal"))
return
# Uplink already discovered, nothing to be done here
# Resetting it back, happens when uplink was down for a very short
# time and no need to remove flows
self.uplink_down_cnt = 0
bond_det = self.uplink_bond_intf_process()
# Revisit this logic.
# If uplink detection fails, it will be put in Error queue, which
# will dequeue and put it back in the main queue
# At the same time this periodic task will also hit this normal
# state and will put the message in main queue. fixme(padkrish)
# The below lines are put here because after restart when
# eth/veth are passed to uplink script, it will return normal
# But OVS object would not have been created for the first time,
# so the below lines ensures it's done.
if not self.uplink_det_compl and not bond_det:
if self.phy_uplink is None:
LOG.error(_LE("Incorrect state, bug"))
return
self.process_uplink_ongoing = True
upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE,
status='up',
phy_uplink=self.phy_uplink,
br_int=self.br_integ, br_ex=self.br_ex,
root_helper=self.root_helper)
self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
# yield
LOG.info(_LI("Enqueued Uplink Msg from normal"))
self.check_periodic_bulk_vm_notif_rcvd()
else:
LOG.info(_LI("In Periodic Uplink Task uplink found %s"), ret)
bond_intf = sys_utils.get_bond_intf(ret)
if bond_intf is not None:
ret = bond_intf
LOG.info(_LI("Interface %(memb)s part of bond %(bond)s") %
{'memb': ret, 'bond': bond_intf})
# Call API to set the uplink as ret
self.save_uplink(uplink=ret, veth_intf=self.veth_intf)
self.phy_uplink = ret
self.process_uplink_ongoing = True
upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE,
status='up',
phy_uplink=self.phy_uplink,
br_int=self.br_integ, br_ex=self.br_ex,
root_helper=self.root_helper)
self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
# yield
LOG.info(_LI("Enqueued Uplink Msg"))
    def vdp_vm_event(self, vm_dict_list):
        """Enqueue a VM event for VDP processing.

        A list argument is treated as a bulk sync of all VMs on this
        agent (and marks the bulk VM event as received); a single dict
        is an individual VM event.
        """
        if isinstance(vm_dict_list, list):
            vm_msg = VdpQueMsg(constants.VM_BULK_SYNC_MSG_TYPE,
                               vm_bulk_list=vm_dict_list,
                               phy_uplink=self.phy_uplink)
            # Stops check_periodic_bulk_vm_notif_rcvd from re-requesting.
            self.bulk_vm_rcvd_flag = True
        else:
            vm_dict = vm_dict_list
            LOG.info(_LI("Obtained VM event Enqueueing Status %(status)s "
                         "MAC %(mac)s uuid %(uuid)s oui %(oui)s"),
                     {'status': vm_dict['status'], 'mac': vm_dict['vm_mac'],
                      'uuid': vm_dict['net_uuid'], 'oui': vm_dict['oui']})
            vm_msg = VdpQueMsg(constants.VM_MSG_TYPE,
                               port_uuid=vm_dict['port_uuid'],
                               vm_mac=vm_dict['vm_mac'],
                               net_uuid=vm_dict['net_uuid'],
                               segmentation_id=vm_dict['segmentation_id'],
                               status=vm_dict['status'],
                               oui=vm_dict['oui'],
                               phy_uplink=self.phy_uplink)
        self.que.enqueue(constants.Q_VM_PRIO, vm_msg)
def is_uplink_received(self):
"""Returns whether uplink information is received after restart.
Not protecting this with a mutex, since this gets called inside the
loop from dfa_agent and having a mutex is a overkill. Worst case,
during multiple restarts on server and when the corner case is hit,
this may return an incorrect value of False when _dfa_uplink_restart
is at the middle of execution. Returning an incorrect value of False,
may trigger an RPC to the server to retrieve the uplink one extra time.
_dfa_uplink_restart will not get executed twice, since that is anyway
protected with a mutex.
"""
return self.restart_uplink_called
def dfa_uplink_restart(self, uplink_dict):
try:
with self.vdp_mgr_lock:
if not self.restart_uplink_called:
self._dfa_uplink_restart(uplink_dict)
except Exception as exc:
LOG.error(_LE("Exception in dfa_uplink_restart %s") % str(exc))
    def _dfa_uplink_restart(self, uplink_dict):
        """Restore uplink/veth state from the DB record after a restart.

        :param uplink_dict: DB record with optional 'uplink' and
            'veth_intf' keys.
        """
        LOG.info(_LI("Obtained uplink after restart %s "), uplink_dict)
        # This shouldn't happen
        if self.phy_uplink is not None:
            LOG.error(_LE("Uplink detection already done %s"), self.phy_uplink)
            return
        uplink = uplink_dict.get('uplink')
        veth_intf = uplink_dict.get('veth_intf')
        # Logic is as follows:
        # If DB didn't have any uplink it means it's not yet detected or down
        # if DB has uplink and veth, then no need to scan all ports we can
        # start with this veth.
        # If uplink has been removed or modified during restart, then a
        # down will be returned by uplink detection code and it will be
        # removed then.
        # If DB has uplink, but no veth, it's an error condition and in
        # which case remove the uplink port from bridge and start fresh
        if uplink is None or len(uplink) == 0:
            LOG.info(_LI("uplink not discovered yet"))
            self.restart_uplink_called = True
            return
        if veth_intf is not None and len(veth_intf) != 0:
            LOG.info(_LI("veth interface is already added, %(ul)s %(veth)s"),
                     {'ul': uplink, 'veth': veth_intf})
            self.phy_uplink = uplink
            self.veth_intf = veth_intf
            self.restart_uplink_called = True
            return
        LOG.info(_LI("Error case removing the uplink %s from bridge"), uplink)
        ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex, uplink)
        self.restart_uplink_called = True

View File

@ -1,819 +0,0 @@
# Copyright 2015 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This file contains the implementation of OpenStack component of VDP.
VDP is a part of LLDP Agent Daemon (lldpad). For more information on VDP,
pls visit http://www.ieee802.org/1/pages/802.1bg.html
"""
import six
from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.agent.vdp import (
lldpad_constants as vdp_const)
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as utils
from networking_cisco.apps.saf.common import utils as sys_utils
LOG = logging.getLogger(__name__)
# When timeout support becomes available in lldpad, this config will be
# enabled fixme(padkrish)
# OPTS = [
# cfg.IntOpt('lldp_timeout',
# default='0',
# help=_('Timeout in seconds for lldptool commands')),
# ]
def enable_lldp(self, port_name, is_ncb=True, is_nb=False):
    """Enable LLDP (adminStatus=rxtx) on the given interface.

    NOTE(review): defined at module level here yet takes ``self`` and
    calls ``self.run_lldptool`` — presumably lifted from a class during
    extraction; confirm the intended receiver.

    :param port_name: interface on which to configure LLDP
    :param is_ncb: configure via the Nearest Customer Bridge agent
    :param is_nb: configure via the Nearest Bridge agent
    """
    if is_ncb:
        self.run_lldptool(["-L", "-i", port_name, "-g", "ncb",
                           "adminStatus=rxtx"])
    if is_nb:
        self.run_lldptool(["-L", "-i", port_name, "-g", "nb",
                           "adminStatus=rxtx"])
class LldpadDriver(object):
"""LLDPad driver class. """
    def __init__(self, port_name, phy_uplink, root_helper, is_ncb=True,
                 is_nb=False):
        """Initialize Routine.

        :param port_name: Port where LLDP/EVB needs to be cfgd
        :param phy_uplink: Physical Interface
        :param root_helper: utility to use when running shell cmds.
        :param is_ncb: Should LLDP be cfgd on Nearest Customer Bridge
        :param is_nb: Should LLDP be cfgd on Nearest Bridge
        """
        # Re-enable this once support becomes available in lldpad.
        # fixme(padkrish)
        # cfg.CONF.register_opts(OPTS)
        self.port_name = port_name
        self.phy_uplink = phy_uplink
        self.is_ncb = is_ncb
        self.is_nb = is_nb
        self.root_helper = root_helper
        # Serializes the periodic refresh against vNIC down processing.
        self.mutex_lock = sys_utils.lock()
        self.read_vdp_cfg()
        # port_uuid -> stored VSI parameters (see store_vdp_vsi) and
        # port_uuid -> OUI info (see store_oui).
        self.vdp_vif_map = {}
        self.oui_vif_map = {}
        self.enable_lldp()
        sync_timeout_val = int(self.vdp_opts['vdp_sync_timeout'])
        # Periodically re-sends associates to keep lldpad and the switch
        # in sync with the orchestrator.
        vdp_periodic_task = sys_utils.PeriodicTask(sync_timeout_val,
                                                   self._vdp_refrsh_hndlr)
        self.vdp_periodic_task = vdp_periodic_task
        vdp_periodic_task.run()
    def clear_uplink(self):
        """Stop the periodic VDP refresh and drop all per-uplink state."""
        self.phy_uplink = None
        self.port_name = None
        self.vdp_periodic_task.stop()
        # Drop the cached VSI/OUI mappings entirely.
        del self.vdp_vif_map
        del self.oui_vif_map
def read_vdp_cfg(self):
self._cfg = config.CiscoDFAConfig().cfg
self.vdp_opts = dict()
self.vdp_opts['mgrid'] = self._cfg.vdp.mgrid2
self.vdp_opts['typeid'] = self._cfg.vdp.typeid
self.vdp_opts['typeidver'] = self._cfg.vdp.typeidver
self.vdp_opts['vsiidfrmt'] = self._cfg.vdp.vsiidfrmt
self.vdp_opts['hints'] = self._cfg.vdp.hints
self.vdp_opts['filter'] = self._cfg.vdp.filter
self.vdp_opts['vdp_sync_timeout'] = self._cfg.vdp.vdp_sync_timeout
def enable_lldp(self):
"""Function to enable LLDP on the interface. """
if self.is_ncb:
self.run_lldptool(["-L", "-i", self.port_name, "-g", "ncb",
"adminStatus=rxtx"])
if self.is_nb:
self.run_lldptool(["-L", "-i", self.port_name, "-g", "nb",
"adminStatus=rxtx"])
def enable_evb(self):
"""Function to enable EVB on the interface. """
if self.is_ncb:
self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb",
"-V", "evb", "enableTx=yes"])
ret = self.enable_gpid()
return ret
else:
LOG.error(_LE("EVB cannot be set on NB"))
return False
def enable_gpid(self):
"""Function to enable Group ID on the interface.
This is needed to use the MAC, GID, VID Filter.
"""
if self.is_ncb:
self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb",
"-V", "evb", "-c", "evbgpid=yes"])
return True
else:
LOG.error(_LE("GPID cannot be set on NB"))
return False
# fixme(padkrish)
    def _vdp_refrsh_hndlr(self):
        """Periodic refresh of vNIC events to VDP.

        VDP daemon itself has keepalives. This is needed on top of it
        to keep Orchestrator like OpenStack, VDP daemon and the physical
        switch in sync.
        """
        LOG.debug("Refresh handler")
        try:
            if not self.vdp_vif_map:
                LOG.debug("vdp_vif_map not created, returning")
                return
            # Iterate over shallow copies so concurrent add/remove of
            # vNICs cannot invalidate the iteration; the per-entry dicts
            # are shared, so updates below land in self.vdp_vif_map too.
            vdp_vif_map = dict.copy(self.vdp_vif_map)
            oui_vif_map = dict.copy(self.oui_vif_map)
            for key in six.iterkeys(vdp_vif_map):
                lvdp_dict = vdp_vif_map.get(key)
                loui_dict = oui_vif_map.get(key)
                if not lvdp_dict:
                    return
                if not loui_dict:
                    oui_id = ""
                    oui_data = ""
                else:
                    oui_id = loui_dict.get('oui_id')
                    oui_data = loui_dict.get('oui_data')
                with self.mutex_lock:
                    # Re-check under the lock: the entry may have been
                    # removed since the copy was taken.
                    if key in self.vdp_vif_map:
                        LOG.debug("Sending Refresh for VSI %s", lvdp_dict)
                        vdp_vlan, fail_reason = self.send_vdp_assoc(
                            vsiid=lvdp_dict.get('vsiid'),
                            mgrid=lvdp_dict.get('mgrid'),
                            typeid=lvdp_dict.get('typeid'),
                            typeid_ver=lvdp_dict.get('typeid_ver'),
                            vsiid_frmt=lvdp_dict.get('vsiid_frmt'),
                            filter_frmt=lvdp_dict.get('filter_frmt'),
                            gid=lvdp_dict.get('gid'),
                            mac=lvdp_dict.get('mac'),
                            vlan=0, oui_id=oui_id, oui_data=oui_data,
                            sw_resp=True)
                        # check validity.
                        if not utils.is_valid_vlan_tag(vdp_vlan):
                            LOG.error(_LE("Returned vlan %(vlan)s is "
                                          "invalid."),
                                      {'vlan': vdp_vlan})
                            # Need to invoke CB. So no return here.
                            vdp_vlan = 0
                        exist_vdp_vlan = lvdp_dict.get('vdp_vlan')
                        exist_fail_reason = lvdp_dict.get('fail_reason')
                        callback_count = lvdp_dict.get('callback_count')
                        # Condition will be hit only during error cases when
                        # switch reloads or when compute reloads
                        if vdp_vlan != exist_vdp_vlan or (
                                fail_reason != exist_fail_reason or
                                callback_count > vdp_const.CALLBACK_THRESHOLD):
                            # Invoke the CB Function
                            cb_fn = lvdp_dict.get('vsw_cb_fn')
                            cb_data = lvdp_dict.get('vsw_cb_data')
                            if cb_fn:
                                cb_fn(cb_data, vdp_vlan, fail_reason)
                            lvdp_dict['vdp_vlan'] = vdp_vlan
                            lvdp_dict['fail_reason'] = fail_reason
                            lvdp_dict['callback_count'] = 0
                        else:
                            lvdp_dict['callback_count'] += 1
        except Exception as e:
            LOG.error(_LE("Exception in Refrsh %s"), str(e))
def run_lldptool(self, args):
"""Function for invoking the lldptool utility. """
full_args = ['lldptool'] + args
try:
utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def store_oui(self, port_uuid, oui_type, oui_data):
"""Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data
"""
self.oui_vif_map[port_uuid] = {'oui_id': oui_type,
'oui_data': oui_data}
    def store_vdp_vsi(self, port_uuid, mgrid, typeid, typeid_ver,
                      vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
                      new_network, reply, oui_id, oui_data, vsw_cb_fn,
                      vsw_cb_data, reason):
        """Stores the vNIC specific info for VDP Refresh.

        :param port_uuid: vNIC UUID
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param vsiid: VSI value
        :param filter_frmt: Filter Format
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param new_network: Is this the first vNIC of this network
        :param reply: Response from the switch
        :param oui_id: OUI Type
        :param oui_data: OUI Data
        :param vsw_cb_fn: Callback function from the app.
        :param vsw_cb_data: Callback data for the app.
        :param reason: Failure Reason
        """
        if port_uuid in self.vdp_vif_map:
            LOG.debug("Not Storing VDP VSI MAC %(mac)s UUID %(uuid)s",
                      {'mac': mac, 'uuid': vsiid})
        # NOTE(review): despite the "Not Storing" log above, execution
        # falls through and the existing entry is overwritten below —
        # confirm whether an early return was intended.
        # For a new network the switch reply carries the VLAN; otherwise
        # the caller-supplied VLAN is kept.
        if new_network:
            vdp_vlan = reply
        else:
            vdp_vlan = vlan
        vdp_dict = {'vdp_vlan': vdp_vlan,
                    'mgrid': mgrid,
                    'typeid': typeid,
                    'typeid_ver': typeid_ver,
                    'vsiid_frmt': vsiid_frmt,
                    'vsiid': vsiid,
                    'filter_frmt': filter_frmt,
                    'mac': mac,
                    'gid': gid,
                    'vsw_cb_fn': vsw_cb_fn,
                    'vsw_cb_data': vsw_cb_data,
                    'fail_reason': reason,
                    'callback_count': 0}
        self.vdp_vif_map[port_uuid] = vdp_dict
        LOG.debug("Storing VDP VSI MAC %(mac)s UUID %(uuid)s VDP VLAN "
                  "%(vlan)s", {'mac': mac, 'uuid': vsiid, 'vlan': vdp_vlan})
        if oui_id:
            self.store_oui(port_uuid, oui_id, oui_data)
def clear_oui(self, port_uuid):
"""Clears the OUI specific info.
:param uuid: vNIC UUID
Currently only one OUI per VSI fixme(padkrish)
"""
if port_uuid in self.oui_vif_map:
del self.oui_vif_map[port_uuid]
else:
LOG.debug("OUI does not exist")
def clear_vdp_vsi(self, port_uuid):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
"""
try:
LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': self.vdp_vif_map[port_uuid].get('mac'),
'uuid': self.vdp_vif_map[port_uuid].get('vsiid')})
del self.vdp_vif_map[port_uuid]
except Exception:
LOG.error(_LE("VSI does not exist"))
self.clear_oui(port_uuid)
def gen_cisco_vdp_oui(self, oui_id, oui_data):
"""Cisco specific handler for constructing OUI arguments. """
oui_list = []
vm_name = oui_data.get('vm_name')
if vm_name is not None:
oui_str = "oui=%s," % oui_id
oui_name_str = oui_str + "vm_name=" + vm_name
oui_list.append(oui_name_str)
ip_addr = oui_data.get('ip_addr')
if ip_addr is not None:
oui_str = "oui=%s," % oui_id
ip_addr_str = oui_str + "ipv4_addr=" + ip_addr
oui_list.append(ip_addr_str)
vm_uuid = oui_data.get('vm_uuid')
if vm_uuid is not None:
oui_str = "oui=%s," % oui_id
vm_uuid_str = oui_str + "vm_uuid=" + vm_uuid
oui_list.append(vm_uuid_str)
return oui_list
def gen_oui_str(self, oui_list):
"""Generate the OUI string for vdptool. """
oui_str = []
for oui in oui_list:
oui_str.append('-c')
oui_str.append(oui)
return oui_str
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id,
oui_data):
"""Constructs the VDP Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:return vdp_keyword_str: Dictionary of VDP arguments and values
"""
vdp_keyword_str = {}
if mgrid is None:
mgrid = self.vdp_opts.get('mgrid')
mgrid_str = "mgrid2=%s" % mgrid
if typeid is None:
typeid = self.vdp_opts.get('typeid')
typeid_str = "typeid=%s" % typeid
if typeid_ver is None:
typeid_ver = self.vdp_opts.get('typeidver')
typeid_ver_str = "typeidver=%s" % typeid_ver
if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')):
vsiid_str = "uuid=%s" % vsiid
else:
# Only format supported for now
LOG.error(_LE("Unsupported VSIID Format1"))
return vdp_keyword_str
if vlan == constants.INVALID_VLAN:
vlan = 0
if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID:
if not mac or gid == 0:
LOG.error(_LE("Incorrect Filter Format Specified"))
return vdp_keyword_str
else:
f = "filter=%s-%s-%s"
filter_str = f % (vlan, mac, gid)
elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID:
if gid == 0:
LOG.error(_LE("NULL GID Specified"))
return vdp_keyword_str
else:
filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid
elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID:
if not mac:
LOG.error(_LE("NULL MAC Specified"))
return vdp_keyword_str
else:
filter_str = "filter=" + '%d' % vlan + "-" + mac
elif int(filter_frmt) == vdp_const.VDP_FILTER_VID:
filter_str = "filter=" + '%d' % vlan
else:
LOG.error(_LE("Incorrect Filter Format Specified"))
return vdp_keyword_str
oui_list = []
if oui_id is not None and oui_data is not None:
if oui_id is 'cisco':
oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data)
mode_str = "mode=" + mode
vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str,
typeid=typeid_str, typeid_ver=typeid_ver_str,
vsiid=vsiid_str, filter=filter_str,
oui_list=oui_list)
return vdp_keyword_str
    def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
                           vsiid, filter_frmt, gid, mac, vlan, oui_id,
                           oui_data):
        """Constructs and Sends the VDP Query Message.

        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
        Section for more detailed information

        :param mode: Associate or De-associate
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param vsiid: VSI value
        :param filter_frmt: Filter Format
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui_id: OUI Type (not sent in the query command below)
        :param oui_data: OUI Data (not sent in the query command below)
        :return reply: Reply from vdptool (None if not on NCB or on a
            malformed argument dict)
        """
        if not self.is_ncb:
            LOG.error(_LE("EVB cannot be set on NB"))
            return
        # OUI args are deliberately passed as None, None: the query only
        # identifies the VSI, it does not program OUI data.
        vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
                                              typeid_ver, vsiid_frmt, vsiid,
                                              filter_frmt, gid, mac, vlan,
                                              None, None)
        if len(vdp_key_str) == 0:
            LOG.error(_LE("NULL List"))
            return
        # '-t' queries the local lldpad state; '-R' reads the TLV.
        reply = self.run_vdptool(["-t", "-i", self.port_name, "-R", "-V", mode,
                                  "-c", vdp_key_str['mode'],
                                  "-c", vdp_key_str['mgrid'],
                                  "-c", vdp_key_str['typeid'],
                                  "-c", vdp_key_str['typeid_ver'],
                                  "-c", vdp_key_str['vsiid']])
        return reply
    def send_vdp_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
                     vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data,
                     sw_resp):
        """Constructs and Sends the VDP Message.

        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
        Section for more detailed information

        :param mode: Associate or De-associate
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param vsiid: VSI value
        :param filter_frmt: Filter Format
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui_id: OUI Type
        :param oui_data: OUI Data
        :param sw_resp: Flag indicating if response is required from the daemon
        :return reply: Reply from vdptool
        """
        if not self.is_ncb:
            LOG.error(_LE("EVB cannot be set on NB"))
            return
        vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
                                              typeid_ver, vsiid_frmt, vsiid,
                                              filter_frmt, gid, mac, vlan,
                                              oui_id, oui_data)
        if len(vdp_key_str) == 0:
            LOG.error(_LE("NULL List"))
            return
        oui_cmd_str = self.gen_oui_str(vdp_key_str['oui_list'])
        if sw_resp:
            # If filter is not VID and if VLAN is 0, Query for the TLV first,
            # if found VDP will return the VLAN. Add support for this once
            # vdptool has the support for querying exact VSI filters
            # fixme(padkrish)
            # '-W' makes vdptool wait for the daemon's response.
            reply = self.run_vdptool(["-T", "-i", self.port_name, "-W",
                                      "-V", mode, "-c", vdp_key_str['mode'],
                                      "-c", vdp_key_str['mgrid'], "-c",
                                      vdp_key_str['typeid'],
                                      "-c", vdp_key_str['typeid_ver'], "-c",
                                      vdp_key_str['vsiid'], "-c",
                                      "hints=none", "-c",
                                      vdp_key_str['filter']],
                                     oui_args=oui_cmd_str)
        else:
            reply = self.run_vdptool(["-T", "-i", self.port_name,
                                      "-V", mode, "-c", vdp_key_str['mode'],
                                      "-c", vdp_key_str['mgrid'], "-c",
                                      vdp_key_str['typeid'],
                                      "-c", vdp_key_str['typeid_ver'], "-c",
                                      vdp_key_str['vsiid'], "-c",
                                      "hints=none", "-c",
                                      vdp_key_str['filter']],
                                     oui_args=oui_cmd_str)
        return reply
    def crosscheck_reply_vsiid_mac(self, reply, vsiid, mac):
        """Cross Check the reply against the input vsiid,mac for associate.

        :return: (True, None) on match, else (False, fail_reason).
        """
        # An associate reply carries 'uuid = <vsiid>'.
        vsiid_reply = reply.partition("uuid = ")[2].split()[0]
        if vsiid != vsiid_reply:
            fail_reason = vdp_const.vsi_mismatch_failure_reason % (
                vsiid, vsiid_reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        # The MAC is the second '-'-separated field of 'filter = v-m-g'.
        mac_reply = reply.partition("filter = ")[2].split('-')[1]
        if mac != mac_reply:
            fail_reason = vdp_const.mac_mismatch_failure_reason % (
                mac, mac_reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        return True, None
    def crosscheck_query_vsiid_mac(self, reply, vsiid, mac):
        """Cross Check the reply against the input vsiid,mac for get query.

        The query output format differs from the associate reply: the
        value follows the keyword with a 4-character prefix, hence the
        [4:] slice below.

        :return: (True, None) on match, else (False, fail_reason).
        """
        vsiid_reply = reply.partition("uuid")[2].split()[0][4:]
        if vsiid != vsiid_reply:
            fail_reason = vdp_const.vsi_mismatch_failure_reason % (
                vsiid, vsiid_reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        mac_reply = reply.partition("filter")[2].split('-')[1]
        if mac != mac_reply:
            fail_reason = vdp_const.mac_mismatch_failure_reason % (
                mac, mac_reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        return True, None
    def get_vdp_failure_reason(self, reply):
        """Parse the failure reason from VDP.

        The reason is taken as the second-to-last line preceding the
        'filter' keyword; any parse failure falls back to a generic
        message wrapping the raw reply.
        """
        try:
            fail_reason = reply.partition(
                "filter")[0].replace('\t', '').split('\n')[-2]
            if len(fail_reason) == 0:
                fail_reason = vdp_const.retrieve_failure_reason % (reply)
        except Exception:
            fail_reason = vdp_const.retrieve_failure_reason % (reply)
        return fail_reason
    def check_filter_validity(self, reply, filter_str):
        """Check for the validity of the filter in the reply.

        :return: (True, None) when exactly one occurrence of filter_str
            is present; (False, fail_reason) when it is absent or occurs
            more than once.
        """
        try:
            f_ind = reply.index(filter_str)
            l_ind = reply.rindex(filter_str)
        except Exception:
            fail_reason = vdp_const.filter_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        if f_ind != l_ind:
            # Currently not supported if reply contains more than one
            # filter keyword
            fail_reason = vdp_const.multiple_filter_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        return True, None
    def get_vlan_from_associate_reply(self, reply, vsiid, mac):
        """Parse the associate reply from VDP daemon to get the VLAN value.

        :return: (vlan, None) on success, or
            (constants.INVALID_VLAN, fail_reason) on any mismatch or
            parse failure.
        """
        try:
            verify_flag, fail_reason = self.crosscheck_reply_vsiid_mac(
                reply, vsiid, mac)
            if not verify_flag:
                return constants.INVALID_VLAN, fail_reason
            # Anything other than 'mode = assoc' indicates a failure;
            # extract the daemon's reason in that case.
            mode_str = reply.partition("mode = ")[2].split()[0]
            if mode_str != "assoc":
                fail_reason = self.get_vdp_failure_reason(reply)
                return constants.INVALID_VLAN, fail_reason
        except Exception:
            fail_reason = vdp_const.mode_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return constants.INVALID_VLAN, fail_reason
        check_filter, fail_reason = self.check_filter_validity(
            reply, "filter = ")
        if not check_filter:
            return constants.INVALID_VLAN, fail_reason
        try:
            # VLAN is the first '-'-separated field of 'filter = v-m-g'.
            vlan_val = reply.partition("filter = ")[2].split('-')[0]
            vlan = int(vlan_val)
        except ValueError:
            fail_reason = vdp_const.format_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return constants.INVALID_VLAN, fail_reason
        return vlan, None
    def check_hints(self, reply):
        """Parse the hints to check for errors.

        The hints value follows the 'hints' keyword as a 4-character
        length field, then that many characters of value; only a value
        of 0 is success.

        :return: (True, None) when hints == 0, else (False, fail_reason).
        """
        try:
            f_ind = reply.index("hints")
            l_ind = reply.rindex("hints")
        except Exception:
            fail_reason = vdp_const.hints_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        if f_ind != l_ind:
            # Currently not supported if reply contains more than one
            # hints keyword
            fail_reason = vdp_const.multiple_hints_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        try:
            hints_compl = reply.partition("hints")[2]
            hints_val = reply.partition("hints")[2][0:4]
            len_hints = int(hints_val)
            hints_val = hints_compl[4:4 + len_hints]
            hints = int(hints_val)
            if hints != 0:
                fail_reason = vdp_const.nonzero_hints_failure % (hints)
                return False, fail_reason
        except ValueError:
            fail_reason = vdp_const.format_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return False, fail_reason
        return True, None
    def get_vlan_from_query_reply(self, reply, vsiid, mac):
        """Parse the query reply from VDP daemon to get the VLAN value.

        :return: (vlan, None) on success, or
            (constants.INVALID_VLAN, fail_reason) on any mismatch or
            parse failure.
        """
        hints_ret, fail_reason = self.check_hints(reply)
        if not hints_ret:
            LOG.error(_LE("Incorrect hints found %s"), reply)
            return constants.INVALID_VLAN, fail_reason
        check_filter, fail_reason = self.check_filter_validity(reply, "filter")
        if not check_filter:
            return constants.INVALID_VLAN, fail_reason
        try:
            verify_flag, fail_reason = self.crosscheck_query_vsiid_mac(
                reply, vsiid, mac)
            if not verify_flag:
                return constants.INVALID_VLAN, fail_reason
            # Skip the 4-character length prefix after 'filter'; the
            # VLAN is the first '-'-separated field of the value.
            filter_val = reply.partition("filter")[2]
            len_fil = len(filter_val)
            vlan_val = filter_val[4:len_fil].split('-')[0]
            vlan = int(vlan_val)
        except ValueError:
            fail_reason = vdp_const.format_failure_reason % (reply)
            LOG.error(_LE("%s"), fail_reason)
            return constants.INVALID_VLAN, fail_reason
        return vlan, None
    def send_vdp_assoc(self, vsiid=None, mgrid=None, typeid=None,
                       typeid_ver=None, vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                       filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
                       mac="", vlan=0, oui_id="", oui_data="", sw_resp=False):
        """Sends the VDP Associate Message.

        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
        Section for more detailed information

        :param vsiid: VSI value, Only UUID supported for now
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui_id: OUI Type
        :param oui_data: OUI Data
        :param sw_resp: Flag indicating if response is required from the daemon
        :return: (vlan, fail_reason) when sw_resp is set, otherwise
            (None, None); the VLAN is the one returned by vdptool which
            in turn is given by the switch.
        """
        # Query the daemon first: if the VSI is already known, reuse the
        # VLAN it reports instead of sending a fresh associate.
        if sw_resp and filter_frmt == vdp_const.VDP_FILTER_GIDMACVID:
            reply = self.send_vdp_query_msg("assoc", mgrid, typeid, typeid_ver,
                                            vsiid_frmt, vsiid, filter_frmt,
                                            gid, mac, vlan, oui_id, oui_data)
            vlan_resp, fail_reason = self.get_vlan_from_query_reply(
                reply, vsiid, mac)
            if vlan_resp != constants.INVALID_VLAN:
                return vlan_resp, fail_reason
        reply = self.send_vdp_msg("assoc", mgrid, typeid, typeid_ver,
                                  vsiid_frmt, vsiid, filter_frmt, gid, mac,
                                  vlan, oui_id, oui_data, sw_resp)
        if sw_resp:
            vlan, fail_reason = self.get_vlan_from_associate_reply(
                reply, vsiid, mac)
            return vlan, fail_reason
        return None, None
    def send_vdp_deassoc(self, vsiid=None, mgrid=None, typeid=None,
                         typeid_ver=None,
                         vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                         filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
                         mac="", vlan=0, oui_id="", oui_data="",
                         sw_resp=False):
        """Sends the VDP Dis-Associate Message.

        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
        for more detailed information

        :param vsiid: VSI value, Only UUID supported for now
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui_id: OUI Type
        :param oui_data: OUI Data
        :param sw_resp: Flag indicating if response is required from the daemon
        """
        if filter_frmt == vdp_const.VDP_FILTER_GIDMACVID:
            reply = self.send_vdp_query_msg("assoc", mgrid, typeid, typeid_ver,
                                            vsiid_frmt, vsiid, filter_frmt,
                                            gid, mac, vlan, oui_id, oui_data)
            vlan_resp, fail_reason = self.get_vlan_from_query_reply(
                reply, vsiid, mac)
            # This is to cover cases where the enabler has a different VLAN
            # than LLDPAD. deassoc won't go through if wrong VLAN is passed.
            # Since enabler does not have right VLAN, most likely flows are not
            # programmed. Otherwise, there will be stale flows. No way of
            # knowing unless all flows are read and compared.
            if vlan_resp != constants.INVALID_VLAN:
                if vlan != vlan_resp:
                    LOG.info(_LI("vlan_resp %(resp)s different from passed "
                                 "VLAN %(vlan)s"), {'resp': vlan_resp,
                                                    'vlan': vlan})
                    vlan = vlan_resp
        self.send_vdp_msg("deassoc", mgrid, typeid, typeid_ver,
                          vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
                          oui_id, oui_data, sw_resp)
    def send_vdp_vnic_up(self, port_uuid=None, vsiid=None,
                         mgrid=None, typeid=None, typeid_ver=None,
                         vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                         filter_frmt=vdp_const.VDP_FILTER_GIDMACVID,
                         gid=0, mac="", vlan=0, oui=None,
                         new_network=False, vsw_cb_fn=None, vsw_cb_data=None):
        """Interface function to apps, called for a vNIC UP.

        This currently sends an VDP associate message and stores the
        vNIC state for the periodic refresh.
        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
        Section for more detailed information

        :param port_uuid: uuid of the vNIC
        :param vsiid: VSI value, Only UUID supported for now
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the following VSI argument
        :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui: dict with optional 'oui_id' key plus OUI payload
        :param new_network: whether a switch response should be requested
        :param vsw_cb_fn: Callback function from the app.
        :param vsw_cb_data: Callback data for the app.
        :return: (reply, fail_reason) from send_vdp_assoc
        """
        if oui is None:
            oui = {}
        oui_id = None
        oui_data = None
        if 'oui_id' in oui:
            oui_id = oui['oui_id']
            oui_data = oui
        reply, fail_reason = self.send_vdp_assoc(
            vsiid=vsiid, mgrid=mgrid, typeid=typeid, typeid_ver=typeid_ver,
            vsiid_frmt=vsiid_frmt, filter_frmt=filter_frmt, gid=gid, mac=mac,
            vlan=vlan, oui_id=oui_id, oui_data=oui_data, sw_resp=new_network)
        self.store_vdp_vsi(port_uuid, mgrid, typeid, typeid_ver,
                           vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
                           new_network, reply, oui_id, oui_data, vsw_cb_fn,
                           vsw_cb_data, fail_reason)
        return reply, fail_reason
    def send_vdp_vnic_down(self, port_uuid=None, vsiid=None, mgrid=None,
                           typeid=None, typeid_ver=None,
                           vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                           filter_frmt=vdp_const.VDP_FILTER_GIDMACVID,
                           gid=0, mac="", vlan=0, oui=""):
        """Interface function to apps, called for a vNIC DOWN.

        This currently sends a VDP dis-associate message and drops the
        cached VSI entry for the port.
        Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
        Section for more detailed information.

        :param port_uuid: uuid of the vNIC
        :param vsiid: VSI value, Only UUID supported for now
        :param mgrid: MGR ID
        :param typeid: Type ID
        :param typeid_ver: Version of the Type ID
        :param vsiid_frmt: Format of the preceding VSI argument
        :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported now
        :param gid: Group ID the vNIC belongs to
        :param mac: MAC Address of the vNIC
        :param vlan: VLAN of the vNIC
        :param oui: OUI Parameters (unused by the dis-associate path)
        """
        # Correct non-zero VLAN needs to be specified
        try:
            with self.mutex_lock:
                self.send_vdp_deassoc(vsiid=vsiid, mgrid=mgrid, typeid=typeid,
                                      typeid_ver=typeid_ver,
                                      vsiid_frmt=vsiid_frmt,
                                      filter_frmt=filter_frmt, gid=gid,
                                      mac=mac, vlan=vlan)
                self.clear_vdp_vsi(port_uuid)
        except Exception as e:
            LOG.error(_LE("VNIC Down exception %s"), e)
def run_vdptool(self, args, oui_args=None):
"""Function that runs the vdptool utility. """
if oui_args is None:
oui_args = []
full_args = ['vdptool'] + args + oui_args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})

View File

@ -1,44 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service VDP 2.2 constants."""
MINIMUM_VDP22_VERSION = "0.9.45"
VDP_MGRID = 0
VDP_TYPEID = 0
VDP_TYPEID_VER = 0
VDP_VSIFRMT_UUID = 5
VDP_FILTER_VID = 1
VDP_FILTER_MACVID = 2
VDP_FILTER_GIDVID = 3
VDP_FILTER_GIDMACVID = 4
VDP_SYNC_TIMEOUT = 15
CALLBACK_THRESHOLD = 5
verify_failure_reason = "vsi_id mismatch, queried %s, returned %s"
retrieve_failure_reason = "Unable to retrieve failure, reply %s"
mode_failure_reason = "Incorrect Reply,no mode information found: %s"
filter_failure_reason = "Incorrect Reply,no filter information found: %s"
multiple_filter_failure_reason = \
"Incorrect Reply,multiple filter information found: %s"
format_failure_reason = "Reply not formatted correctly: %s"
hints_failure_reason = "Incorrect Reply,no hints information found: %s"
multiple_hints_failure_reason = \
"Incorrect Reply,multiple hints information found: %s"
nonzero_hints_failure = "Non-zero hints, value %d"
vsi_mismatch_failure_reason = "VSIID Reply mis-match req vsi %s reply vsi %s"
mac_mismatch_failure_reason = \
"VSIID MAC Reply mis-match req mac %s reply mac %s"

View File

@ -1,780 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This file contains the mixin class implementation of OVS extensions for VDP.
VDP is a part of LLDP Agent Daemon (lldpad). For more information on VDP,
pls visit http://www.ieee802.org/1/pages/802.1bg.html
"""
import six
from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.agent.vdp import lldpad
from networking_cisco.apps.saf.agent.vdp import vdp_constants as constants
from networking_cisco.apps.saf.common import constants as cconstants
from networking_cisco.apps.saf.common import dfa_exceptions as dfae
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import dfa_sys_lib as ovs_lib
from networking_cisco.apps.saf.common import utils as sys_utils
from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
def is_uplink_already_added(root_helper, br_ex, port_name):
    """Return True when port_name is already attached to bridge br_ex.

    Logs an error (and returns False) when the port exists but is attached
    to a different bridge.
    """
    bridge, found = ovs_lib.port_exists_glob(root_helper, port_name)
    if not found:
        return False
    if bridge == br_ex:
        return True
    LOG.error(_LE("Port %(port)s added to wrong bridge %(br)s "
                  "Given %(br_ex)s"),
              {'port': port_name, 'br': bridge, 'br_ex': br_ex})
    return False
def delete_uplink_and_flows(root_helper, br_ex, port_name):
    """Remove the VDP punt flows and, if attached, the uplink ports.

    Deletes both the uplink itself and its companion lldpad veth port from
    the external bridge.
    """
    glob_delete_vdp_flows(br_ex, root_helper)
    if is_uplink_already_added(root_helper, br_ex, port_name):
        veth_name = constants.LLDPAD_OVS_VETH_PORT + port_name
        ovs_lib.delete_port_glob(root_helper, br_ex, port_name)
        ovs_lib.delete_port_glob(root_helper, br_ex, veth_name)
def glob_delete_vdp_flows(br_ex, root_helper):
    """Delete the LLDP and VDP 2.2 flows from bridge br_ex."""
    bridge = ovs_lib.OVSBridge(br_ex, root_helper=root_helper)
    for ether_type in (constants.LLDP_ETYPE, constants.VDP22_ETYPE):
        bridge.delete_flows(dl_dst=constants.NCB_DMAC, dl_type=ether_type)
def is_bridge_present(br, root_helper):
    """Return True if OVS bridge *br* exists on this host.

    Idiom fix: the original `if x: return True else: return False` is
    replaced by returning the membership test directly.
    """
    return br in ovs_lib.get_bridges(root_helper)
class LocalVlan(object):
    """Per-network VLAN book-keeping for the VDP agent.

    Tracks the local (OVS) VLAN, the VDP-negotiated VLAN and, per vNIC
    (port UUID), the last VLAN and failure reason reported for it.
    """

    def __init__(self, vlan, segmentation_id):
        self.vlan = vlan
        self.segmentation_id = segmentation_id
        self.late_binding_vlan = 0
        self.lvid = cconstants.INVALID_VLAN
        # port_uuid -> [port_uuid, vdp_vlan, fail_reason]
        self.port_uuid_list = {}
        # Generally for the same network, there will be only one VDP VLAN.
        # However, inconsistencies can arise. This refcount dictionary keeps
        # track of all VLAN's floating around for different vNIC's of the
        # same network.
        self.port_vdp_vlan_dict = {}

    def set_port_uuid(self, port_uuid, vdp_vlan, fail_reason):
        """Record a new vNIC and account for its VDP VLAN."""
        if port_uuid not in self.port_uuid_list:
            self.port_uuid_list[port_uuid] = [port_uuid, vdp_vlan,
                                              fail_reason]
            self.set_port_vlan(vdp_vlan)

    def set_portid_fail_reason(self, port_id, fail_reason):
        """Attach a failure reason to an already-known vNIC."""
        if port_id not in self.port_uuid_list:
            LOG.error(_LE("Unable to set fail_reason, port_uuid %s not "
                          "created"), port_id)
            return
        self.port_uuid_list[port_id][2] = fail_reason

    def get_portid_fail_reason(self, port_id):
        """Return the stored failure reason for a vNIC, or None."""
        if port_id not in self.port_uuid_list:
            return None
        return self.port_uuid_list[port_id][2]

    def get_portid_vlan(self, port_id):
        """Return the VDP VLAN recorded for a vNIC, INVALID_VLAN if absent."""
        if port_id not in self.port_uuid_list:
            return cconstants.INVALID_VLAN
        return self.port_uuid_list[port_id][1]

    def set_portid_vlan(self, port_id, new_vlan):
        """Overwrite the VDP VLAN recorded for an existing vNIC."""
        self.port_uuid_list[port_id][1] = new_vlan

    def set_port_vlan(self, vdp_vlan):
        """Increment the refcount for a valid VDP VLAN."""
        if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
            LOG.info(_LI("Passed Invalid vlan in set_port_vlan"))
            return
        if vdp_vlan not in self.port_vdp_vlan_dict:
            self.port_vdp_vlan_dict[vdp_vlan] = 0
        self.port_vdp_vlan_dict[vdp_vlan] += 1

    def reset_port_vlan(self, vdp_vlan):
        """Decrement the refcount for a VDP VLAN, dropping it at zero."""
        if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
            LOG.info(_LI("Passed Invalid vlan in reset_port_vlan"))
            return
        if vdp_vlan not in self.port_vdp_vlan_dict:
            LOG.error(_LE("wrongly called, no VLAN's present"))
            # NOTE(review): this leaves a zero-count entry behind, which
            # any_valid_vlan() then treats as a live VLAN -- confirm intent.
            self.port_vdp_vlan_dict[vdp_vlan] = 0
        else:
            self.port_vdp_vlan_dict[vdp_vlan] -= 1
            if not self.port_vdp_vlan_dict[vdp_vlan]:
                del self.port_vdp_vlan_dict[vdp_vlan]

    def decr_reset_vlan(self, port_uuid, new_vlan):
        """Re-point a vNIC from its old VDP VLAN to new_vlan."""
        old_vlan = self.get_portid_vlan(port_uuid)
        self.reset_port_vlan(old_vlan)
        self.set_portid_vlan(port_uuid, new_vlan)
        self.set_port_vlan(new_vlan)

    def set_fail_reason(self, port_uuid, fail_reason):
        """Convenience wrapper around set_portid_fail_reason."""
        self.set_portid_fail_reason(port_uuid, fail_reason)

    def any_valid_vlan(self):
        """Return True if at least one VDP VLAN is accounted for."""
        return len(self.port_vdp_vlan_dict) != 0

    def any_consistent_vlan(self):
        """Return the single VDP VLAN in use, or False if zero or many.

        Bug fix: the original method ended with an unreachable
        ``return self.port_vdp_vlan_dict.keys()[0]`` which is also invalid
        on Python 3 (dict views are not indexable); the dead line has been
        removed and the six.iterkeys() loop replaced with next(iter(...)).
        """
        if len(self.port_vdp_vlan_dict) != 1:
            return False
        return next(iter(self.port_vdp_vlan_dict))
class OVSNeutronVdp(object):
    """Implements the VDP specific changes in OVS.

    Creating the veth pairs, programming the flows for VDP, deleting the VDP
    specific flows, communicating with VDP (lldpad) daemon using lldpad class
    are some of the functionality provided by this class.
    """

    def __init__(self, uplink, integ_br, ext_br, root_helper,
                 vdp_vlan_cb, vdp_mode=constants.VDP_SEGMENT_MODE):
        """Wire up OVS/lldpad state for one physical uplink.

        :param uplink: name of the physical uplink interface
        :param integ_br: name of the integration bridge
        :param ext_br: name of the external (physical) bridge
        :param root_helper: helper prefix for privileged commands
        :param vdp_vlan_cb: callback invoked when a port's VDP VLAN changes
        :param vdp_mode: VDP mode; only VDP_SEGMENT_MODE paths act here
        """
        self.root_helper = root_helper
        self.uplink = uplink
        self.integ_br = integ_br
        self.ext_br = ext_br
        self.vdp_mode = vdp_mode
        # net_uuid -> LocalVlan book-keeping for each provisioned network.
        self.local_vlan_map = {}
        self.lldpad_info = {}
        self.lldp_local_veth_port = None
        self.lldp_ovs_veth_port = None
        # Serializes port events, flow checks and VLAN-change callbacks.
        self.ovs_vdp_lock = sys_utils.lock()
        self.phy_peer_port_num = cconstants.INVALID_OFPORT
        self.int_peer_port_num = cconstants.INVALID_OFPORT
        self.int_peer_port = None
        self.phy_peer_port = None
        self.ext_br_obj = None
        self.integ_br_obj = None
        self.vdp_vlan_cb = vdp_vlan_cb
        self.uplink_fail_reason = ""
        self.setup_lldpad = self.setup_lldpad_ports()
        if not self.setup_lldpad:
            # Setup failed; do not start the periodic flow-consistency task.
            return
        flow_check_periodic_task = sys_utils.PeriodicTask(
            cconstants.FLOW_CHECK_INTERVAL, self._flow_check_handler)
        self.flow_check_periodic_task = flow_check_periodic_task
        flow_check_periodic_task.run()
def is_lldpad_setup_done(self):
return self.setup_lldpad
def _check_bridge_flow(self, flow, out_vlan, in_vlan):
out_vlan_flow_str = 'dl_vlan=' + str(out_vlan)
in_vlan_flow_str = 'actions=mod_vlan_vid:' + str(in_vlan)
flow_str = out_vlan_flow_str + ' ' + in_vlan_flow_str
flow_partition = flow.partition(flow_str)[1]
return len(flow_partition) != 0
if not len(flow_partition):
return False
return True
    def _flow_check_handler_internal(self):
        """Periodic handler to check if installed flows are present.

        This handler runs periodically to check if installed flows are present.
        This function cannot detect and delete the stale flows, if present.
        It requires more complexity to delete stale flows. Generally, stale
        flows are not present. So, that logic is not put here.
        """
        integ_flow = self.integ_br_obj.dump_flows_for(
            in_port=self.int_peer_port_num)
        ext_flow = self.ext_br_obj.dump_flows_for(
            in_port=self.phy_peer_port_num)
        for net_uuid, lvm in six.iteritems(self.local_vlan_map):
            vdp_vlan = lvm.any_consistent_vlan()
            flow_required = False
            # NOTE(review): this 'return' aborts the scan for ALL remaining
            # networks as soon as one network lacks a single valid VDP VLAN;
            # 'continue' may have been intended -- confirm before changing.
            if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
                return
            if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
                LOG.error(_LE("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                              "%(lvid)s not present on Integ bridge"),
                          {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
                flow_required = True
            if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
                LOG.error(_LE("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                              "%(lvid)s not present on External bridge"),
                          {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
                flow_required = True
            if flow_required:
                # Reinstall the missing translation flows for this network.
                LOG.info(_LI("Programming flows for lvid %(lvid)s vdp vlan"
                             " %(vdp)s"),
                         {'lvid': lvm.lvid, 'vdp': vdp_vlan})
                self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
def _flow_check_handler(self):
"""Top level routine to check OVS flow consistency. """
LOG.info(_LI("In _flow_check_handler"))
try:
with self.ovs_vdp_lock:
self._flow_check_handler_internal()
except Exception as e:
LOG.error(_LE("Exception in _flow_check_handler_internal %s"),
str(e))
def program_vdp_flows(self, lldp_ovs_portnum, phy_port_num):
br = self.ext_br_obj
high_prio = constants.VDP_FLOW_PRIO
br.add_flow(priority=high_prio, in_port=lldp_ovs_portnum,
dl_dst=constants.NCB_DMAC, dl_type=constants.LLDP_ETYPE,
actions="output:%s" % phy_port_num)
br.add_flow(priority=high_prio, in_port=phy_port_num,
dl_dst=constants.NCB_DMAC, dl_type=constants.LLDP_ETYPE,
actions="output:%s" % lldp_ovs_portnum)
br.add_flow(priority=high_prio, in_port=lldp_ovs_portnum,
dl_dst=constants.NCB_DMAC, dl_type=constants.VDP22_ETYPE,
actions="output:%s" % phy_port_num)
br.add_flow(priority=high_prio, in_port=phy_port_num,
dl_dst=constants.NCB_DMAC, dl_type=constants.VDP22_ETYPE,
actions="output:%s" % lldp_ovs_portnum)
def delete_vdp_flows(self):
br = self.ext_br_obj
br.delete_flows(dl_dst=constants.NCB_DMAC,
dl_type=constants.LLDP_ETYPE)
br.delete_flows(dl_dst=constants.NCB_DMAC,
dl_type=constants.VDP22_ETYPE)
    def clear_obj_params(self):
        """Tear down the uplink-specific state held by this object.

        Stops the periodic flow check, removes the VDP punt flows, detaches
        the lldpad veth and the uplink from the external bridge and releases
        the lldpad driver instance.
        """
        LOG.debug("Clearing Uplink Params")
        self.flow_check_periodic_task.stop()
        # How is the IP link/veth going to be removed?? fixme(padkrish)
        # If the veth is removed, there is no need to unconfigure lldp/evb.
        self.delete_vdp_flows()
        lldp_ovs_veth_str = constants.LLDPAD_OVS_VETH_PORT + self.uplink
        # NOTE(review): setup_lldpad_ports() truncates this name via
        # gen_veth_str() when it exceeds MAX_VETH_NAME; the untruncated name
        # used here would then not match the actual port -- confirm.
        br = self.ext_br_obj
        br.delete_port(lldp_ovs_veth_str)
        br.delete_port(self.uplink)
        self.lldpad_info.clear_uplink()
        del self.lldpad_info
        # It's ok if the veth remains even if the uplink changes, worst case
        # the number of veth's will be the number of physical server ports.
        # It's not a common occurrence for uplink to change, even if so
        # the unused veth can be removed manually.
        # Reason for not removing it is the same as given in
        # setup_lldpad_ports().
        # ip_lib.IPDevice(lldp_ovs_veth_str, self.root_helper).link.delete()
    def program_vm_ovs_flows(self, lvid, old_vlan, new_vlan):
        """Rewrite the VLAN-translation flows for one tenant network.

        :param lvid: local (integration-bridge) VLAN of the network
        :param old_vlan: previously negotiated VDP VLAN; when non-zero, the
            flows translating to/from it are removed first
        :param new_vlan: newly negotiated VDP VLAN; when non-zero, flows
            translating lvid <-> new_vlan are installed on both bridges
        """
        if old_vlan:
            # outbound
            self.ext_br_obj.delete_flows(in_port=self.phy_peer_port_num,
                                         dl_vlan=lvid)
            # inbound
            self.integ_br_obj.delete_flows(in_port=self.int_peer_port_num,
                                           dl_vlan=old_vlan)
        if new_vlan:
            # outbound: rewrite the local VLAN to the VDP VLAN
            self.ext_br_obj.add_flow(priority=4,
                                     in_port=self.phy_peer_port_num,
                                     dl_vlan=lvid,
                                     actions="mod_vlan_vid:%s,normal" %
                                     new_vlan)
            # inbound: rewrite the VDP VLAN back to the local VLAN
            self.integ_br_obj.add_flow(priority=3,
                                       in_port=self.int_peer_port_num,
                                       dl_vlan=new_vlan,
                                       actions="mod_vlan_vid:%s,normal" % lvid)
def gen_veth_str(self, const_str, intf_str):
"""Generate a veth string.
Concatenates the constant string with remaining available length
of interface string from trailing position.
"""
avl_len = constants.MAX_VETH_NAME - len(const_str)
if avl_len <= 0:
LOG.error(_LE("veth string name too short"))
raise dfae.DfaAgentFailed(reason="Veth Unavailable")
start_pos = len(intf_str) - avl_len
veth_str = const_str + intf_str[start_pos:]
return veth_str
    def setup_lldpad_ports(self):
        """Setup the flows for passing LLDP/VDP frames in OVS.

        Validates the bridge topology, creates (or reuses) the lldpad veth
        pair, attaches the uplink and the veth to the external bridge,
        installs the punt flows and starts the lldpad driver with EVB
        enabled.

        :return: True on success, False on a recoverable setup failure
        :raises dfae.DfaAgentFailed: when the bridges or their
            interconnect ports are missing
        """
        # Creating the physical bridge and setting up patch ports is done by
        # OpenStack
        ovs_bridges = ovs_lib.get_bridges(self.root_helper)
        if self.ext_br not in ovs_bridges or self.integ_br not in ovs_bridges:
            self.uplink_fail_reason = cconstants.bridge_not_cfgd_reason % (
                ovs_bridges, self.integ_br, self.ext_br)
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
        br = ovs_lib.OVSBridge(self.ext_br, root_helper=self.root_helper)
        self.ext_br_obj = br
        int_br = ovs_lib.OVSBridge(self.integ_br, root_helper=self.root_helper)
        self.integ_br_obj = int_br
        self.phy_peer_port, self.int_peer_port = self.find_interconnect_ports()
        if self.phy_peer_port is None or self.int_peer_port is None:
            self.uplink_fail_reason = cconstants.veth_not_cfgd_reason % (
                self.phy_peer_port, self.int_peer_port)
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
        # Veth names embed the uplink name; truncate when too long.
        lldp_ovs_veth_str = constants.LLDPAD_OVS_VETH_PORT + self.uplink
        if len(lldp_ovs_veth_str) > constants.MAX_VETH_NAME:
            lldp_ovs_veth_str = self.gen_veth_str(
                constants.LLDPAD_OVS_VETH_PORT,
                self.uplink)
        lldp_loc_veth_str = constants.LLDPAD_LOC_VETH_PORT + self.uplink
        if len(lldp_loc_veth_str) > constants.MAX_VETH_NAME:
            lldp_loc_veth_str = self.gen_veth_str(
                constants.LLDPAD_LOC_VETH_PORT,
                self.uplink)
        ip_wrapper = ip_lib.IPWrapper()
        self.delete_vdp_flows()
        br.delete_port(lldp_ovs_veth_str)
        if ip_lib.device_exists(lldp_ovs_veth_str):
            # What about OVS restart cases fixme(padkrish)
            # IMPORTANT.. The link delete should be done only for non-restart
            # cases. Otherwise, The MAC address of the veth interface changes
            # for every delete/create. So, if lldpad has the association sent
            # already, retriggering it will make the ASSOC appear as coming
            # from another station and more than one VSI instance will appear
            # at the Leaf. Deleting the assoc and creating the assoc for new
            # veth is not optimal. fixme(padkrish)
            # ip_lib.IPDevice(lldp_ovs_veth_str,self.root_helper).link.delete()
            lldp_loc_veth = ip_wrapper.device(lldp_loc_veth_str)
            lldp_ovs_veth = ip_wrapper.device(lldp_ovs_veth_str)
        else:
            # fixme(padkrish) Due to above reason, do the vethcreate below only
            # if it doesn't exist and not deleted.
            lldp_loc_veth, lldp_ovs_veth = (
                ip_wrapper.add_veth(lldp_loc_veth_str,
                                    lldp_ovs_veth_str))
        if not br.port_exists(self.uplink):
            phy_port_num = br.add_port(self.uplink)
        else:
            phy_port_num = br.get_port_ofport(self.uplink)
        if phy_port_num == cconstants.INVALID_OFPORT:
            self.uplink_fail_reason = cconstants.invalid_uplink_ofport_reason
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            return False
        if not br.port_exists(lldp_ovs_veth_str):
            lldp_ovs_portnum = br.add_port(lldp_ovs_veth)
        else:
            lldp_ovs_portnum = br.get_port_ofport(lldp_ovs_veth)
        if lldp_ovs_portnum == cconstants.INVALID_OFPORT:
            self.uplink_fail_reason = cconstants.lldp_ofport_not_detect_reason
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            return False
        lldp_loc_veth.link.set_up()
        lldp_ovs_veth.link.set_up()
        # What about OVS restart cases fixme(padkrish)
        self.program_vdp_flows(lldp_ovs_portnum, phy_port_num)
        self.phy_peer_port_num = br.get_port_ofport(self.phy_peer_port)
        self.int_peer_port_num = int_br.get_port_ofport(self.int_peer_port)
        if (self.phy_peer_port_num == cconstants.INVALID_OFPORT or
                self.int_peer_port_num == cconstants.INVALID_OFPORT):
            self.uplink_fail_reason = cconstants.invalid_peer_ofport_reason % (
                self.phy_peer_port_num, self.int_peer_port_num)
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            return False
        self.lldpad_info = (lldpad.LldpadDriver(lldp_loc_veth_str, self.uplink,
                                                self.root_helper))
        ret = self.lldpad_info.enable_evb()
        if not ret:
            self.uplink_fail_reason = cconstants.evb_cfg_fail_reason
            LOG.error(_LE("%s"), self.uplink_fail_reason)
            return False
        self.lldp_local_veth_port = lldp_loc_veth_str
        self.lldp_ovs_veth_port = lldp_ovs_veth_str
        LOG.info(_LI("Setting up lldpad ports complete"))
        return True
def get_uplink_fail_reason(self):
return self.uplink_fail_reason
def get_lldp_local_bridge_port(self):
return self.lldp_local_veth_port
def get_lldp_ovs_bridge_port(self):
return self.lldp_ovs_veth_port
def find_interconnect_ports(self):
"""Find the internal veth or patch ports. """
phy_port_list = self.ext_br_obj.get_port_name_list()
int_port_list = self.integ_br_obj.get_port_name_list()
for port in phy_port_list:
# Use get Interface xxx type
is_patch = ovs_lib.is_patch(self.root_helper, port)
if is_patch:
# Get the peer for this patch
peer_port = ovs_lib.get_peer(self.root_helper, port)
if peer_port in int_port_list:
return port, peer_port
# A solution is needed for veth pairs also, fixme(padkrish)
# ip_wrapper.get_devices() returns all the devices
# Pick the ones whose type is veth (?) and get the other pair
# Combination of "ethtool -S xxx" command and "ip tool" command.
return None, None
    def port_down_segment_mode(self, lldpad_port, port_uuid, mac,
                               net_uuid, segmentation_id, oui):
        """Handle a vNIC DOWN in segment mode.

        Sends a VDP dis-associate for the vNIC, removes it from the
        per-network cache and, when no port with a valid VDP VLAN remains,
        tears down the OVS translation flows.

        :param lldpad_port: lldpad driver used to talk to the daemon
        :param port_uuid: UUID of the vNIC going down
        :param mac: MAC address of the vNIC
        :param net_uuid: UUID of the network the vNIC belongs to
        :param segmentation_id: group/segment ID of the network
        :param oui: OUI parameters
        :return: dict with 'result' and 'fail_reason' keys
        """
        lvm = self.local_vlan_map.get(net_uuid)
        if not lvm:
            fail_reason = "Local VLAN Map not available in port_down"
            LOG.error(_LE("%s"), fail_reason)
            return {'result': False, 'fail_reason': fail_reason}
        if port_uuid not in lvm.port_uuid_list:
            fail_reason = "port_uuid %s not in cache for port_down" % (
                port_uuid)
            LOG.error(_LE("%s"), fail_reason)
            return {'result': False, 'fail_reason': fail_reason}
        vdp_vlan = lvm.late_binding_vlan
        lldpad_port.send_vdp_vnic_down(port_uuid=port_uuid,
                                       vsiid=port_uuid,
                                       gid=segmentation_id,
                                       mac=mac, vlan=vdp_vlan, oui=oui)
        lvm.port_uuid_list.pop(port_uuid, None)
        lvm.reset_port_vlan(vdp_vlan)
        # Check here that if all the VM's in that network has
        # 0 as VLAN (dis-assoc sent by switch, but flow not removed), then
        # also remove the flow by calling unprovision. Do this after the
        # pop instruction above.
        # Check with the late binding vlan, if that still points to
        # old_vlan, remove the flow and make late_binding_vlan as 0
        # late_binding_vlan should reflect the VLAN that is installed
        # for the flow.
        if not lvm.port_uuid_list:
            # Last vNIC of the network: remove flows and drop the cache.
            self.unprovision_vdp_overlay_networks(net_uuid, lvm.lvid,
                                                  vdp_vlan, oui)
            self.local_vlan_map.pop(net_uuid)
            LOG.info(_LI("No valid ports, clearing flows"))
        else:
            # There are ports present in the network case.
            if not lvm.any_valid_vlan():
                # This condition will be hit when there are still ports
                # remaining in the network, but none of them have a valid
                # VLAN. i.e. Dis-assoc sent by switch for all ports except
                # one, vdp_vlan_change will not remove flows, since there's
                # a valid port left with a VLAN. Now, user removes the VM
                # with valid port. Now flow has to be deleted since
                # there's no valid port with a VLAN.
                self.unprovision_vdp_overlay_networks(net_uuid, lvm.lvid,
                                                      vdp_vlan, oui)
                lvm.late_binding_vlan = 0
                LOG.info(_LI("unprovisioned Local %(lvid)s, VDP %(vdp)s VLAN "
                             "since no VM has valid VLAN"),
                         {'lvid': lvm.lvid, 'vdp': vdp_vlan})
            else:
                # There are still valid VLAN's present.
                # Say, out of 3 VM's one VM got a VLAN change due to which
                # the new flow will be programmed according to new vlan.
                # The VM with new VLAN gets deleted.
                # Say, for whatever reason, the other VM's in the 'same'
                # network didn't gets changed to new VLAN.
                # VLAN change function won't be called and so it will
                # be stranded with stale flow unless the below
                # functionality of putting back the old VLAN is there.
                vlan_other = lvm.any_consistent_vlan()
                if vlan_other and ovs_lib.is_valid_vlan_tag(vlan_other) and (
                        vlan_other != lvm.late_binding_vlan):
                    self.program_vm_ovs_flows(lvm.lvid, vdp_vlan, vlan_other)
                    lvm.late_binding_vlan = vlan_other
                    self.vdp_nego_req = True
                    LOG.info(_LI("Reprogrammed old Flows Local %(lvid)s "
                                 "VDP %(vdp)s Other %(other)s VLANs"),
                             {'lvid': lvm.lvid, 'vdp': vdp_vlan,
                              'other': vlan_other})
        return {'result': True, 'fail_reason': None}
    def port_up_segment_mode(self, lldpad_port, port_name, port_uuid, mac,
                             net_uuid, segmentation_id, oui):
        """Handle a vNIC UP in segment mode.

        Re-uses the network's negotiated VDP VLAN when available; otherwise
        provisions the overlay (associate + flows) for the first vNIC.

        :param lldpad_port: lldpad driver used to talk to the daemon
        :param port_name: OVS port name of the vNIC
        :param port_uuid: UUID of the vNIC
        :param mac: MAC address of the vNIC
        :param net_uuid: UUID of the network the vNIC belongs to
        :param segmentation_id: group/segment ID of the network
        :param oui: OUI parameters
        :return: dict with 'result' and 'fail_reason' keys
        """
        lvm = self.local_vlan_map.get(net_uuid)
        if lvm and lvm.late_binding_vlan and (not lvm.vdp_nego_req):
            # Fast path: the network already has a negotiated VDP VLAN.
            vdp_vlan = lvm.late_binding_vlan
            ovs_cb_data = {'obj': self, 'mac': mac,
                           'port_uuid': port_uuid, 'net_uuid': net_uuid}
            vlan, fail_reason = lldpad_port.send_vdp_vnic_up(
                port_uuid=port_uuid, vsiid=port_uuid, gid=segmentation_id,
                mac=mac, vlan=vdp_vlan, oui=oui,
                vsw_cb_fn=self.vdp_vlan_change, vsw_cb_data=ovs_cb_data)
            lvm.set_port_uuid(port_uuid, vdp_vlan, fail_reason)
            return {'result': True, 'fail_reason': fail_reason}
        else:
            int_br = self.integ_br_obj
            lvid = int_br.get_port_vlan_tag(port_name)
            if lvid != cconstants.INVALID_VLAN:
                provision_reply = self.provision_vdp_overlay_networks(
                    port_uuid, mac, net_uuid, segmentation_id, lvid, oui)
                vdp_vlan = provision_reply.get('vdp_vlan')
                if not lvm:
                    lvm = LocalVlan(lvid, segmentation_id)
                    self.local_vlan_map[net_uuid] = lvm
                lvm.lvid = lvid
                # This is just populating the list of ports in a network.
                # The vdp_vlan that's a part of port_list is just for
                # debugging. So, it's ok to populate the port UUID list even
                # if VDP VLAN is invalid.
                lvm.set_port_uuid(port_uuid, vdp_vlan,
                                  provision_reply.get('fail_reason'))
                if vdp_vlan != cconstants.INVALID_VLAN:
                    lvm.late_binding_vlan = vdp_vlan
                    lvm.vdp_nego_req = False
                else:
                    LOG.error(_LE("Cannot provision VDP overlay"))
                return {'result': provision_reply.get('result'),
                        'fail_reason': provision_reply.get('fail_reason')}
            else:
                fail_reason = "Invalid OVS VLAN for port %s" % (port_name)
                LOG.error(_LE("%s"), fail_reason)
                return {'result': False,
                        'fail_reason': fail_reason}
def send_vdp_port_event_internal(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui):
"""Send vNIC UP/Down event to VDP.
:param port_uuid: a ovslib.VifPort object.
:mac: MAC address of the VNIC
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
:oui: OUI Parameters
"""
lldpad_port = self.lldpad_info
if not lldpad_port:
fail_reason = "There is no LLDPad port available."
LOG.error(_LE("%s"), fail_reason)
return {'result': False, 'fail_reason': fail_reason}
if status == 'up':
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
port_name = self.ext_br_obj.get_ofport_name(port_uuid)
if port_name is None:
fail_reason = "Unknown portname for uuid %s" % (port_uuid)
LOG.error(_LE("%s"), fail_reason)
return {'result': False, 'fail_reason': fail_reason}
LOG.info("Status up: portname for uuid %(uuid)s is %(port)s",
{'uuid': port_uuid, 'port': port_name})
ret = self.port_up_segment_mode(lldpad_port, port_name,
port_uuid, mac, net_uuid,
segmentation_id, oui)
else:
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
LOG.info(_LI("Status down for portname uuid %s"), port_uuid)
ret = self.port_down_segment_mode(lldpad_port, port_uuid,
mac, net_uuid,
segmentation_id, oui)
return ret
def send_vdp_port_event(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui):
"""Send vNIC UP/Down event to VDP.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
"""
try:
with self.ovs_vdp_lock:
ret = self.send_vdp_port_event_internal(port_uuid, mac,
net_uuid,
segmentation_id,
status, oui)
return ret
except Exception as e:
LOG.error(_LE("Exception in send_vdp_port_event %s") % str(e))
return {'result': False, 'fail_reason': str(e)}
def get_lvid_vdp_vlan(self, net_uuid, port_uuid):
"""Retrieve the Local Vlan ID and VDP Vlan. """
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error(_LE("lvm not yet created, get_lvid_vdp_lan "
"return error"))
return cconstants.INVALID_VLAN, cconstants.INVALID_VLAN
vdp_vlan = lvm.get_portid_vlan(port_uuid)
lvid = lvm.lvid
LOG.info("Return from lvid_vdp_vlan lvid %(lvid)s vdp_vlan %(vdp)s",
{'lvid': lvid, 'vdp': vdp_vlan})
return lvid, vdp_vlan
def unprovision_vdp_overlay_networks(self, net_uuid, lvid, vdp_vlan, oui):
"""Unprovisions a overlay type network configured using VDP.
:param net_uuid: the uuid of the network associated with this vlan.
:lvid: Local VLAN ID
:vdp_vlan: VDP VLAN ID
:oui: OUI Parameters
"""
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error(_LE("Cannot unprovision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid "),
{'net_uuid': net_uuid})
return
LOG.info(_LI('unprovision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s'),
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, vdp_vlan, 0)
    def vdp_vlan_change_internal(self, vsw_cb_data, vdp_vlan, fail_reason):
        """Callback Function from VDP when provider VLAN changes.

        This will be called only during error cases when switch
        reloads or when compute reloads.

        :param vsw_cb_data: dict with 'net_uuid' and 'port_uuid' keys,
            registered at associate time
        :param vdp_vlan: new VLAN reported by the switch (0/invalid means
            dis-associated)
        :param fail_reason: failure text accompanying the change, if any
        """
        LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan)
        if not vsw_cb_data:
            LOG.error(_LE("NULL vsw_cb_data Info received"))
            return
        net_uuid = vsw_cb_data.get('net_uuid')
        port_uuid = vsw_cb_data.get('port_uuid')
        lvm = self.local_vlan_map.get(net_uuid)
        if not lvm:
            LOG.error(_LE("Network %s is not in the local vlan map"), net_uuid)
            return
        lldpad_port = self.lldpad_info
        if not lldpad_port:
            LOG.error(_LE("There is no LLDPad port available."))
            return
        exist_vdp_vlan = lvm.late_binding_vlan
        # NOTE(review): other methods use lvm.lvid for the local VLAN while
        # this one reads lvm.vlan -- confirm they are kept in sync.
        lvid = lvm.vlan
        LOG.debug("lvid %(lvid)s exist %(vlan)s",
                  {'lvid': lvid, 'vlan': exist_vdp_vlan})
        lvm.decr_reset_vlan(port_uuid, vdp_vlan)
        lvm.set_fail_reason(port_uuid, fail_reason)
        self.vdp_vlan_cb(port_uuid, lvid, vdp_vlan, fail_reason)
        if vdp_vlan == exist_vdp_vlan:
            LOG.debug("No change in provider VLAN %s", vdp_vlan)
            return
        # Logic is if the VLAN changed to 0, clear the flows only if none of
        # the VM's in the network has a valid VLAN.
        if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
            if ovs_lib.is_valid_vlan_tag(exist_vdp_vlan) and not (
                    lvm.any_valid_vlan()):
                # Clear the old flows
                LOG.debug("Clearing flows, no valid vlans")
                self.program_vm_ovs_flows(lvid, exist_vdp_vlan, 0)
                lvm.late_binding_vlan = 0
                lvm.vdp_nego_req = False
        else:
            # If any VM gets a VLAN change, we immediately modify the flow.
            # This is done to not wait for all VM's VLAN getting updated from
            # switch. Logic is if any VM gets a new VLAN, the other VM's of
            # the same network will be updated eventually.
            if vdp_vlan != exist_vdp_vlan and (
                    ovs_lib.is_valid_vlan_tag(vdp_vlan)):
                # Add the new flows and remove the old flows
                LOG.warn("Non Zero VDP Vlan change %s %s" %
                         (vdp_vlan, exist_vdp_vlan))
                self.program_vm_ovs_flows(lvid, exist_vdp_vlan, vdp_vlan)
                lvm.late_binding_vlan = vdp_vlan
                lvm.vdp_nego_req = False
            else:
                LOG.error(_LE("Invalid or same VLAN Exist %(exist)s "
                              "New %(new)s VLANs"),
                          {'exist': exist_vdp_vlan, 'new': vdp_vlan})
def vdp_vlan_change(self, vsw_cb_data, vdp_vlan, fail_reason):
"""Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads.
"""
LOG.debug("In VDP VLAN change VLAN %s" % vdp_vlan)
try:
with self.ovs_vdp_lock:
self.vdp_vlan_change_internal(vsw_cb_data, vdp_vlan,
fail_reason)
except Exception as e:
LOG.error(_LE("Exception in vdp_vlan_change %s") % str(e))
    def provision_vdp_overlay_networks(self, port_uuid, mac, net_uuid,
                                       segmentation_id, lvid, oui):
        """Provisions a overlay type network configured using VDP.

        :param port_uuid: the uuid of the VM port.
        :param mac: the MAC address of the VM.
        :param net_uuid: the uuid of the network associated with this vlan.
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param lvid: Local VLAN ID
        :param oui: OUI Parameters
        :return: dict with 'result', 'vdp_vlan' and 'fail_reason' keys
        """
        lldpad_port = self.lldpad_info
        if lldpad_port:
            ovs_cb_data = {'obj': self, 'port_uuid': port_uuid, 'mac': mac,
                           'net_uuid': net_uuid}
            # new_network=True requests a synchronous VLAN reply.
            vdp_vlan, fail_reason = lldpad_port.send_vdp_vnic_up(
                port_uuid=port_uuid, vsiid=port_uuid, gid=segmentation_id,
                mac=mac, new_network=True, oui=oui,
                vsw_cb_fn=self.vdp_vlan_change, vsw_cb_data=ovs_cb_data)
        else:
            fail_reason = "There is no LLDPad port available."
            LOG.error(_LE("%s"), fail_reason)
            return {'result': False, 'vdp_vlan': cconstants.INVALID_VLAN,
                    'fail_reason': fail_reason}
        # check validity
        if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
            LOG.error(_LE("Cannot provision VDP Overlay network for"
                          " net-id=%(net_uuid)s - Invalid "),
                      {'net_uuid': net_uuid})
            return {'result': True, 'vdp_vlan': cconstants.INVALID_VLAN,
                    'fail_reason': fail_reason}
        LOG.info(_LI('provision_vdp_overlay_networks: add_flow for '
                     'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s'),
                 {'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
        self.program_vm_ovs_flows(lvid, 0, vdp_vlan)
        return {'result': True, 'vdp_vlan': vdp_vlan, 'fail_reason': None}
def pop_local_cache(self, port_uuid, mac, net_uuid, lvid, vdp_vlan,
segmentation_id):
"""Populate the local cache after restart. """
LOG.info("Populating the OVS VDP cache with port %(port_uuid)s, "
"mac %(mac)s net %(net_uuid)s lvid %(lvid)s vdpvlan "
"%(vdp_vlan)s seg %(seg)s",
{'port_uuid': port_uuid, 'mac': mac, 'net_uuid': net_uuid,
'lvid': lvid, 'vdp_vlan': vdp_vlan, 'seg': segmentation_id})
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
lvm = LocalVlan(lvid, segmentation_id)
self.local_vlan_map[net_uuid] = lvm
lvm.lvid = lvid
lvm.set_port_uuid(port_uuid, vdp_vlan, None)
if vdp_vlan != cconstants.INVALID_VLAN:
lvm.late_binding_vlan = vdp_vlan
lvm.vdp_nego_req = False

View File

@ -1,27 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""LLDP/VDP Constants."""
LLDPAD_OVS_VETH_PORT = "LLDPO"
LLDPAD_LOC_VETH_PORT = "LLDPL"
MAX_VETH_NAME = 10
NCB_DMAC = "01:80:c2:00:00:00"
LLDP_ETYPE = 0x88cc
VDP22_ETYPE = 0x8940
VDP_FLOW_PRIO = 99
VDP_SEGMENT_MODE = 10

View File

@ -1,213 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from oslo_config import cfg
from networking_cisco._i18n import _LE
from networking_cisco.apps.saf.agent.vdp import (
lldpad_constants as vdp_const)
from networking_cisco.apps.saf.common import constants as com_const
from networking_cisco.apps.saf.common import utils
from networking_cisco.apps.saf.server.services import constants as const
from networking_cisco.apps.saf.server.services.firewall.native import (
fw_constants as fw_const)
# Default option values for the enabler configuration.  Each dict maps a
# config-file section name to its default key/value pairs; they are merged
# by CiscoDFAConfig._load_default_opts() and may be overridden by values
# read from the parsed configuration files.
default_keystone_opts = {
    'keystone_authtoken': {
        'username': 'admin',
        'project_name': 'admin',
        'user_domain_name': 'default',
        'project_domain_name': 'default',
    },
}
default_neutron_opts = {
    'neutron': {
        'username': 'neutron',
        'project_name': 'service',
        'user_domain_name': 'default',
        'project_domain_name': 'default',
    },
}
default_nova_opts = {
    'nova': {
        'username': 'nova',
        'project_name': 'service',
        'user_domain_name': 'default',
        'project_domain_name': 'default',
        'region_name': 'RegionOne',
        'api_version': '2.1',
    },
}
# Bridges the agent attaches to on the compute node.
default_dfa_agent_opts = {
    'dfa_agent': {
        'integration_bridge': 'br-int',
        'external_dfa_bridge': 'br-ethd',
    },
}
# IEEE 802.1Qbg VDP protocol parameters (see lldpad_constants).
default_vdp_opts = {
    'vdp': {
        'mgrid2': vdp_const.VDP_MGRID,
        'typeid': vdp_const.VDP_TYPEID,
        'typeidver': vdp_const.VDP_TYPEID_VER,
        'vsiidfrmt': vdp_const.VDP_VSIFRMT_UUID,
        'hints': 'none',
        'filter': vdp_const.VDP_FILTER_GIDMACVID,
        'vdp_sync_timeout': vdp_const.VDP_SYNC_TIMEOUT,
    },
}
default_firewall_opts = {
    'firewall': {
        'device': fw_const.DEVICE,
        'sched_policy': fw_const.SCHED_POLICY,
        'fw_auto_serv_nwk_create': fw_const.AUTO_NWK_CREATE,
        'fw_service_host_profile': fw_const.HOST_PROF,
        'fw_service_host_fwd_mode': fw_const.HOST_FWD_MODE,
        'fw_service_part_vrf_profile': fw_const.PART_PROF,
        'fw_service_ext_profile': fw_const.EXT_PROF,
        'fw_service_ext_fwd_mode': fw_const.EXT_FWD_MODE,
        'fw_service_in_ip_start': fw_const.IN_IP_START,
        'fw_service_in_ip_end': fw_const.IN_IP_END,
        'fw_service_out_ip_start': fw_const.OUT_IP_START,
        'fw_service_out_ip_end': fw_const.OUT_IP_END,
        'fw_service_dummy_ip_subnet': fw_const.DUMMY_IP_SUBNET,
    },
}
DEFAULT_LOG_LEVELS = (
    "amqp=WARN, amqplib=WARN, oslo.messaging=WARN, pika=WARN, paramiko=WARN,"
    "paramiko.transport=WARN,"
    "paramiko.transport.sftp=WARN,"
    "pika.callback=WARN,oslo.messaging._drivers=WARN"
)
default_log_opts = {
    'dfa_log': {
        'use_syslog': 'False',
        # BUG FIX: this key was previously misspelled 'syslog_lgo_facility'.
        # dfa_logger.setup_logger() reads cfg.dfa_log.syslog_log_facility,
        # so the default facility was never found when syslog was enabled.
        'syslog_log_facility': 'LOG_USER',
        'log_dir': '.',
        'log_file': 'fabric_enabler.log',
        'log_level': 'WARNING',
        'log_format': '%(asctime)s %(levelname)8s [%(name)s] %(message)s',
        'log_date_format': '%Y-%m-%d %H:%M:%S',
        'default_log_levels': DEFAULT_LOG_LEVELS,
    },
}
default_sys_opts = {
    'sys': {
        'root_helper': 'sudo',
    },
}
# DCNM (Data Center Network Manager) connectivity and profile defaults.
default_dcnm_opts = {
    'dcnm': {
        'default_cfg_profile': 'defaultNetworkIpv4EfProfile',
        'default_vrf_profile': 'vrf-common-universal',
        'default_partition_name': 'CTX',
        'dcnm_net_ext': '(DCNM)',
        'gateway_mac': '20:20:00:00:00:AA',
        'dcnm_dhcp_leases': '/var/lib/dhcpd/dhcpd.leases',
        'dcnm_dhcp': 'false',
        'segmentation_reuse_timeout': com_const.SEG_REUSE_TIMEOUT,
        'vlan_id_min': const.VLAN_ID_MIN,
        'vlan_id_max': const.VLAN_ID_MAX,
        'vlan_reuse_timeout': const.VLAN_REUSE_TIMEOUT,
        'orchestrator_id': com_const.ORCHESTRATOR_ID
    },
}
default_notify_opts = {
    'dfa_notify': {
        'cisco_dfa_notify_queue': 'cisco_dfa_%(service_name)s_notify',
    },
}
default_loadbalance_opts = {
    'loadbalance': {
        'lb_enabled': 'False',
        'lb_native': 'True',
    },
}
# All section defaults, merged in order by _load_default_opts().
default_opts_list = [
    default_log_opts,
    default_neutron_opts,
    default_nova_opts,
    default_keystone_opts,
    default_dfa_agent_opts,
    default_vdp_opts,
    default_sys_opts,
    default_dcnm_opts,
    default_notify_opts,
    default_firewall_opts,
    default_loadbalance_opts,
]
class CiscoDFAConfig(object):
    """Cisco DFA Mechanism Driver Configuration class.

    Loads the built-in defaults, then overlays values parsed from the
    service's configuration files (including any ``--config-file``
    arguments found on the command line).  The merged configuration is
    exposed both as the raw ``dfa_cfg`` dict and as an attribute-style
    object via the ``cfg`` property.
    """

    def __init__(self, service_name=None):
        self.dfa_cfg = {}
        self._load_default_opts()
        args = sys.argv[1:]
        # Pair up "--option value" tokens.  zip() silently drops a trailing
        # unpaired token; the previous list comprehension raised IndexError
        # on an odd argv length and then discarded EVERY pair, losing any
        # --config-file arguments that preceded the stray token.
        opts = list(zip(args[::2], args[1::2]))
        cfgfile = cfg.find_config_files(service_name)
        for k, v in opts:
            if k == '--config-file':
                # Explicit files take precedence over discovered ones.
                cfgfile.insert(0, v)
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfgfile)
        if len(read_ok) != len(cfgfile):
            raise cfg.Error(_LE("Failed to parse config file."))
        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                if parsed_item not in self.dfa_cfg:
                    self.dfa_cfg[parsed_item] = {}
                for key, value in parsed_file[parsed_item].items():
                    # MultiConfigParser stores each value as a list; the
                    # first element is the effective value.
                    self.dfa_cfg[parsed_item][key] = value[0]
        # Convert it to object.
        self._cfg = utils.Dict2Obj(self.dfa_cfg)

    def _load_default_opts(self):
        """Load default options."""
        for opt in default_opts_list:
            self.dfa_cfg.update(opt)

    @property
    def cfg(self):
        """Attribute-style view of the merged configuration."""
        return self._cfg

View File

@ -1,105 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# RPC message type exchange between server and agent.
VM_INFO = 1000
UPDATE_IP_RULE = 1001
UPLINK_NAME = 1002
# Length of a canonical (dashed) UUID string.
DID_LEN = 36
# RPC queue name on agent side.
DFA_AGENT_QUEUE = 'dfa_agent'
# RPC queue name on server side.
DFA_SERVER_QUEUE = 'dfa_server_q'
DFA_EXCHANGE = 'dfa'
# Result strings exchanged over RPC / stored in the DB.
RESULT_FAIL = 'FAIL'
RESULT_SUCCESS = 'SUCCESS'
CREATE_FAIL = 'CREATE:FAIL'
DELETE_FAIL = 'DELETE:FAIL'
UPDATE_FAIL = 'UPDATE:FAIL'
SUBNET_PENDING = 'SUBNET:PENDING'
DHCP_PORT_CHECK = 3
# Well-known port-name prefixes.
DHCP_PREFIX = 'dhcp'
LBAAS_PREFIX = 'lbaas'
# Intervals below are presumably in seconds unless noted — confirm at
# the call sites.
MAIN_INTERVAL = 5
# Process queues interval
PROCESS_QUE_INTERVAL = 1
# Failure recovery interval
FAIL_REC_INTERVAL = 60
# Heartbeat interval
HB_INTERVAL = 30
# Segmentation ID reuse after 1 hour
SEG_REUSE_TIMEOUT = 1
# Default Orchestrator ID
ORCHESTRATOR_ID = 'Openstack'
# Special return value for an invalid OVS ofport
INVALID_OFPORT = -1
INVALID_VLAN = -1
# Usable 802.1Q VLAN tag range.
MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094
# Internal event/message type discriminators.
VM_MSG_TYPE = 50
UPLINK_MSG_TYPE = 51
VM_BULK_SYNC_MSG_TYPE = 52
UPLINK_DET_INTERVAL = 10
ERR_PROC_INTERVAL = 20
# IF 'down' is seen twice continuously
UPLINK_DOWN_THRES = 3
# Timer to check for the presence of flows
FLOW_CHECK_INTERVAL = 60
# Queue priorities (lower value = higher priority).
Q_UPL_PRIO = 1
Q_VM_PRIO = 2
# Resource kind identifiers.
RES_SEGMENT = "SEGMENT"
RES_VLAN = "VLAN"
RES_IN_SUBNET = 'IN_SUB'
RES_OUT_SUBNET = 'OUT_SUB'
# Human-readable failure reasons reported to the server/operator; the
# %s-style templates are filled in at the call sites.
uplink_down_reason = "Uplink went down"
uplink_undiscovered_reason = "Uplink not yet discovered"
port_transition_bond_down_reason = \
    "Physical port became port of bond interface, intermittent down"
port_transition_bond_up_reason = \
    "Physical port became port of bond interface, intermittent up"
bridge_not_cfgd_reason = \
    "Integ or Physical Bridge not created by Openstack. Bridge_list: " \
    "%s Integ Br: %s Ext Br: %s"
veth_not_cfgd_reason = \
    "Integ or Physical Patch/Veth Ports not configured by Openstack. " \
    "Ext Peer: %s Integ Peer: %s"
invalid_uplink_ofport_reason = "Uplink OF port not detected on external bridge"
lldp_ofport_not_detect_reason = \
    "lldp veth port not detected on external bridge"
invalid_peer_ofport_reason = \
    "int or phy peer OF Port not detected on Int or Phy Bridge. " \
    "Phy Peer: %s Int Peer: %s"
evb_cfg_fail_reason = "Unable to cfg EVB"

View File

@ -1,92 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Exceptions used by DFA enabler"""
from neutron_lib import exceptions
from neutronclient.common import exceptions as ncexc
from networking_cisco._i18n import _
# Neutronclient exceptions
# Re-exported so callers can catch connection failures without importing
# neutronclient directly.
ConnectionFailed = ncexc.ConnectionFailed


# Each exception below derives from a neutron_lib base class and supplies
# a %()s-style `message` template; keyword arguments given at raise time
# fill in the template.

class NetworkNotFound(exceptions.NotFound):
    """Network cannot be found."""
    message = _("Network %(network_id)s could not be found.")


class ConfigProfileNotFound(exceptions.NotFound):
    """Config Profile cannot be found."""
    message = _("Config profile for network %(network_id)s could "
                "not be found.")


class ConfigProfileFwdModeNotFound(exceptions.NotFound):
    """Config Profile forwarding mode cannot be found."""
    message = _("Forwarding Mode for network %(network_id)s "
                "could not be found.")


class ConfigProfileIdNotFound(exceptions.NotFound):
    """Config Profile ID cannot be found."""
    message = _("Config Profile %(profile_id)s could not be found.")


class ConfigProfileNameNotFound(exceptions.NotFound):
    """Config Profile name cannot be found."""
    message = _("Config Profile %(name)s could not be found.")


class ProjectIdNotFound(exceptions.NotFound):
    """Project ID cannot be found."""
    message = _("Project ID %(project_id)s could not be found.")


class DfaClientRequestFailed(exceptions.ServiceUnavailable):
    """Request to DCNM failed."""
    message = _("Request to DCNM failed: %(reason)s.")


class DfaAgentFailed(exceptions.ServiceUnavailable):
    """Failure in running DfaAgent."""
    message = _("OpenStack is not running: %(reason)s.")


class InvalidInput(exceptions.InvalidInput):
    """Invalid Input specified."""
    message = _("Invalid input for operation: %(error_message)s.")

View File

@ -1,103 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""DFA logging helper module."""
import logging
import logging.handlers as log_hdlr
import os
import sys
# Rotating file size limit.
ONEK = 1024
ONEMEG = (1024 * 1024)
MAX_BYTES = (5 * ONEMEG)
BACKUP_COUNT = 10
# `when` codes accepted by logging.handlers.TimedRotatingFileHandler.
DAYS = 'D'
SECONDS = 'S'
MINUTES = 'M'
HOURS = 'H'
MIDNIGHT = 'MIDNIGHT'
# Map config-file level names (lower case) to logging module levels.
LOG_LEVELS = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR,
              'critical': logging.CRITICAL}
# Cache of loggers handed out by getLogger(); also records which
# projects have already been configured by setup_logger().
_loggers = {}
def getLogger(name):
    """Return the cached logger for *name*, creating it on first use."""
    try:
        return _loggers[name]
    except KeyError:
        _loggers[name] = logging.getLogger(name)
        return _loggers[name]
def setup_logger(project, cfg):
    """Configure the root logger for *project* from the dfa_log config.

    Sets the global level, applies per-module default levels, attaches a
    single handler (rotating file, stdout stream, or syslog) and records
    the project as configured so repeated calls are no-ops.
    """
    if _loggers.get(project):
        return
    logger = getLogger(None)
    # Set logging level.
    level = LOG_LEVELS.get(cfg.dfa_log.log_level.lower(), logging.WARNING)
    logger.setLevel(level)
    # Set default log levels for specified modules.
    def_log_levels = cfg.dfa_log.default_log_levels
    for dll in def_log_levels.split(','):
        mod, sep, lvl = dll.partition('=')
        level = LOG_LEVELS.get(lvl.lower(), logging.WARNING)
        logging.getLogger(mod).setLevel(level)
    # Pick a handler.  Default to stdout; previously `handler` was left
    # unbound (NameError at setFormatter below) when log_dir/log_file were
    # configured but os.makedirs failed.
    handler = logging.StreamHandler(sys.stdout)
    log_dir = cfg.dfa_log.log_dir
    log_file = cfg.dfa_log.log_file
    if log_dir and log_file:
        try:
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
        except OSError:
            # Directory cannot be created: keep the stdout fallback.
            pass
        else:
            log_file_path = os.path.join(log_dir, log_file)
            handler = log_hdlr.TimedRotatingFileHandler(log_file_path,
                                                        when=DAYS,
                                                        interval=1)
    if cfg.dfa_log.use_syslog.lower() == 'true':
        log_fac = cfg.dfa_log.syslog_log_facility
        # Guard against a missing facility name: getattr() with a None
        # attribute name raises TypeError.
        facility = (getattr(log_hdlr.SysLogHandler, log_fac, None)
                    if log_fac else None)
        handler = log_hdlr.SysLogHandler(address='/dev/log',
                                         facility=facility)
    # Setting log format.
    log_format = cfg.dfa_log.log_format
    date_format = cfg.dfa_log.log_date_format
    formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    _loggers[project] = logger

View File

@ -1,433 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from eventlet.green import subprocess
from eventlet import greenthread
import os
import shlex
import signal
import six
from networking_cisco._i18n import _, _LE, _LI
from networking_cisco.apps.saf.common import constants as q_const
from networking_cisco.apps.saf.common import dfa_exceptions as dfae
from networking_cisco.apps.saf.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
def is_valid_vlan_tag(vlan):
    """Return True when *vlan* lies in the usable 802.1Q tag range."""
    above_min = vlan >= q_const.MIN_VLAN_TAG
    below_max = vlan <= q_const.MAX_VLAN_TAG
    return above_min and below_max
def get_bridges(root_helper):
    """Return the list of OVS bridge names, or None when ovs-vsctl fails."""
    cmd = ["ovs-vsctl",
           "--timeout=%d" % DEFAULT_OVS_VSCTL_TIMEOUT,
           "list-br"]
    try:
        output = execute(cmd, root_helper=root_helper)
    except Exception as exc:
        LOG.error(_LE("Unable to retrieve bridges. Exception: %s"), exc)
    else:
        return output.strip().split("\n")
def is_patch(root_helper, port):
    """Return True when OVS interface *port* has type 'patch'.

    Returns False when the interface type cannot be retrieved.
    """
    args = ["ovs-vsctl", "--timeout=%d" % DEFAULT_OVS_VSCTL_TIMEOUT, "get",
            "Interface", port, "type"]
    try:
        output = execute(args, root_helper=root_helper).strip().split("\n")
    except Exception as e:
        LOG.error(_LE("Unable to retrieve Interface type %s"), e)
        return False
    # Direct boolean result replaces the redundant if/else ladder.
    return 'patch' in output
def get_peer(root_helper, port):
    """Return the 'options:peer' value of OVS interface *port*, or None."""
    args = ["ovs-vsctl", "--timeout=%d" % DEFAULT_OVS_VSCTL_TIMEOUT, "get",
            "Interface", port, "options"]
    try:
        output = execute(args, root_helper=root_helper).strip().split("\n")
        # "{peer=xyz}" -> "xyz"
        output1 = output[0].split("=")[1].strip('}')
    except Exception as e:
        # BUG FIX: the message had no %s placeholder but an argument was
        # passed, which makes the logging module report a formatting error
        # instead of the real one.
        LOG.error(_LE("Unable to retrieve Peer: %s"), e)
        return None
    return output1
def get_bridge_name_for_port_name_glob(root_helper, port_name):
    """Return the bridge that *port_name* belongs to, or False on error."""
    try:
        args = ["ovs-vsctl", "--timeout=%d" % DEFAULT_OVS_VSCTL_TIMEOUT,
                "port-to-br", port_name]
        output = execute(args, root_helper=root_helper)
        return output
    except RuntimeError as exc:
        # BUG FIX: placeholder added — the exception argument was passed
        # without a %s, so the logging call itself failed to format.
        LOG.error(_LE("Error Running vsctl for getting bridge name for "
                      "portname: %s"), exc)
        return False
def port_exists_glob(root_helper, port_name):
    """Return a (bridge-name, exists) tuple for OVS port *port_name*."""
    output = get_bridge_name_for_port_name_glob(root_helper, port_name)
    if output:
        return output.strip(), True
    return output, False
def delete_port_glob(root_helper, br_ex, port_name):
    """Delete *port_name* from bridge *br_ex* (no-op if absent)."""
    try:
        args = ["ovs-vsctl", "--timeout=%d" % DEFAULT_OVS_VSCTL_TIMEOUT, "--",
                "--if-exists", "del-port", br_ex, port_name]
        execute(args, root_helper=root_helper)
    except RuntimeError as e:
        # BUG FIX: placeholder added for the exception argument.
        LOG.error(_LE("Error Running vsctl for port delete: %s"), e)
class BaseOVS(object):
    """Thin wrapper over the ovs-vsctl command line."""

    def __init__(self, root_helper):
        self.root_helper = root_helper
        self.vsctl_timeout = DEFAULT_OVS_VSCTL_TIMEOUT

    def run_vsctl(self, args, check_error=False):
        """Run ovs-vsctl with *args* and return its output.

        Failures are logged.  When check_error is True the exception is
        re-raised.  BUG FIX: check_error was previously accepted but
        ignored — every exception was swallowed, so bridge_exists() and
        get_bridge_name_for_port_name() (which pass check_error=True and
        catch RuntimeError themselves) could never observe a failure and
        always took their success path.
        """
        full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
        try:
            return execute(full_args, root_helper=self.root_helper)
        except Exception as e:
            LOG.error(_LE("Unable to execute %(cmd)s. "
                          "Exception: %(exception)s"),
                      {'cmd': full_args, 'exception': e})
            if check_error:
                raise

    def add_bridge(self, bridge_name):
        """Idempotently create *bridge_name* and return an OVSBridge."""
        self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
        return OVSBridge(bridge_name, self.root_helper)

    def delete_bridge(self, bridge_name):
        """Delete *bridge_name* (no-op if it does not exist)."""
        self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])

    def bridge_exists(self, bridge_name):
        """Return True when the bridge exists (br-exists exits non-zero
        otherwise, which surfaces as RuntimeError from execute)."""
        try:
            self.run_vsctl(['br-exists', bridge_name], check_error=True)
        except RuntimeError:
            return False
        return True

    def get_bridge_name_for_port_name(self, port_name):
        """Return the bridge owning *port_name*, or False on error."""
        try:
            return self.run_vsctl(['port-to-br', port_name], check_error=True)
        except RuntimeError as e:
            # BUG FIX: placeholder added for the exception argument.
            LOG.error(_LE("Error Running vsctl: %s"), e)
            return False

    def port_exists(self, port_name):
        """Return True when *port_name* is attached to some bridge."""
        return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
    """One OVS bridge; wraps per-bridge ovs-vsctl / ovs-ofctl calls."""

    def __init__(self, br_name, root_helper):
        super(OVSBridge, self).__init__(root_helper)
        self.br_name = br_name

    def set_secure_mode(self):
        """Set fail-mode 'secure' so flows are not auto-installed."""
        self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'],
                       check_error=True)

    def create(self):
        """Create the bridge (idempotent)."""
        self.add_bridge(self.br_name)

    def destroy(self):
        """Delete the bridge (idempotent)."""
        self.delete_bridge(self.br_name)

    def add_port(self, port_name):
        """Attach *port_name* and return its OF port number."""
        self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
                        port_name])
        return self.get_port_ofport(port_name)

    def delete_port(self, port_name):
        """Detach *port_name* (no-op if absent)."""
        self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
                        port_name])

    def set_db_attribute(self, table_name, record, column, value):
        """Set one OVSDB column value."""
        args = ["set", table_name, record, "%s=%s" % (column, value)]
        self.run_vsctl(args)

    def clear_db_attribute(self, table_name, record, column):
        """Clear one OVSDB column value."""
        args = ["clear", table_name, record, column]
        self.run_vsctl(args)

    def run_ofctl(self, cmd, args, process_input=None):
        """Run ovs-ofctl *cmd* against this bridge; errors are logged."""
        full_args = ["ovs-ofctl", cmd, self.br_name] + args
        try:
            return execute(full_args, root_helper=self.root_helper,
                           process_input=process_input)
        except Exception as e:
            LOG.error(_LE("Unable to execute %(cmd)s. "
                          "Exception: %(exception)s"),
                      {'cmd': full_args, 'exception': e})

    def remove_all_flows(self):
        """Delete every flow installed on the bridge."""
        self.run_ofctl("del-flows", [])

    def get_port_ofport(self, port_name):
        """Return the OF port number (as its raw DB string) or
        INVALID_OFPORT when the DB value is not an integer."""
        ofport = self.db_get_val("Interface", port_name, "ofport")
        # This can return a non-integer string, like '[]' so ensure a
        # common failure case
        try:
            int(ofport)
            return ofport
        except (ValueError, TypeError):
            return q_const.INVALID_OFPORT

    def get_port_vlan_tag(self, port_name):
        """Return the port's VLAN tag (raw DB string) or INVALID_VLAN."""
        vlan_tag = self.db_get_val("port", port_name, "tag")
        # This can return a non-integer string, like '[]' so ensure a
        # common failure case
        try:
            int(vlan_tag)
            return vlan_tag
        except (ValueError, TypeError):
            return q_const.INVALID_VLAN

    def get_ofport_name(self, iface_uuid):
        """Return the interface name for a Neutron iface-id, or None."""
        ext_str = "external_ids:iface-id=" + iface_uuid
        try:
            output = self.run_vsctl(["--columns=name", "find", "Interface",
                                     ext_str])
            return output.split()[2].strip('\"')
        except Exception:
            LOG.error(_LE("Unable to retrieve ofport name on %(iface-id)s"),
                      {'iface-id': iface_uuid})
            return None

    def do_action_flows(self, action, kwargs_list):
        """Apply *action* ('add'/'del'/'mod') for each flow dict."""
        flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
        self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))

    def add_flow(self, **kwargs):
        self.do_action_flows('add', [kwargs])

    def delete_flows(self, **kwargs):
        self.do_action_flows('del', [kwargs])

    def db_get_val(self, table, record, column, check_error=False):
        """Fetch one OVSDB value, stripped of the trailing newline."""
        output = self.run_vsctl(["get", table, record, column], check_error)
        if output:
            return output.rstrip("\n\r")

    def get_port_name_list(self):
        """Return the names of all ports attached to the bridge."""
        res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
        if res:
            return res.strip().split("\n")
        return []

    def dump_flows_for(self, **kwargs):
        """Dump flows matching *kwargs*, minus the NXST header lines."""
        retval = None
        flow_str = ",".join(
            "=".join([key, str(val)]) for key, val in kwargs.items())
        flows = self.run_ofctl("dump-flows", [flow_str])
        if flows:
            retval = '\n'.join(item for item in flows.splitlines()
                               if 'NXST' not in item)
        return retval

    def __enter__(self):
        # BUG FIX: __exit__ was defined without __enter__, so using the
        # bridge as a context manager raised AttributeError.  Mirror
        # neutron's ovs_lib: create the bridge on enter, destroy on exit.
        self.create()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.destroy()
def _subprocess_setup():
    """preexec_fn hook: restore default SIGPIPE handling in the child."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    """Spawn *args* via the (eventlet green) subprocess.Popen.

    close_fds is forced on and SIGPIPE handling is reset in the child via
    _subprocess_setup so pipelines behave like ordinary shell commands.
    """
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=_subprocess_setup,
                            close_fds=True, env=env)
def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
    """Create a process object for the given command.

    The return value will be a tuple of the process object and the
    list of command arguments used to create it.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    # BUG FIX: materialize the list.  On Python 3 `map` returns a lazy
    # iterator, so the log line below and the error text execute() later
    # builds from the returned cmd showed '<map object ...>' instead of
    # the actual command.
    cmd = list(map(str, cmd))
    if log_output:
        LOG.info(_LI("Running command: %s"), cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           env=env)
    return obj, cmd
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
            log_output=True):
    """Run *cmd* to completion and return its stdout.

    :param cmd: command argument list (prefixed with root_helper if given)
    :param process_input: optional data piped to the child's stdin
    :param check_exit_code: raise RuntimeError on a non-zero exit status
    :param return_stderr: return (stdout, stderr) instead of stdout only
    :param log_fail_as_error: log failures at ERROR (else INFO) level
    :param log_output: log the command and its result at INFO level
    :raises RuntimeError: when the exit code is non-zero and
        check_exit_code is True
    """
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env, log_output=log_output)
        # communicate() waits for the child and collects both streams;
        # the `and/or` form only feeds stdin when input was supplied.
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = _LE("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
                "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
                                         'stdout': _stdout, 'stderr': _stderr}
        if obj.returncode and log_fail_as_error:
            LOG.error(m)
        else:
            log_output and LOG.info(m)
        if obj.returncode and check_exit_code:
            raise RuntimeError(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls, without
        #               it two execute calls in a row hangs the second one
        greenthread.sleep(0)
    # Reached only on success (the RuntimeError above propagates after
    # the finally block runs).
    return return_stderr and (_stdout, _stderr) or _stdout
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = "Cannot match priority on flow deletion or modification"
raise dfae.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise dfae.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in six.iteritems(flow_dict):
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
def get_bond_intf(intf):
    """Return the bond device that has *intf* as a slave, or None."""
    bond_dir = '/proc/net/bonding/'
    if not os.path.exists(bond_dir):
        return None
    base_dir = '/sys/class/net'
    for subdir in os.listdir(bond_dir):
        file_name = '/'.join((base_dir, subdir, 'bonding', 'slaves'))
        if not os.path.exists(file_name):
            continue
        with open(file_name, 'r') as fd:
            slave_val = fd.read().strip('\n')
        # BUG FIX: exact token match.  The old substring test
        # (`intf in slave_val`) matched 'eth1' against 'eth10 eth2'
        # and could report the wrong bond.
        if intf in slave_val.split():
            return subdir
    return None
def is_intf_bond(intf):
    """Return True when *intf* names a bonding master device."""
    bond_dir = '/proc/net/bonding/'
    if not os.path.exists(bond_dir) or not intf:
        return False
    return os.path.exists('/'.join((bond_dir, intf)))
def get_member_ports(intf):
    """Return the slave list string of bond *intf*, or None."""
    if not is_intf_bond(intf):
        return None
    slaves_file = '/'.join(('/sys/class/net', intf, 'bonding', 'slaves'))
    if os.path.exists(slaves_file):
        with open(slaves_file, 'r') as fd:
            return fd.read().strip('\n')
    return None
def is_intf_up(intf):
    """Function to check if a interface is up. """
    intf_path = '/'.join(('/sys/class/net', intf))
    if not os.path.exists(intf_path):
        LOG.error(_LE("Unable to get interface %(intf)s, Interface dir "
                      "%(dir)s does not exist"),
                  {'intf': intf, 'dir': intf_path})
        return False
    try:
        oper_file = '/'.join((intf_path, 'operstate'))
        with open(oper_file, 'r') as fd:
            if fd.read().strip('\n') == 'up':
                return True
    except Exception as e:
        LOG.error(_LE("Exception in reading %s"), str(e))
    return False
def get_all_run_phy_intf():
    """Retrieve all physical interfaces that are operationally up. """
    base_dir = '/sys/class/net'
    if not os.path.exists(base_dir):
        LOG.error(_LE("Unable to get interface list :Base dir %s does not "
                      "exist"), base_dir)
        return []
    running = []
    for subdir in os.listdir(base_dir):
        # Physical devices have a 'device' entry under /sys/class/net.
        dev_dir = base_dir + '/' + subdir + '/' + 'device'
        if not os.path.exists(dev_dir):
            LOG.info(_LI("Dev dir %s does not exist, not physical intf"),
                     dev_dir)
            continue
        if is_intf_up(subdir) is True:
            running.append(subdir)
    return running

View File

@ -1,139 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from oslo_config import cfg
import oslo_messaging as messaging
from networking_cisco._i18n import _LE
from networking_cisco.apps.saf.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
# RPC exceptions
RPCException = messaging.MessagingException
RemoteError = messaging.RemoteError
MessagingTimeout = messaging.MessagingTimeout
class DfaRpcClient(object):
    """RPC Client class for DFA enabler."""

    def __init__(self, transport_url, topic, exchange=None, fanout=False):
        super(DfaRpcClient, self).__init__()
        rpc_transport = messaging.get_transport(cfg.CONF, url=transport_url)
        rpc_target = messaging.Target(exchange=exchange,
                                      topic=topic, fanout=fanout)
        self._client = messaging.RPCClient(rpc_transport, rpc_target)

    def make_msg(self, method, context, **kwargs):
        """Package a method invocation into the message dict format."""
        msg = {'method': method, 'context': context}
        msg['args'] = kwargs
        return msg

    def call(self, msg):
        """Synchronous RPC: block for the remote result."""
        return self._rpc_call(msg)

    def cast(self, msg):
        """Asynchronous RPC: fire and forget."""
        return self._rpc_cast(msg)

    def _rpc_call(self, msg):
        method, context, args = msg['method'], msg['context'], msg['args']
        return self._client.call(context, method, **args)

    def _rpc_cast(self, msg):
        method, context, args = msg['method'], msg['context'], msg['args']
        return self._client.cast(context, method, **args)
class DfaRpcServer(object):
    """RPC server class for DFA enabler."""

    def __init__(self, topic, server, url, endpoints, exchange=None,
                 fanout=False, executor='eventlet'):
        super(DfaRpcServer, self).__init__()
        rpc_transport = messaging.get_transport(cfg.CONF, url=url)
        rpc_target = messaging.Target(exchange=exchange, topic=topic,
                                      server=server, fanout=fanout)
        endpoints = [endpoints]
        self._server = messaging.get_rpc_server(rpc_transport, rpc_target,
                                                endpoints, executor=executor)
        LOG.debug('RPC server: topic=%s, server=%s, endpoints=%s' % (
            topic, server, endpoints))

    def start(self):
        """Start serving if a server was built."""
        if self._server:
            self._server.start()

    def wait(self):
        """Sleep until interrupted, then shut the server down cleanly."""
        try:
            while True:
                time.sleep(1)
        except Exception as exc:
            LOG.exception(_LE('RPC Server: Exception %s occurred'), str(exc))
            self.stop()

    def stop(self):
        """Stop the server and wait for in-flight requests to finish."""
        if self._server:
            self._server.stop()
            self._server.wait()
class DfaNotificationEndpoints(object):
    """Notification endpoints."""

    def __init__(self, endp):
        self._endpoint = endp

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Forward an INFO-priority notification to the wrapped endpoint."""
        timestamp = metadata.get('timestamp')
        self._endpoint.callback(timestamp, event_type, payload)
class DfaNotifcationListener(object):
    """RPC Client class for DFA enabler."""
    # NOTE: the class name typo ('Notifcation') is preserved — it is the
    # public name callers import.

    def __init__(self, topic, url, endpoints, exchange=None, fanout=False):
        super(DfaNotifcationListener, self).__init__()
        rpc_transport = messaging.get_transport(cfg.CONF, url=url)
        targets = [messaging.Target(exchange=exchange,
                                    fanout=fanout,
                                    topic=topic)]
        endpoints = [endpoints]
        self._listener = messaging.get_notification_listener(rpc_transport,
                                                             targets,
                                                             endpoints)

    def start(self):
        """Start consuming notifications if a listener was built."""
        if self._listener:
            self._listener.start()

    def wait(self):
        """Sleep until interrupted, then stop the listener cleanly."""
        try:
            while True:
                time.sleep(1)
        except Exception as exc:
            LOG.exception(_LE('RPC Server: Exception %s occurred'), str(exc))
            self.stop()

    def stop(self):
        """Stop the listener and wait for it to drain."""
        if self._listener:
            self._listener.stop()
            self._listener.wait()

View File

@ -1,187 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import six
import socket
import struct
import sys
import threading
from threading import Lock
import time
import traceback
import uuid
TIME_FORMAT = '%a %b %d %H:%M:%S %Y'
class PeriodicTask(object):
    """Periodic task.

    Re-arms a threading.Timer after every run of *func*, subtracting the
    callback's runtime so the period stays roughly constant.  Errors are
    reported through the optional 'excq' queue passed in **kwargs rather
    than raised.
    """
    def __init__(self, interval, func, **kwargs):
        self._interval = interval
        self._fn = func
        # kwargs are forwarded verbatim to func on every firing; 'excq',
        # when present, also serves as the error-reporting queue.
        self._kwargs = kwargs
        self.stop_flag = False
        self._excq = kwargs.get('excq')

    def run(self):
        try:
            if self.stop_flag:
                return
            start = time.time()
            self._fn(**self._kwargs)
            end = time.time()
            delta = end - start
            # Schedule the next firing, compensating for runtime.
            # NOTE(review): a run longer than the interval produces a
            # negative delay; threading.Timer then fires immediately.
            self.thrd = threading.Timer(self._interval - delta, self.run)
            self.thrd.start()
        except Exception as e:
            if self._excq:
                emsg = ('%(name)s : %(excp)s' % {'name': self._fn.__name__,
                                                 'excp': str(e)})
                self._excq.put(emsg, block=False)

    def stop(self):
        # Cancel the pending timer; stop_flag also halts a run() that is
        # already executing past the cancel.
        # NOTE(review): if cancel() raises (e.g. stop() called before the
        # first run(), when self.thrd does not exist yet), stop_flag is
        # never set — confirm whether that ordering is intended.
        try:
            self.thrd.cancel()
            self.stop_flag = True
        except Exception as e:
            if self._excq:
                emsg = ('Exception in timer stop %s' % str(e))
                self._excq.put(emsg, block=False)
class EventProcessingThread(threading.Thread):
    """Event processing thread.

    Runs ``getattr(obj, task)()`` on the thread; any exception is
    formatted with its traceback and pushed onto the optional *excq*
    queue instead of killing the process.
    """

    def __init__(self, name, obj, task, excq=None):
        super(EventProcessingThread, self).__init__(name=name)
        self._thread_name = name
        self._hdlr = obj
        self._task = task
        self._excq = excq

    def run(self):
        try:
            getattr(self._hdlr, self._task)()
        except Exception:
            if self._excq:
                exc_type, exc_value, exc_tb = sys.exc_info()
                tbstr = traceback.format_exception(exc_type, exc_value, exc_tb)
                exstr = str(dict(name=self._thread_name, tb=tbstr))
                self._excq.put(exstr, block=False)

    @property
    def am_i_active(self):
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the supported spelling on both py2 and py3.
        return self.is_alive()

    @property
    def name(self):
        return self._thread_name
class Dict2Obj(object):
    """Convert a dictionary to an object.

    Nested dictionaries become nested Dict2Obj instances; reading a
    missing attribute yields None instead of raising AttributeError.
    """

    def __init__(self, d):
        # dict.items() works on both py2 and py3 — the six.iteritems
        # dependency was unnecessary here.
        for key, val in d.items():
            # Check if it is nested dictionary
            if isinstance(val, dict):
                setattr(self, key, Dict2Obj(val))
            else:
                setattr(self, key, val)

    def __getattr__(self, val):
        # Invoked only when normal attribute lookup fails -> default None.
        return self.__dict__.get(val)
def get_uuid():
    """Generate a random (version 4) UUID rendered as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
def lock():
    """Return a fresh (unlocked) mutual-exclusion lock."""
    mutex = Lock()
    return mutex
def utc_time(ct):
    """Parse a ctime-style timestamp string; returns None for falsy input."""
    if not ct:
        return None
    return datetime.datetime.strptime(ct, TIME_FORMAT)
def utc_time_lapse(lapse):
    """Return the parsed current local time minus `lapse` hours."""
    now = utc_time(time.ctime())
    return now - datetime.timedelta(hours=lapse)
def is_valid_ipv4(addr):
    """Return True when `addr` is accepted by inet_aton as IPv4."""
    try:
        socket.inet_aton(addr)
    except socket.error:
        return False
    return True
def is_valid_mac(addr):
    """Check the syntax of a given mac address.

    The acceptable format is xx:xx:xx:xx:xx:xx, where each xx is one or
    two hexadecimal digits.
    """
    octets = addr.split(':')
    if len(octets) != 6:
        return False
    for octet in octets:
        # int(x, 16) alone also accepts signed tokens such as '-1' or
        # '+f', so the original validated '-1:00:00:00:00:00' as a MAC.
        # Restrict each field to 1-2 characters and the 0..255 range.
        if not 1 <= len(octet) <= 2:
            return False
        try:
            if not 0 <= int(octet, 16) <= 255:
                return False
        except ValueError:
            return False
    return True
def make_cidr(gw, mask):
    """Create network address in CIDR format.

    Return the network address for the given gateway address and prefix
    length, e.g. ('10.0.0.5', 8) -> '10.0.0.0/8'.  Returns None when
    either argument is malformed.
    """
    try:
        bits = int(mask)
        netmask = (0xFFFFFFFF << (32 - bits)) & 0xFFFFFFFF
        gw_int = struct.unpack('>L', socket.inet_aton(gw))[0]
        net_str = socket.inet_ntoa(struct.pack("!I", gw_int & netmask))
        return '%s/%s' % (net_str, mask)
    except (socket.error, struct.error, ValueError, TypeError):
        return None
def find_agent_host_id(this_host):
    """Returns the neutron agent host id for RHEL-OSP6 HA setup.

    Scans /run/resource-agents for a 'neutron-scale-<n>' marker file and
    maps it to 'neutron-n-<n>'; falls back to `this_host` otherwise.
    """
    host_id = this_host
    try:
        for _root, _dirs, fnames in os.walk('/run/resource-agents'):
            for fname in fnames:
                if 'neutron-scale-' in fname:
                    host_id = 'neutron-n-' + fname.split('-')[2]
                    break
        return host_id
    except IndexError:
        return host_id

View File

@ -1,42 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
DFA_db_session = None
Base = declarative_base()
def configure_db(cfg):
    """Create the enabler DB schema and global session (idempotent).

    Subsequent calls are no-ops once DFA_db_session has been created.
    """
    global DFA_db_session
    if DFA_db_session:
        return
    engine = create_engine(cfg.dfa_mysql.connection, echo=False)
    Base.metadata.create_all(engine)
    session_maker = sessionmaker(bind=engine, autocommit=True)
    DFA_db_session = session_maker()
def get_session():
    # Return the module-wide session created by configure_db(); None when
    # configure_db() has not been called yet.
    return DFA_db_session

File diff suppressed because it is too large Load Diff

View File

@ -1,235 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""CLI module for fabric enabler."""
from __future__ import print_function
import cmd
import itertools
import pkg_resources
import platform
from prettytable import PrettyTable
import six
import sys
from oslo_serialization import jsonutils
from oslo_utils import netutils
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.common import dfa_exceptions as dexc
from networking_cisco.apps.saf.common import rpc
from networking_cisco.apps.saf.common import utils
from networking_cisco.apps.saf.server import cisco_dfa_rest as cdr
class DfaCli(cmd.Cmd):
    """Represents fabric enabler command line interface."""

    prompt = '(enabler) '
    intro = 'Fabric Enabler Command Line Interface'

    def __init__(self):
        self.ctl_host = platform.node()
        cmd.Cmd.__init__(self)
        self._cfg = config.CiscoDFAConfig().cfg
        self.dcnm_client = cdr.DFARESTClient(self._cfg)
        # Initialize clnt *before* setup_client_rpc(): the original code
        # assigned None afterwards, clobbering the RPC client that
        # setup_client_rpc() had just created and breaking every RPC call.
        self.clnt = None
        self.setup_client_rpc()

    def setup_client_rpc(self):
        """Create the RPC client pointed at the local enabler server."""
        url = self._cfg.dfa_rpc.transport_url % (
            {'ip': self.ctl_host})
        self.clnt = rpc.DfaRpcClient(url, constants.DFA_SERVER_QUEUE)

    def set_static_ip_address(self, ipaddr, macaddr):
        """Cast a set_static_ip_address request to the enabler server."""
        context = {}
        args = jsonutils.dumps(dict(mac=macaddr, ip=ipaddr))
        msg = self.clnt.make_msg('set_static_ip_address', context, msg=args)
        resp = self.clnt.cast(msg)
        return resp

    def do_set_static_ip(self, line):
        """Assign a static IP address to the instance with a given MAC."""
        args = line.split()
        # Pair option tokens with their values.  six.moves.zip_longest is
        # used because itertools.izip_longest does not exist on Python 3.
        ip_mac = dict(six.moves.zip_longest(args[::2], args[1::2],
                                            fillvalue=''))
        ipaddr = ip_mac.get('--ip')
        macaddr = ip_mac.get('--mac')
        # Some sanity check.
        if (not ipaddr or not macaddr or
                not utils.is_valid_ipv4(ipaddr)
                or not netutils.is_valid_mac(macaddr)):
            print('Invalid input parameters.\n'
                  'Usage:'
                  ' set_static_ip --mac <mac address> --ip <ip address>')
            return
        self.set_static_ip_address(ipaddr, macaddr)

    def do_get_config_profile(self, line):
        """Print the config profiles known to DCNM with their aliases."""
        try:
            cfgp_list = self.dcnm_client.config_profile_list()
            if not cfgp_list:
                print('No config profile found.')
                return
        except dexc.DfaClientRequestFailed:
            print('Failed to access DCNM.')
            return
        cfg_table = PrettyTable(['Config Profile Name', 'Alias'])
        for cfg in cfgp_list:
            # Alias is the middle of 'defaultNetwork<Alias>Profile' names.
            if cfg.startswith('defaultNetwork'):
                cfg_alias = cfg.split('defaultNetwork')[1].split('Profile')[0]
            elif cfg.endswith('Profile'):
                cfg_alias = cfg.split('Profile')[0]
            else:
                cfg_alias = cfg
            cfg_table.add_row([cfg, cfg_alias])
        print(cfg_table)

    def do_list_networks(self, line):
        """Print the networks of a tenant's default partition."""
        tenant_name = line
        if not tenant_name:
            print('Tenant name is required.')
            return
        try:
            part_name = self._cfg.dcnm.default_partition_name
            net_list = self.dcnm_client.list_networks(tenant_name, part_name)
            if not net_list:
                print('No network found.')
                return
        except dexc.DfaClientRequestFailed:
            print('Failed to access DCNM.')
            return
        list_table = None
        for net in net_list:
            columns = net.keys()
            # Build the header lazily from the first row's keys.
            if list_table is None:
                list_table = PrettyTable(columns)
            if list_table:
                list_table.add_row(net.values())
        print(list_table)

    def do_get_network(self, line):
        """Print details of one network: get_network <tenant> <seg-id>."""
        args = line.split()
        if len(args) < 2:
            print('Invalid parameters')
            return
        if not args[1].isdigit():
            # Fixed: the original passed args[1] as a second positional
            # argument to print(), so the %s was never interpolated.
            print('Invalid segmentation id %s.' % args[1])
            return
        try:
            net = self.dcnm_client.get_network(args[0], args[1])
            if not net:
                print('No network found.')
                return
        except dexc.DfaClientRequestFailed:
            print('Failed to access DCNM.')
            return
        net_table = PrettyTable(net.keys())
        row = []
        for key, val in six.iteritems(net):
            # Stringify composite fields so PrettyTable renders them.
            if key == 'configArg' or key == 'dhcpScope':
                val = str(val)
            row.append(val)
        net_table.add_row(row)
        print(net_table)

    def do_list_organizations(self, line):
        '''Get list of organization on DCNM.'''
        org_list = self.dcnm_client.list_organizations()
        if not org_list:
            print('No organization found.')
            return
        org_table = PrettyTable(['Organization Name'])
        for org in org_list:
            org_table.add_row([org['organizationName']])
        print(org_table)

    def do_get_dcnm_version(self, line):
        '''Get current version of DCNM.'''
        ver = self.dcnm_client.get_version()
        print(ver)

    def do_get_enabler_version(self, line):
        '''Get current fabric enabler's package version.'''
        print('Version: %s' % pkg_resources.get_distribution(
            "networking-cisco").version)

    def help_get_config_profile(self):
        print('\n'.join(['get_config_profile',
                         'Display supported configuration profile in DCNM']))

    def help_list_networks(self):
        print('\n'.join(['list_networks tenant-name',
                         'Display list of network for given tenant.']))

    def help_get_network(self):
        print('\n'.join(['get_network tenant-name segmentation_id',
                         'Display network details.']))

    def help_set_static_ip(self):
        print('\n'.join(['set_static_ip --mac <mac address> --ip <ip address>',
                         'Set static ip address for an instance.']))

    def emptyline(self):
        # Override cmd.Cmd: do nothing instead of repeating last command.
        return

    def do_prompt(self, line):
        '''Set prompt for the command line.'''
        self.prompt = line + ' '

    def do_quit(self, line):
        '''exit the program.'''
        sys.exit(1)

    def do_EOF(self, line):
        '''Use Ctrl-D to exit the program.'''
        return True

    # Shortcuts
    do_q = do_quit
def dfa_cli():
    """Console-script entry point for the enabler CLI."""
    # Pad an odd argument list so option/value pairs line up, then make
    # sure the default enabler config file is always supplied.
    if len(sys.argv[1:]) % 2:
        sys.argv.append("")
    sys.argv.extend(['--config-file', '/etc/saf/enabler_conf.ini'])
    DfaCli().cmdloop()
if __name__ == '__main__':
sys.exit(dfa_cli())

View File

@ -1,26 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from networking_cisco.apps.saf.agent import dfa_agent as dfa
def dfa_agent():
    """Console-script entry point for the fabric enabler agent."""
    dfa.main()
if __name__ == '__main__':
sys.exit(dfa_agent())

View File

@ -1,27 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from networking_cisco.apps.saf.server import dfa_server as dfa
def dfa_server():
    """Console-script entry point for the fabric enabler server."""
    dfa.dfa_server()
if __name__ == '__main__':
sys.exit(dfa_server())

View File

@ -1,961 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This module provides APIs for communicating with DCNM."""
import re
import requests
import sys
from oslo_serialization import jsonutils
from networking_cisco._i18n import _LE, _LI
from networking_cisco.apps.saf.common import dfa_exceptions as dexc
from networking_cisco.apps.saf.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
UNKNOWN_SRVN_NODE_IP = '0.0.0.0'
UNKNOWN_DCI_ID = -1
class DFARESTClient(object):
"""DFA client class that provides APIs to interact with DCNM."""
def __init__(self, cfg):
    """Read DCNM connection settings from `cfg` and probe the server.

    :param cfg: oslo-style config with a `dcnm` section
    :raises ValueError: when DCNM IP, user or password is missing

    NOTE(review): construction issues REST calls (protocol/version
    probes, profile list), so it requires a reachable DCNM.
    """
    # Oldest DCNM release whose behavior matches the "iplus" code paths.
    self._base_ver = '7.1(0)'
    self._is_iplus = False
    self._ip = cfg.dcnm.dcnm_ip
    self._user = cfg.dcnm.dcnm_user
    self._pwd = cfg.dcnm.dcnm_password
    self._part_name = cfg.dcnm.default_partition_name
    if (not self._ip) or (not self._user) or (not self._pwd):
        msg = ("[DFARESTClient] Input DCNM IP, user name or password"
               "parameter is not specified")
        raise ValueError(msg)
    self._req_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json; charset=UTF-8'}
    self.default_cfg_profile = cfg.dcnm.default_cfg_profile
    self.default_vrf_profile = cfg.dcnm.default_vrf_profile
    # url timeout: 10 seconds
    self.timeout_resp = (10 if not cfg.dcnm.timeout_resp else
                         cfg.dcnm.timeout_resp)
    # Session-token lifetime (ms) requested at login.
    self._exp_time = 100000
    # HTTP statuses treated as success throughout this client.
    self._resp_ok = (requests.codes.ok, requests.codes.created,
                     requests.codes.accepted)
    self.dcnm_protocol = self.get_dcnm_protocol()
    # Fill the urls for DCNM Rest API's.
    self.fill_urls()
    self._cur_ver = self.get_version()
    self._detect_iplus()
    # Update the default network profile based on version of DCNM.
    self._set_default_cfg_profile()
    # Global mobility domain; resolved lazily by
    # _set_default_mobility_domain() when first needed.
    self._default_md = None
def _detect_iplus(self):
    """Check the DCNM version and determine if it's for iplus.

    Sets self._is_iplus True when self._cur_ver is at or above the
    baseline release stored in self._base_ver.
    """
    # Raw string: matches e.g. "7.1(0)" as (major, minor, patch).  The
    # original used a plain string (invalid '\.' escapes) and compiled
    # the pattern into a discarded result.
    ver_pat = re.compile(r"([0-9]+)\.([0-9]+)\((.*)\)")
    v1 = ver_pat.match(self._cur_ver)
    v2 = ver_pat.match(self._base_ver)
    if int(v1.group(1)) > int(v2.group(1)):
        self._is_iplus = True
    elif int(v1.group(1)) == int(v2.group(1)):
        if int(v1.group(2)) > int(v2.group(2)):
            self._is_iplus = True
        elif int(v1.group(2)) == int(v2.group(2)):
            # NOTE(review): lexicographic compare of the patch field
            # (e.g. '10' < '9' as strings); preserved from the original.
            self._is_iplus = v1.group(3) >= v2.group(3)
    LOG.info(_LI("DCNM version: %(cur_ver)s, iplus: %(is_iplus)s"),
             {'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus})
def _failure_msg(self, response):
    """Summarize a failed REST response as '[status] body'."""
    return "[{0}] {1}".format(response.status_code, response.text)
def get_segmentid_range(self, orchestrator_id):
    """Get segment id range from DCNM for one orchestrator."""
    url = '/'.join((self._segmentid_ranges_url, orchestrator_id))
    resp = self._send_request('GET', url, None, 'segment-id range')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
def set_segmentid_range(self, orchestrator_id, segid_min, segid_max):
    """set segment id range in DCNM. """
    payload = {'orchestratorId': orchestrator_id,
               'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
    resp = self._send_request('POST', self._segmentid_ranges_url,
                              payload, 'segment-id range')
    if not (resp and resp.status_code in self._resp_ok):
        LOG.error(_LE("Failed to set segment id range for orchestrator "
                      "%(orch)s on DCNM: %(text)s"),
                  {'orch': orchestrator_id, 'text': resp.text})
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(resp))
def update_segmentid_range(self, orchestrator_id, segid_min, segid_max):
    """update segment id range in DCNM. """
    url = '/'.join((self._segmentid_ranges_url, orchestrator_id))
    payload = {'orchestratorId': orchestrator_id,
               'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
    resp = self._send_request('PUT', url, payload, 'segment-id range')
    if not (resp and resp.status_code in self._resp_ok):
        LOG.error(_LE("Failed to update segment id range for orchestrator "
                      "%(orch)s on DCNM: %(text)s"),
                  {'orch': orchestrator_id, 'text': resp.text})
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(resp))
def _set_default_cfg_profile(self):
    """Set default network config profile.

    Check whether the default_cfg_profile value exist in the current
    version of DCNM. If not, set it to new default value which is
    supported by latest version.
    """
    try:
        available = self.config_profile_list()
        if self.default_cfg_profile not in available:
            if self._is_iplus:
                self.default_cfg_profile = 'defaultNetworkUniversalEfProfile'
            else:
                self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'
    except dexc.DfaClientRequestFailed:
        LOG.error(_LE("Failed to send requst to DCNM."))
        self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'
def _create_network(self, network_info):
    """POST a network-create request to DCNM.

    :param network_info: network parameters to be created on DCNM
    """
    org = network_info['organizationName']
    part = network_info['partitionName']
    url = self._create_network_url % (org, part)
    LOG.info(_LI('url %(url)s payload %(payload)s'),
             {'url': url, 'payload': network_info})
    return self._send_request('POST', url, network_info, 'network')
def _config_profile_get(self, thisprofile):
    """Fetch one config profile's details from DCNM.

    :param thisprofile: network config profile in request
    """
    url = self._cfg_profile_get_url % (thisprofile)
    resp = self._send_request('GET', url, {}, 'config-profile')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
def _config_profile_list(self):
    """Get list of supported config profile from DCNM.

    Returns None when the request fails or is rejected.
    """
    try:
        resp = self._send_request('GET', self._cfg_profile_list_url, {},
                                  'config-profile')
        if resp and resp.status_code in self._resp_ok:
            return resp.json()
    except dexc.DfaClientRequestFailed:
        LOG.error(_LE("Failed to send requst to DCNM."))
def _get_settings(self):
    """Get global mobility domain from DCNM."""
    resp = self._send_request('GET', self._global_settings_url, {},
                              'settings')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
def _set_default_mobility_domain(self):
    """Cache DCNM's global mobility domain name in self._default_md.

    Falls back to "md0" when DCNM reports no global mobility domain --
    or, unlike the original (which raised AttributeError), when the
    settings request returned nothing at all.
    """
    settings = self._get_settings()
    # Lazy %-style logging args instead of eager string interpolation.
    LOG.info(_LI("settings is %s"), settings)
    if settings and 'globalMobilityDomain' in settings:
        global_md = settings.get('globalMobilityDomain')
        self._default_md = global_md.get('name')
        LOG.info(_LI("setting default md to be %s"), self._default_md)
    else:
        self._default_md = "md0"
def _create_org(self, orch_id, name, desc):
    """Create organization on the DCNM.

    :param orch_id: orchestrator ID
    :param name: Name of organization
    :param desc: Description of organization (organization name is used
                 when empty)
    """
    payload = {
        "organizationName": name,
        "description": name if len(desc) == 0 else desc,
        "orchestrationSource": orch_id}
    return self._send_request('POST', self._org_url, payload,
                              'organization')
def _create_or_update_partition(self, org_name, part_name, desc,
                                dci_id=UNKNOWN_DCI_ID, vrf_prof=None,
                                service_node_ip=UNKNOWN_SRVN_NODE_IP,
                                operation='POST'):
    """Send create or update partition request to the DCNM.

    :param org_name: name of organization
    :param part_name: name of partition (client default when None)
    :param desc: description of partition (part_name used when empty)
    :param dci_id: DCI ID for inter-DC; sentinel UNKNOWN_DCI_ID means
        "look up the current value from DCNM"
    :param vrf_prof: VRF Profile Name; None means "look up from DCNM"
    :param service_node_ip: Service Node's Address; sentinel
        UNKNOWN_SRVN_NODE_IP means "look up from DCNM"
    :param operation: 'POST' to create, 'PUT' to update
    """
    if part_name is None:
        part_name = self._part_name
    # Any field left at its sentinel is back-filled from the partition's
    # current state so a PUT does not clobber values we weren't given.
    if vrf_prof is None or dci_id == UNKNOWN_DCI_ID or (
            service_node_ip == UNKNOWN_SRVN_NODE_IP):
        part_info = self._get_partition(org_name, part_name)
        if vrf_prof is None:
            vrf_prof = self.get_partition_vrfProf(org_name, part_name,
                                                  part_info=part_info)
        if dci_id == UNKNOWN_DCI_ID:
            dci_id = self.get_partition_dciId(org_name, part_name,
                                              part_info=part_info)
        if service_node_ip == UNKNOWN_SRVN_NODE_IP:
            service_node_ip = self.get_partition_serviceNodeIp(
                org_name, part_name, part_info=part_info)
    # POST targets the org's partition collection; PUT targets the
    # specific partition resource.
    url = ((self._create_part_url % (org_name)) if operation == 'POST' else
           self._update_part_url % (org_name, part_name))
    payload = {
        "partitionName": part_name,
        "description": part_name if len(desc) == 0 else desc,
        "serviceNodeIpAddress": service_node_ip,
        "organizationName": org_name}
    # Check the DCNM version and find out whether it is need to have
    # extra payload for the new version when creating/updating a partition.
    if self._is_iplus:
        # Need to add extra payload for the new version.
        # DCI extension is enabled only for a non-zero DCI id.
        enable_dci = "true" if dci_id and int(dci_id) != 0 else "false"
        extra_payload = {
            "vrfProfileName": vrf_prof,
            "vrfName": ':'.join((org_name, part_name)),
            "dciId": dci_id,
            "enableDCIExtension": enable_dci}
        payload.update(extra_payload)
    return self._send_request(operation, url, payload, 'partition')
def _get_partition(self, org_name, part_name=None):
    """send get partition request to the DCNM.

    :param org_name: name of organization
    :param part_name: name of partition (client default when None)
    """
    part = self._part_name if part_name is None else part_name
    resp = self._send_request("GET",
                              self._update_part_url % (org_name, part),
                              '', 'partition')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
def update_partition_static_route(self, org_name, part_name,
                                  static_ip_list, vrf_prof=None,
                                  service_node_ip=None):
    """Send static route update requests to DCNM.

    :param org_name: name of organization
    :param part_name: name of partition (client default when None)
    :param static_ip_list: List of static IP addresses
    :param vrf_prof: VRF Profile (client default when None)
    :param service_node_ip: Service Node IP address
        NOTE(review): if left None this is concatenated into cfg_args
        below and would raise TypeError -- callers appear expected to
        always pass it; confirm.
    """
    if part_name is None:
        part_name = self._part_name
    if vrf_prof is None:
        vrf_prof = self.default_vrf_profile
    operation = 'PUT'
    url = (self._update_part_url % (org_name, part_name))
    # Render the route list as "$n00=<ip>;$n01=<ip>;..." template args.
    ip_str = ''
    ip_cnt = 0
    for ip in static_ip_list:
        ip_sub = "$n0" + str(ip_cnt) + "=" + str(ip) + ";"
        ip_str = ip_str + ip_sub
        ip_cnt = ip_cnt + 1
    # NOTE(review): this is a one-element set literal (the inner strings
    # concatenate); the join below just unwraps it.  Looks accidental
    # but is behavior-neutral -- preserved as-is.
    cfg_args = {
        "$vrfName=" + org_name + ':' + part_name + ";"
        "$include_serviceNodeIpAddress=" + service_node_ip + ";"
        + ip_str
    }
    cfg_args = ';'.join(cfg_args)
    payload = {
        "partitionName": part_name,
        "organizationName": org_name,
        "dciExtensionStatus": "Not configured",
        "vrfProfileName": vrf_prof,
        "vrfName": ':'.join((org_name, part_name)),
        "configArg": cfg_args}
    res = self._send_request(operation, url, payload, 'partition')
    return (res is not None and res.status_code in self._resp_ok)
def _delete_org(self, org_name):
    """Send organization delete request to DCNM.

    :param org_name: name of organization to be deleted
    """
    url = self._del_org_url % (org_name)
    return self._send_request('DELETE', url, '', 'organization')
def _delete_partition(self, org_name, partition_name):
    """Send partition delete request to DCNM.

    :param org_name: name of organization
    :param partition_name: name of partition
    """
    url = self._del_part % (org_name, partition_name)
    return self._send_request('DELETE', url, '', 'partition')
def _delete_network(self, network_info):
    """Send network delete request to DCNM.

    :param network_info: contains network info to be deleted.
    """
    org_name = network_info.get('organizationName', '')
    part_name = network_info.get('partitionName', '')
    segment_id = network_info['segmentId']
    if 'mobDomainName' in network_info:
        # VLAN-backed networks are addressed by vlan + mobility domain
        # instead of by segment id.
        url = self._network_mob_url % (org_name, part_name,
                                       network_info['vlanId'],
                                       network_info['mobDomainName'])
    else:
        url = self._network_url % (org_name, part_name, segment_id)
    return self._send_request('DELETE', url, '', 'network')
def _get_network(self, network_info):
    """Send network get request to DCNM.

    :param network_info: contains network info to query.
    """
    url = self._network_url % (network_info.get('organizationName', ''),
                               network_info.get('partitionName', ''),
                               network_info['segmentId'])
    return self._send_request('GET', url, '', 'network')
def _login_request(self, url_login):
    """Internal function to send login request.

    On success, stores the returned 'Dcnm-Token' session token into the
    shared request headers so subsequent calls are authenticated.
    """
    expiration_time = self._exp_time
    payload = {'expirationTime': expiration_time}
    # TODO(padkrish), after testing with certificates, make the
    # verify option configurable.
    # NOTE(review): verify=False disables TLS certificate validation.
    res = requests.post(url_login,
                        data=jsonutils.dumps(payload),
                        headers=self._req_headers,
                        auth=(self._user, self._pwd),
                        timeout=self.timeout_resp, verify=False)
    session_id = ''
    if res and res.status_code in self._resp_ok:
        session_id = res.json().get('Dcnm-Token')
        self._req_headers.update({'Dcnm-Token': session_id})
def _login(self):
    """Login request to DCNM. """
    # Delegates to _login_request, which caches the session token.
    self._login_request(self._login_url)
def _logout_request(self, url_logout):
    """Internal logout request to DCNM. """
    # Fire-and-forget: the response is deliberately ignored.
    requests.post(url_logout,
                  headers=self._req_headers,
                  timeout=self.timeout_resp, verify=False)
def _logout(self, url_arg=None):
    """Logout request to DCNM."""
    # url_arg is accepted for interface compatibility but unused.
    self._logout_request(self._logout_url)
def _send_request(self, operation, url, payload, desc):
    """Send request to DCNM.

    Wraps every call in a fresh login/logout pair; returns the
    requests.Response, or None when an exception fired before the
    request completed.

    :param operation: HTTP verb ('GET', 'POST', 'PUT', 'DELETE')
    :param url: full DCNM REST URL
    :param payload: dict serialized to JSON, or ''/None for no body
    :param desc: short label used in log messages
    :raises dexc.DfaClientRequestFailed: on transport-level errors
        (HTTP/timeout/connection); non-2xx statuses do NOT raise here.
    """
    res = None
    try:
        payload_json = None
        if payload and payload != '':
            payload_json = jsonutils.dumps(payload)
        # Establish the session token before the actual call.
        self._login()
        desc_lookup = {'POST': ' creation', 'PUT': ' update',
                       'DELETE': ' deletion', 'GET': ' get'}
        res = requests.request(operation, url, data=payload_json,
                               headers=self._req_headers,
                               timeout=self.timeout_resp, verify=False)
        desc += desc_lookup.get(operation, operation.lower())
        LOG.info(_LI("DCNM-send_request: %(desc)s %(url)s %(pld)s"),
                 {'desc': desc, 'url': url, 'pld': payload})
        self._logout()
    except (requests.HTTPError, requests.Timeout,
            requests.ConnectionError) as exc:
        LOG.exception(_LE('Error during request: %s'), exc)
        raise dexc.DfaClientRequestFailed(reason=exc)
    return res
def config_profile_list(self):
    """Return the list of profile names known to DCNM."""
    profiles = self._config_profile_list() or []
    return [entry.get('profileName') for entry in profiles]
def config_profile_fwding_mode_get(self, profile_name):
    """Return forwarding mode of given config profile."""
    params = self._config_profile_get(profile_name)
    fwd_cli = 'fabric forwarding mode proxy-gateway'
    # The mode is inferred from the profile's CLI command list.
    if params and fwd_cli in params['configCommands']:
        return 'proxy-gateway'
    return 'anycast-gateway'
def get_config_profile_for_network(self, net_name):
    """Resolve the DCNM profile for a network name's ':<alias>' suffix.

    Returns (profile_name, forwarding_mode); falls back to the client's
    default profile when no alias matches.
    """
    profiles = self.config_profile_list()
    wanted = net_name.partition(':')[2]
    pairs = set()
    for prof in profiles:
        # Alias is the middle of 'defaultNetwork<Alias>Profile' names.
        if prof.startswith('defaultNetwork'):
            alias = prof.split('defaultNetwork')[1].split('Profile')[0]
        elif prof.endswith('Profile'):
            alias = prof.split('Profile')[0]
        else:
            alias = prof
        pairs.update([(prof, alias)])
    candidates = [p for p, a in pairs if wanted == a]
    prof = candidates[0] if candidates else self.default_cfg_profile
    fwd_mod = self.config_profile_fwding_mode_get(prof)
    return (prof, fwd_mod)
def create_network(self, tenant_name, network, subnet,
                   dhcp_range=True):
    """Create network on the DCNM.

    :param tenant_name: name of tenant the network belongs to
    :param network: network parameters
    :param subnet: subnet parameters of the network
    :param dhcp_range: when True, attach a DHCP scope built from the
        subnet's allocation pools
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
    """
    seg_id = str(network.segmentation_id)
    subnet_ip_mask = subnet.cidr.split('/')
    gw_ip = subnet.gateway_ip
    # Template arguments substituted into the config profile on DCNM.
    cfg_args = [
        "$segmentId=" + seg_id,
        "$netMaskLength=" + subnet_ip_mask[1],
        "$gatewayIpAddress=" + gw_ip,
        "$networkName=" + network.name,
        "$vlanId=0",
        "$vrfName=" + tenant_name + ':' + self._part_name
    ]
    cfg_args = ';'.join(cfg_args)
    ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
                         subnet.allocation_pools])
    dhcp_scopes = {'ipRange': ip_range,
                   'subnet': subnet.cidr,
                   'gateway': gw_ip}
    network_info = {"segmentId": seg_id,
                    "vlanId": "0",
                    "mobilityDomainId": "None",
                    "profileName": network.config_profile,
                    "networkName": network.name,
                    "configArg": cfg_args,
                    "organizationName": tenant_name,
                    "partitionName": self._part_name,
                    "description": network.name,
                    "netmaskLength": subnet_ip_mask[1],
                    "gateway": gw_ip}
    if dhcp_range:
        network_info["dhcpScope"] = dhcp_scopes
    if self._is_iplus:
        # Need to add the vrf name to the network info
        prof = self._config_profile_get(network.config_profile)
        if prof and prof.get('profileSubType') == 'network:universal':
            # For universal profile vrf has to e organization:partition
            network_info["vrfName"] = ':'.join((tenant_name,
                                                self._part_name))
        else:
            # Otherwise, it should be left empty.
            network_info["vrfName"] = ""
    LOG.info(_LI("Creating %s network in DCNM."), network_info)
    res = self._create_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.info(_LI("Created %s network in DCNM."), network_info)
    else:
        LOG.error(_LE("Failed to create %s network in DCNM."),
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=res)
def create_service_network(self, tenant_name, network, subnet,
                           dhcp_range=True):
    """Create a service network on the DCNM.

    Unlike create_network, this supports VLAN-backed networks (with a
    mobility domain) and a per-network partition name.

    :param tenant_name: name of tenant the network belongs to
    :param network: network parameters
    :param subnet: subnet parameters of the network
    :param dhcp_range: when True, attach a DHCP scope built from the
        subnet's allocation pools
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
    """
    network_info = {}
    subnet_ip_mask = subnet.cidr.split('/')
    # Resolve the global mobility domain lazily on first use.
    if self._default_md is None:
        self._set_default_mobility_domain()
    vlan_id = '0'
    gw_ip = subnet.gateway_ip
    part_name = network.part_name
    if not part_name:
        part_name = self._part_name
    if network.vlan_id:
        vlan_id = str(network.vlan_id)
        if network.mob_domain_name is not None:
            mob_domain_name = network.mob_domain_name
        else:
            mob_domain_name = self._default_md
    else:
        # Segment-id addressed network: no mobility domain.
        mob_domain_name = None
    seg_id = str(network.segmentation_id)
    seg_str = "$segmentId=" + seg_id
    # Template arguments substituted into the config profile on DCNM.
    cfg_args = [
        seg_str,
        "$netMaskLength=" + subnet_ip_mask[1],
        "$gatewayIpAddress=" + gw_ip,
        "$networkName=" + network.name,
        "$vlanId=" + vlan_id,
        "$vrfName=" + tenant_name + ':' + part_name
    ]
    cfg_args = ';'.join(cfg_args)
    ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
                         subnet.allocation_pools])
    dhcp_scopes = {'ipRange': ip_range,
                   'subnet': subnet.cidr,
                   'gateway': gw_ip}
    network_info = {"vlanId": vlan_id,
                    "mobilityDomainId": mob_domain_name,
                    "profileName": network.config_profile,
                    "networkName": network.name,
                    "configArg": cfg_args,
                    "organizationName": tenant_name,
                    "partitionName": part_name,
                    "description": network.name,
                    "netmaskLength": subnet_ip_mask[1],
                    "gateway": gw_ip}
    if seg_id:
        network_info["segmentId"] = seg_id
    if dhcp_range:
        network_info["dhcpScope"] = dhcp_scopes
    if hasattr(subnet, 'secondary_gw'):
        network_info["secondaryGateway"] = subnet.secondary_gw
    if self._is_iplus:
        # Need to add the vrf name to the network info
        prof = self._config_profile_get(network.config_profile)
        if prof and prof.get('profileSubType') == 'network:universal':
            # For universal profile vrf has to e organization:partition
            network_info["vrfName"] = ':'.join((tenant_name, part_name))
        else:
            # Otherwise, it should be left empty.
            network_info["vrfName"] = ""
    LOG.info(_LI("Creating %s network in DCNM."), network_info)
    res = self._create_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.info(_LI("Created %s network in DCNM."), network_info)
    else:
        LOG.error(_LE("Failed to create %s network in DCNM."),
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
def delete_network(self, tenant_name, network):
    """Delete network on the DCNM.

    :param tenant_name: name of tenant the network belongs to
    :param network: object that contains network parameters
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
    """
    network_info = {
        'organizationName': tenant_name,
        'partitionName': self._part_name,
        'segmentId': network.segmentation_id,
    }
    LOG.debug("Deleting %s network in DCNM.", network_info)
    resp = self._delete_network(network_info)
    if resp and resp.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", network_info)
    else:
        LOG.error(_LE("Failed to delete %s network in DCNM."),
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=resp)
def delete_service_network(self, tenant_name, network):
    """Delete service network on the DCNM.

    VLAN-backed networks are deleted via the vlan/mobility-domain URL,
    others via the segment-id URL.

    :param tenant_name: name of tenant the network belongs to
    :param network: object that contains network parameters
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
    """
    network_info = {}
    part_name = network.part_name
    if not part_name:
        part_name = self._part_name
    seg_id = str(network.segmentation_id)
    if network.vlan:
        vlan_id = str(network.vlan)
        if network.mob_domain_name is not None:
            mob_domain_name = network.mob_domain_name
        else:
            # The current way will not work since _default_md is obtained
            # during create_service_network. It's preferrable to get it
            # during init TODO(padkrish)
            if self._default_md is None:
                self._set_default_mobility_domain()
            mob_domain_name = self._default_md
        network_info = {
            'organizationName': tenant_name,
            'partitionName': part_name,
            'mobDomainName': mob_domain_name,
            'vlanId': vlan_id,
            'segmentId': seg_id,
        }
    else:
        network_info = {
            'organizationName': tenant_name,
            'partitionName': part_name,
            'segmentId': seg_id,
        }
    LOG.debug("Deleting %s network in DCNM.", network_info)
    res = self._delete_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", network_info)
    else:
        LOG.error(_LE("Failed to delete %s network in DCNM."),
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
def delete_project(self, tenant_name, part_name):
    """Delete project on the DCNM.

    Removes the partition first, then the organization itself.

    :param tenant_name: name of project.
    :param part_name: name of partition.
    :raises dexc.DfaClientRequestFailed: when either delete fails
    """
    resp = self._delete_partition(tenant_name, part_name)
    if resp and resp.status_code in self._resp_ok:
        LOG.debug("Deleted %s partition in DCNM.", part_name)
    else:
        LOG.error(_LE("Failed to delete %(part)s partition in DCNM."
                      "Response: %(res)s"),
                  {'part': part_name, 'res': resp})
        raise dexc.DfaClientRequestFailed(reason=resp)
    resp = self._delete_org(tenant_name)
    if resp and resp.status_code in self._resp_ok:
        LOG.debug("Deleted %s organization in DCNM.", tenant_name)
    else:
        LOG.error(_LE("Failed to delete %(org)s organization in DCNM."
                      "Response: %(res)s"),
                  {'org': tenant_name, 'res': resp})
        raise dexc.DfaClientRequestFailed(reason=resp)
def delete_partition(self, org_name, partition_name):
    """Send partition delete request to DCNM.

    :param org_name: name of the owning organization
    :param partition_name: name of partition to be deleted
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
    """
    resp = self._delete_partition(org_name, partition_name)
    if resp and resp.status_code in self._resp_ok:
        LOG.debug("Deleted %s partition in DCNM.", partition_name)
    else:
        LOG.error(_LE("Failed to delete %(part)s partition in DCNM."
                      "Response: %(res)s"),
                  ({'part': partition_name, 'res': resp}))
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(resp))
def create_project(self, orch_id, org_name, part_name, dci_id, desc=None):
"""Create project on the DCNM.
:param orch_id: orchestrator ID
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
"""
desc = desc or org_name
res = self._create_org(orch_id, org_name, desc)
if res and res.status_code in self._resp_ok:
LOG.debug("Created %s organization in DCNM.", org_name)
else:
LOG.error(_LE("Failed to create %(org)s organization in DCNM."
"Response: %(res)s"), {'org': org_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res)
self.create_partition(org_name, part_name, dci_id,
self.default_vrf_profile, desc=desc)
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
service_node_ip=UNKNOWN_SRVN_NODE_IP,
vrf_prof=None, desc=None):
"""Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
"""
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name, desc,
dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof,
operation='PUT')
if res and res.status_code in self._resp_ok:
LOG.debug("Update %s partition in DCNM.", part_name)
else:
LOG.error(_LE("Failed to update %(part)s partition in DCNM."
"Response: %(res)s"), {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res)
def create_partition(self, org_name, part_name, dci_id, vrf_prof,
service_node_ip=None, desc=None):
"""Create partition on the DCNM.
:param org_name: name of organization to be created
:param part_name: name of partition to be created
:param dci_id: DCI ID
:vrf_prof: VRF profile for the partition
:param service_node_ip: Specifies the Default route IP address.
:param desc: string that describes organization
"""
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name,
desc, dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof)
if res and res.status_code in self._resp_ok:
LOG.debug("Created %s partition in DCNM.", part_name)
else:
LOG.error(_LE("Failed to create %(part)s partition in DCNM."
"Response: %(res)s"), ({'part': part_name, 'res': res}))
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
def get_partition_vrfProf(self, org_name, part_name=None, part_info=None):
"""get VRF Profile for the partition from the DCNM.
:param org_name: name of organization
:param part_name: name of partition
"""
vrf_profile = None
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info(_LI("query result from dcnm for partition info is %s"),
part_info)
if ("vrfProfileName" in part_info):
vrf_profile = part_info.get("vrfProfileName")
return vrf_profile
def get_partition_dciId(self, org_name, part_name, part_info=None):
"""get DCI ID for the partition.
:param org_name: name of organization
:param part_name: name of partition
"""
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info(_LI("query result from dcnm for partition info is %s"),
part_info)
if part_info is not None and "dciId" in part_info:
return part_info.get("dciId")
def get_partition_serviceNodeIp(self, org_name, part_name, part_info=None):
"""get Service Node IP address from the DCNM.
:param org_name: name of organization
:param part_name: name of partition
"""
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info(_LI("query result from dcnm for partition info is %s"),
part_info)
if part_info is not None and "serviceNodeIpAddress" in part_info:
return part_info.get("serviceNodeIpAddress")
def get_partition_segmentId(self, org_name, part_name, part_info=None):
"""get partition Segment ID from the DCNM.
:param org_name: name of organization
:param part_name: name of partition
"""
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info(_LI("query result from dcnm for partition info is %s"),
part_info)
if part_info is not None and "partitionSegmentId" in part_info:
return part_info.get("partitionSegmentId")
def list_networks(self, org, part):
"""Return list of networks from DCNM.
:param org: name of organization.
:param part: name of partition.
"""
if org and part:
list_url = self._del_part + '/networks'
list_url = list_url % (org, part)
res = self._send_request('GET', list_url, '', 'networks')
if res and res.status_code in self._resp_ok:
return res.json()
def list_organizations(self):
"""Return list of organizations from DCNM."""
try:
res = self._send_request('GET', self._org_url, '', 'organizations')
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error(_LE("Failed to send request to DCNM."))
def get_network(self, org, segid):
"""Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network.
"""
network_info = {
'organizationName': org,
'partitionName': self._part_name,
'segmentId': segid,
}
res = self._get_network(network_info)
if res and res.status_code in self._resp_ok:
return res.json()
def get_version(self):
"""Get the DCNM version."""
url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
payload = {}
try:
res = self._send_request('GET', url, payload, 'dcnm-version')
if res and res.status_code in self._resp_ok:
return res.json().get('Dcnm-Version')
except dexc.DfaClientRequestFailed as exc:
LOG.error(_LE("Failed to get DCNM version."))
sys.exit(_LE("ERROR: Failed to connect to DCNM: %s"), exc)
def _verify_protocol(self, protocol):
try:
self._login_request("%s://%s/rest/logon" % (protocol, self._ip))
self._logout_request("%s://%s/rest/logout" % (protocol, self._ip))
except (requests.HTTPError, requests.Timeout,
requests.ConnectionError) as exc:
LOG.error(_LE("Login Test failed for %(protocol)s Exc %(exc)s."),
{'protocol': protocol, 'exc': exc})
return False
return True
def get_dcnm_protocol(self):
"""Routine to find out if DCNM is using http or https.
DCNM 10 (Fuji-4) and above does not support http. Only https is
supported and enabled by default.
Prior DCNM versions supported both http and https. But, only http
was enabled by default.
So, enabler needs to find out if DCNM is supporting http or https to
be friendly with the existing installed setups.
"""
if self._verify_protocol('https'):
return 'https'
if self._verify_protocol('http'):
return 'http'
sys.exit(_LE("ERROR: Both http and https test failed"))
def _build_url(self, url_remaining):
"""This function builds the URL from host, protocol and string. """
return self.host_protocol_url + url_remaining
def fill_urls(self):
"""This assigns the URL's based on the protocol. """
protocol = self.dcnm_protocol
self._org_url = '%s://%s/rest/auto-config/organizations' % (
(protocol, self._ip))
self._create_network_url = ('%s://%s/' % (protocol, self._ip) +
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self.host_protocol_url = '%s://%s/' % (protocol, self._ip)
self._create_network_url = self._build_url(
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self._cfg_profile_list_url = '%s://%s/rest/auto-config/profiles' % (
(protocol, self._ip))
self._cfg_profile_get_url = self._cfg_profile_list_url + '/%s'
self._global_settings_url = self._build_url(
'rest/auto-config/settings')
self._create_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions')
self._update_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._del_org_url = self._build_url(
'rest/auto-config/organizations/%s')
self._del_part = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._network_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/segment/%s')
self._network_mob_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/vlan/%s/mobility-domain/%s')
self._segmentid_ranges_url = self._build_url(
'rest/settings/segmentid-ranges')
self._login_url = self._build_url('rest/logon')
self._logout_url = self._build_url('rest/logout')

View File

@ -1,198 +0,0 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import socket
from networking_cisco._i18n import _LE
from keystoneauth1.identity import generic
from keystoneauth1 import session
from keystoneclient import client as k_client
from neutronclient.v2_0 import client as n_client
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.common import rpc
LOG = logging.getLogger(__name__)
class EventsHandler(object):
    """This class defines methods to listen and process events.

    Consumes OpenStack service notifications from a messaging queue,
    prioritizes them and pushes them onto a shared priority queue, and
    maintains RPC clients for per-host DFA agents.
    """

    def __init__(self, ser_name, pqueue, c_pri, d_pri):
        # :param ser_name: service whose notifications are consumed
        #     (e.g. 'keystone', 'neutron').
        # :param pqueue: priority queue that received events are put on.
        # :param c_pri: priority used for create (and update) events.
        # :param d_pri: priority used for delete events.
        self._service = None
        self._service_name = ser_name
        self._clients = {}
        self._nclient = None
        self._pq = pqueue
        self._create_pri = c_pri
        # NOTE(review): update events reuse the create priority rather
        # than taking their own parameter — confirm this is intended.
        self._update_pri = c_pri
        self._delete_pri = d_pri
        self._cfg = config.CiscoDFAConfig().cfg
        self._q_agent = constants.DFA_AGENT_QUEUE
        self._url = self._cfg.dfa_rpc.transport_url
        dfaq = self._cfg.dfa_notify.cisco_dfa_notify_queue % (
            {'service_name': ser_name})
        notify_queue = dfaq
        # NOTE(review): 'dfaq in notify_queue' is always true because a
        # string contains itself, so _notify_queue is effectively dfaq;
        # the conditional looks like leftover logic — verify.
        self._notify_queue = dfaq if dfaq in notify_queue else None
        # Setup notification listener for the events.
        self._setup_notification_listener(self._notify_queue, self._url)
        if ser_name != 'keystone':
            return
        # Only the keystone handler builds an authenticated keystone
        # client; other services just consume notifications.
        user = self._cfg.keystone_authtoken.username
        project = self._cfg.keystone_authtoken.project_name
        passwd = self._cfg.keystone_authtoken.password
        url = self._cfg.keystone_authtoken.auth_url
        u_domain = self._cfg.keystone_authtoken.user_domain_name
        p_domain = self._cfg.keystone_authtoken.project_domain_name
        auth = generic.Password(auth_url=url,
                                username=user,
                                password=passwd,
                                project_name=project,
                                project_domain_name=p_domain,
                                user_domain_name=u_domain)
        sess = session.Session(auth=auth)
        self._service = k_client.Client(session=sess)

    @property
    def nclient(self):
        """Neutron client, created lazily on first access and cached."""
        if self._nclient:
            return self._nclient
        user = self._cfg.neutron.username
        project = self._cfg.neutron.project_name
        passwd = self._cfg.neutron.password
        url = self._cfg.neutron.auth_url
        # NOTE(review): domain names come from the keystone_authtoken
        # section, not the neutron section — presumably shared; verify
        # against deployment config.
        u_domain = self._cfg.keystone_authtoken.user_domain_name
        p_domain = self._cfg.keystone_authtoken.project_domain_name
        auth = generic.Password(auth_url=url,
                                username=user,
                                password=passwd,
                                project_name=project,
                                project_domain_name=p_domain,
                                user_domain_name=u_domain)
        sess = session.Session(auth=auth)
        self._nclient = n_client.Client(session=sess)
        return self._nclient

    def _setup_notification_listener(self, topic_name, url):
        """Setup notification listener for a service."""
        self.notify_listener = rpc.DfaNotifcationListener(
            topic_name, url, rpc.DfaNotificationEndpoints(self))

    def start(self):
        """Start consuming notifications, if a listener was created."""
        if self.notify_listener:
            self.notify_listener.start()

    def wait(self):
        """Block until the notification listener finishes."""
        if self.notify_listener:
            self.notify_listener.wait()

    def create_rpc_client(self, thishost):
        """Create and cache an RPC client for the agent on *thishost*.

        No-op when a client is already cached; logs an error (and caches
        nothing) when the host name does not resolve.
        """
        clnt = self._clients.get(thishost)
        if clnt is None:
            try:
                host_ip = socket.gethostbyname(thishost)
            except socket.gaierror:
                LOG.error(_LE('Invalid host name for agent: %s'), thishost)
            else:
                clnt = rpc.DfaRpcClient(self._url,
                                        '_'.join((self._q_agent, thishost)),
                                        exchange=constants.DFA_EXCHANGE)
                self._clients[thishost] = clnt
                LOG.debug('Created client for agent: %(host)s:%(ip)s',
                          {'host': thishost, 'ip': host_ip})

    def callback(self, timestamp, event_type, payload):
        """Callback method for processing events in notification queue.

        :param timestamp: time the message is received.
        :param event_type: event type in the notification queue such as
            identity.project.created, identity.project.deleted.
        :param payload: Contains information of an event
        """
        try:
            data = (event_type, payload)
            LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
                      'payload: %(payload)s\n', (
                          {'event': event_type, 'payload': payload}))
            # Map the event type to a queue priority; anything that is
            # not a create/update falls through to the delete priority.
            if 'create' in event_type:
                pri = self._create_pri
            elif 'delete' in event_type:
                pri = self._delete_pri
            elif 'update' in event_type:
                pri = self._update_pri
            else:
                pri = self._delete_pri
            self._pq.put((pri, timestamp, data))
        except Exception as exc:
            # Never let a malformed notification kill the listener.
            LOG.exception(_LE('Error: %(err)s for event %(event)s'),
                          {'err': str(exc), 'event': event_type})

    def event_handler(self):
        """Wait on queue for listening to the events."""
        if not self._notify_queue:
            LOG.error(_LE('event_handler: no notification queue for %s'),
                      self._service_name)
            return
        LOG.debug('calling event handler for %s', self)
        self.start()
        self.wait()

    def send_vm_info(self, thishost, msg):
        """Send VM information to the agent on *thishost* over RPC.

        Silently returns when no RPC client exists for the host.
        """
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("send_vm_info: Agent on %s is not active.", thishost)
            return
        context = {}
        thismsg = clnt.make_msg('send_vm_info', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("send_vm_info: resp = %s", resp)

    def update_ip_rule(self, thishost, msg):
        """Ask the agent on *thishost* to update an IP rule over RPC.

        Silently returns when no RPC client exists for the host.
        """
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("update_ip_rule: Agent on %s is not active.", thishost)
            return
        context = {}
        thismsg = clnt.make_msg('update_ip_rule', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("update_ip_rule: resp = %s", resp)

    def send_msg_to_agent(self, thishost, msg_type, msg):
        """Send a typed message to the agent on *thishost* over RPC.

        :param msg_type: message type, carried in the RPC context.
        Silently returns when no RPC client exists for the host.
        """
        clnt = self._clients.get(thishost)
        if clnt is None:
            LOG.debug("send_msg_to_agent: Agent on %s is not active.",
                      thishost)
            return
        context = {'type': msg_type}
        thismsg = clnt.make_msg('send_msg_to_agent', context, msg=msg)
        resp = clnt.call(thismsg)
        LOG.debug("send_msg_to_agent: resp = %s", resp)

Some files were not shown because too many files have changed in this diff Show More