Merge tag '4.4+dfsg1' into debian/newton

This commit is contained in:
Thomas Goirand 2016-07-15 16:15:41 +02:00
commit 3b16ee402b
338 changed files with 23994 additions and 10197 deletions

View File

@ -10,6 +10,6 @@
# W0614: Unused import %s from wildcard import
# R0801: Similar lines in %s files
disable=C0111,W0511,W0142,E0602,C0103,E1101,R0903,W0614,R0801
output-format=parseable
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
reports=yes
files-output=no

View File

@ -1,17 +1,21 @@
language: python
python:
- "2.7"
- "3.5" # Python 3.5 still needs to be installed on Travis-CI
env:
- TOX_ENV=py26
- TOX_ENV=py27
- TOX_ENV=py34
- TOX_ENV=py35
- TOX_ENV=pypy26
- TOX_ENV=pep8
install:
- "pip install tox"
- pip install tox coveralls
script:
- NOSE_VERBOSE=0 tox -e $TOX_ENV
after_success:
- coveralls
sudo: false

View File

@ -24,7 +24,7 @@ If you prefer to install Ryu from the source code::
% cd ryu; python ./setup.py install
If you want to write your Ryu application, have a look at
`Writing ryu application <http://ryu.readthedocs.org/en/latest/writing_ryu_app.html>`_ document.
`Writing ryu application <http://ryu.readthedocs.io/en/latest/writing_ryu_app.html>`_ document.
After writing your application, just type::
% ryu-manager yourapp.py
@ -38,11 +38,13 @@ Some functionalities of ryu requires extra packages:
- OF-Config requires lxml
- NETCONF requires paramiko
- BGP speaker (ssh console) requires paramiko
- OVSDB support requires ovs (Note: python 3.4 requires ovs>=2.6.0.dev0)
If you want to use the functionalities, please install requirements::
% pip install lxml
% pip install paramiko
% pip install ovs
Support

File diff suppressed because it is too large Load Diff

View File

@ -132,6 +132,7 @@ html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

View File

@ -9,6 +9,7 @@ Ryu provides some useful library for your network applications.
library_packet.rst
library_packet_ref.rst
library_pcap.rst
library_of_config.rst
library_bgp_speaker.rst
library_bgp_speaker_ref.rst

View File

@ -0,0 +1,27 @@
*****************
PCAP file library
*****************
Introduction
============
Ryu PCAP file library helps you to read/write PCAP files whose file
format is described in `The Wireshark Wiki`_.
.. _The Wireshark Wiki: https://wiki.wireshark.org/Development/LibpcapFileFormat
Reading PCAP file
=================
For loading the packet data contained in PCAP files, you can use
pcaplib.Reader.
.. autoclass:: ryu.lib.pcaplib.Reader
Writing PCAP file
=================
For dumping the packet data which your RyuApp received, you can use
pcaplib.Writer.
.. autoclass:: ryu.lib.pcaplib.Writer

View File

@ -27,5 +27,5 @@ but also available in OF1.2+.
Nicira Extended Match Structures
================================
.. automodule:: ryu.ofproto.nx_match
.. automodule:: ryu.ofproto.nicira_ext

View File

@ -82,20 +82,10 @@ For example, EventOFPPacketIn for packet-in message.
The OpenFlow controller part of Ryu automatically decodes OpenFlow messages
received from switches and send these events to Ryu applications which
expressed an interest using ryu.controller.handler.set_ev_cls.
OpenFlow event classes have at least the following attributes.
OpenFlow event classes are subclass of the following class.
.. tabularcolumns:: |l|L|
.. autoclass:: ryu.controller.ofp_event.EventOFPMsgBase
============ =============================================================
Attribute Description
============ =============================================================
msg An object which describes the corresponding OpenFlow message.
msg.datapath A ryu.controller.controller.Datapath instance which describes
an OpenFlow switch from which we received this OpenFlow message.
============ =============================================================
The msg object has some more additional members whose values are extracted
from the original OpenFlow message.
See :ref:`ofproto_ref` for more info about OpenFlow messages.
ryu.base.app_manager.RyuApp
@ -103,267 +93,87 @@ ryu.base.app_manager.RyuApp
See :ref:`api_ref`.
ryu.controller.handler.set_ev_cls(ev_cls, dispatchers=None)
===========================================================
ryu.controller.handler.set_ev_cls
=================================
A decorator for Ryu application to declare an event handler.
Decorated method will become an event handler.
ev_cls is an event class whose instances this RyuApp wants to receive.
dispatchers argument specifies one of the following negotiation phases
(or a list of them) for which events should be generated for this handler.
Note that, in case an event changes the phase, the phase before the change
is used to check the interest.
.. tabularcolumns:: |l|L|
=========================================== ==================================
Negotiation phase Description
=========================================== ==================================
ryu.controller.handler.HANDSHAKE_DISPATCHER Sending and waiting for hello
message
ryu.controller.handler.CONFIG_DISPATCHER Version negotiated and sent
features-request message
ryu.controller.handler.MAIN_DISPATCHER Switch-features message received
and sent set-config message
ryu.controller.handler.DEAD_DISPATCHER Disconnect from the peer. Or
disconnecting due to some
unrecoverable errors.
=========================================== ==================================
.. autofunction:: ryu.controller.handler.set_ev_cls
ryu.controller.controller.Datapath
==================================
A class to describe an OpenFlow switch connected to this controller.
An instance has the following attributes.
.. tabularcolumns:: |l|L|
====================================== =======================================
Attribute Description
====================================== =======================================
id 64-bit OpenFlow Datapath ID.
Only available for
ryu.controller.handler.MAIN_DISPATCHER
phase.
ofproto A module which exports OpenFlow
definitions, mainly constants appeared
in the specification, for the
negotiated OpenFlow version. For
example, ryu.ofproto.ofproto_v1_0 for
OpenFlow 1.0.
ofproto_parser A module which exports OpenFlow wire
message encoder and decoder for the
negotiated OpenFlow version. For
example, ryu.ofproto.ofproto_v1_0_parser
for OpenFlow 1.0.
ofproto_parser.OFPxxxx(datapath, ....) A callable to prepare an OpenFlow
message for the given switch. It can
be sent with Datapath.send_msg later.
xxxx is a name of the message. For
example OFPFlowMod for flow-mod
message. Arguments depend on the
message.
set_xid(self, msg) Generate an OpenFlow XID and put it
in msg.xid.
send_msg(self, msg) Queue an OpenFlow message to send to
the corresponding switch. If msg.xid
is None, set_xid is automatically
called on the message before queueing.
send_packet_out deprecated
send_flow_mod deprecated
send_flow_del deprecated
send_delete_all_flows deprecated
send_barrier Queue an OpenFlow barrier message to
send to the switch.
send_nxt_set_flow_format deprecated
is_reserved_port deprecated
====================================== =======================================
.. autoclass:: ryu.controller.controller.Datapath
ryu.controller.event.EventBase
==============================
The base of all event classes.
A Ryu application can define its own event type by creating a subclass.
.. autoclass:: ryu.controller.event.EventBase
ryu.controller.event.EventRequestBase
=====================================
The base class for synchronous request for RyuApp.send_request.
.. autoclass:: ryu.controller.event.EventRequestBase
ryu.controller.event.EventReplyBase
===================================
The base class for synchronous request reply for RyuApp.send_reply.
.. autoclass:: ryu.controller.event.EventReplyBase
ryu.controller.ofp_event.EventOFPStateChange
============================================
An event class for negotiation phase change notification.
An instance of this class is sent to observer after changing
the negotiation phase.
An instance has at least the following attributes.
.. autoclass:: ryu.controller.ofp_event.EventOFPStateChange
========= ====================================================================
Attribute Description
========= ====================================================================
datapath ryu.controller.controller.Datapath instance of the switch
========= ====================================================================
ryu.controller.ofp_event.EventOFPPortStateChange
================================================
.. autoclass:: ryu.controller.ofp_event.EventOFPPortStateChange
ryu.controller.dpset.EventDP
============================
An event class to notify connect/disconnect of a switch.
For OpenFlow switches, one can get the same notification by observing
ryu.controller.ofp_event.EventOFPStateChange.
An instance has at least the following attributes.
========= ====================================================================
Attribute Description
========= ====================================================================
dp A ryu.controller.controller.Datapath instance of the switch
enter True when the switch connected to our controller. False for
disconnect.
========= ====================================================================
.. autoclass:: ryu.controller.dpset.EventDP
ryu.controller.dpset.EventPortAdd
=================================
An event class for switch port status notification.
This event is generated when a new port is added to a switch.
For OpenFlow switches, one can get the same notification by observing
ryu.controller.ofp_event.EventOFPPortStatus.
An instance has at least the following attributes.
========= ====================================================================
Attribute Description
========= ====================================================================
dp A ryu.controller.controller.Datapath instance of the switch
port port number
========= ====================================================================
.. autoclass:: ryu.controller.dpset.EventPortAdd
ryu.controller.dpset.EventPortDelete
====================================
An event class for switch port status notification.
This event is generated when a port is removed from a switch.
For OpenFlow switches, one can get the same notification by observing
ryu.controller.ofp_event.EventOFPPortStatus.
An instance has at least the following attributes.
========= ====================================================================
Attribute Description
========= ====================================================================
dp A ryu.controller.controller.Datapath instance of the switch
port port number
========= ====================================================================
.. autoclass:: ryu.controller.dpset.EventPortDelete
ryu.controller.dpset.EventPortModify
====================================
An event class for switch port status notification.
This event is generated when some attribute of a port is changed.
For OpenFlow switches, one can get the same notification by observing
ryu.controller.ofp_event.EventOFPPortStatus.
An instance has at least the following attributes.
========= ====================================================================
Attribute Description
========= ====================================================================
dp A ryu.controller.controller.Datapath instance of the switch
port port number
========= ====================================================================
.. autoclass:: ryu.controller.dpset.EventPortModify
ryu.controller.network.EventNetworkPort
=======================================
An event class for notification of port arrival and departure.
This event is generated when a port is introduced to or removed from a network
by the REST API.
An instance has at least the following attributes.
========== ===================================================================
Attribute Description
========== ===================================================================
network_id Network ID
dpid OpenFlow Datapath ID of the switch to which the port belongs.
port_no OpenFlow port number of the port
add_del True for adding a port. False for removing a port.
========== ===================================================================
.. autoclass:: ryu.controller.network.EventNetworkPort
ryu.controller.network.EventNetworkDel
======================================
An event class for network deletion.
This event is generated when a network is deleted by the REST API.
An instance has at least the following attributes.
========== ===================================================================
Attribute Description
========== ===================================================================
network_id Network ID
========== ===================================================================
.. autoclass:: ryu.controller.network.EventNetworkDel
ryu.controller.network.EventMacAddress
======================================
An event class for end-point MAC address registration.
This event is generated when an end-point MAC address is updated
by the REST API.
An instance has at least the following attributes.
=========== ==================================================================
Attribute Description
=========== ==================================================================
network_id Network ID
dpid OpenFlow Datapath ID of the switch to which the port belongs.
port_no OpenFlow port number of the port
mac_address The old MAC address of the port if add_del is False. Otherwise
the new MAC address.
add_del False if this event is a result of a port removal. Otherwise
True.
=========== ==================================================================
.. autoclass:: ryu.controller.network.EventMacAddress
ryu.controller.tunnels.EventTunnelKeyAdd
========================================
An event class for tunnel key registration.
This event is generated when a tunnel key is registered or updated
by the REST API.
An instance has at least the following attributes.
=========== ==================================================================
Attribute Description
=========== ==================================================================
network_id Network ID
tunnel_key Tunnel Key
=========== ==================================================================
.. autoclass:: ryu.controller.tunnels.EventTunnelKeyAdd
ryu.controller.tunnels.EventTunnelKeyDel
========================================
An event class for tunnel key registration.
This event is generated when a tunnel key is removed by the REST API.
An instance has at least the following attributes.
=========== ==================================================================
Attribute Description
=========== ==================================================================
network_id Network ID
tunnel_key Tunnel Key
=========== ==================================================================
.. autoclass:: ryu.controller.tunnels.EventTunnelKeyDel
ryu.controller.tunnels.EventTunnelPort
======================================
An event class for tunnel port registration.
This event is generated when a tunnel port is added or removed by the REST API.
An instance has at least the following attributes.
=========== ==================================================================
Attribute Description
=========== ==================================================================
dpid OpenFlow Datapath ID
port_no OpenFlow port number
remote_dpid OpenFlow port number of the tunnel peer
add_del True for adding a tunnel. False for removal.
=========== ==================================================================
.. autoclass:: ryu.controller.tunnels.EventTunnelPort

View File

@ -14,5 +14,5 @@
# limitations under the License.
version_info = (3, 30)
version_info = (4, 4)
version = '.'.join(map(str, version_info))

View File

@ -0,0 +1,101 @@
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class ExampleSwitch13(app_manager.RyuApp):
    """A minimal OpenFlow 1.3 learning switch.

    Learns the source MAC of every packet-in, and installs a flow for
    destinations it has already learned so later traffic for that
    destination no longer has to visit the controller.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(ExampleSwitch13, self).__init__(*args, **kwargs)
        # MAC learning table: dpid -> {mac address -> port number}.
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """On handshake, install a table-miss entry (priority 0) that
        punts every unmatched packet to the controller."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        miss_match = parser.OFPMatch()
        miss_actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                               ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, miss_match, miss_actions)

    def add_flow(self, datapath, priority, match, actions):
        """Build a flow-mod applying 'actions' to 'match' and send it to
        the switch."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        instructions = [parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        flow_mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                     match=match, instructions=instructions)
        datapath.send_msg(flow_mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the sender's MAC, then forward (or flood) the packet."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # Datapath ID identifies the OpenFlow switch.
        dpid = datapath.id
        port_table = self.mac_to_port.setdefault(dpid, {})

        # Parse the received frame with the packet library.
        eth = packet.Packet(msg.data).get_protocol(ethernet.ethernet)
        dst = eth.dst
        src = eth.src
        in_port = msg.match['in_port']
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # Remember which port this source MAC arrived on.
        port_table[src] = in_port

        # Forward to the learned port, or flood while the destination is
        # still unknown.
        out_port = port_table.get(dst, ofproto.OFPP_FLOOD)
        actions = [parser.OFPActionOutput(out_port)]

        if out_port != ofproto.OFPP_FLOOD:
            # Install a flow so this (in_port, dst) pair stops causing
            # packet-ins.
            self.add_flow(datapath, 1,
                          parser.OFPMatch(in_port=in_port, eth_dst=dst),
                          actions)

        # Send the frame itself out via packet-out.
        datapath.send_msg(parser.OFPPacketOut(datapath=datapath,
                                              buffer_id=ofproto.OFP_NO_BUFFER,
                                              in_port=in_port,
                                              actions=actions,
                                              data=msg.data))

File diff suppressed because it is too large Load Diff

View File

@ -111,7 +111,11 @@ class ConfSwitchController(ControllerBase):
def set_key(self, req, dpid, key, **_kwargs):
def _set_val(dpid, key):
val = json.loads(req.body)
try:
val = req.json if req.body else {}
except ValueError:
return Response(status=http_client.BAD_REQUEST,
body='invalid syntax %s' % req.body)
self.conf_switch.set_key(dpid, key, val)
return None

View File

@ -492,8 +492,8 @@ class FirewallController(ControllerBase):
def _set_rule(self, req, switchid, vlan_id=VLANID_NONE):
try:
rule = json.loads(req.body)
except SyntaxError:
rule = req.json if req.body else {}
except ValueError:
FirewallController._LOGGER.debug('invalid syntax %s', req.body)
return Response(status=400)
@ -516,8 +516,8 @@ class FirewallController(ControllerBase):
def _delete_rule(self, req, switchid, vlan_id=VLANID_NONE):
try:
ruleid = json.loads(req.body)
except SyntaxError:
ruleid = req.json if req.body else {}
except ValueError:
FirewallController._LOGGER.debug('invalid syntax %s', req.body)
return Response(status=400)

View File

@ -506,8 +506,8 @@ class QoSController(ControllerBase):
def _access_switch(self, req, switchid, vlan_id, func, waiters):
try:
rest = json.loads(req.body) if req.body else {}
except SyntaxError:
rest = req.json if req.body else {}
except ValueError:
QoSController._LOGGER.debug('invalid syntax %s', req.body)
return Response(status=400)

View File

@ -376,42 +376,45 @@ class RouterController(ControllerBase):
@rest_command
def get_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'get_data', req.body)
'get_data', req)
# GET /router/{switch_id}/{vlan_id}
@rest_command
def get_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'get_data', req.body)
'get_data', req)
# POST /router/{switch_id}
@rest_command
def set_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'set_data', req.body)
'set_data', req)
# POST /router/{switch_id}/{vlan_id}
@rest_command
def set_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'set_data', req.body)
'set_data', req)
# DELETE /router/{switch_id}
@rest_command
def delete_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'delete_data', req.body)
'delete_data', req)
# DELETE /router/{switch_id}/{vlan_id}
@rest_command
def delete_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'delete_data', req.body)
'delete_data', req)
def _access_router(self, switch_id, vlan_id, func, rest_param):
def _access_router(self, switch_id, vlan_id, func, req):
rest_message = []
routers = self._get_router(switch_id)
param = json.loads(rest_param) if rest_param else {}
try:
param = req.json if req.body else {}
except ValueError:
raise SyntaxError('invalid syntax %s', req.body)
for router in routers.values():
function = getattr(router, func)
data = function(vlan_id, param, self.waiters)

View File

@ -158,6 +158,7 @@ class RyuApp(object):
self.threads = []
self.main_thread = None
self.events = hub.Queue(128)
self._events_sem = hub.BoundedSemaphore(self.events.maxsize)
if hasattr(self.__class__, 'LOGGER_NAME'):
self.logger = logging.getLogger(self.__class__.LOGGER_NAME)
else:
@ -280,13 +281,25 @@ class RyuApp(object):
def _event_loop(self):
while self.is_active or not self.events.empty():
ev, state = self.events.get()
self._events_sem.release()
if ev == self._event_stop:
continue
handlers = self.get_handlers(ev, state)
for handler in handlers:
handler(ev)
try:
handler(ev)
except hub.TaskExit:
# Normal exit.
# Propagate upwards, so we leave the event loop.
raise
except:
LOG.exception('%s: Exception occurred during handler processing. '
'Backtrace from offending handler '
'[%s] servicing event [%s] follows.',
self.name, handler.__name__, ev.__class__.__name__)
def _send_event(self, ev, state):
self._events_sem.acquire()
self.events.put((ev, state))
def send_event(self, name, ev, state=None):
@ -336,7 +349,7 @@ class RyuApp(object):
class AppManager(object):
# singletone
# singleton
_instance = None
@staticmethod
@ -373,6 +386,7 @@ class AppManager(object):
self.applications = {}
self.contexts_cls = {}
self.contexts = {}
self.close_sem = hub.Semaphore()
def load_app(self, name):
mod = utils.import_module(name)
@ -520,7 +534,7 @@ class AppManager(object):
self._close(app)
events = app.events
if not events.empty():
app.logger.debug('%s events remians %d', app.name, events.qsize())
app.logger.debug('%s events remains %d', app.name, events.qsize())
def close(self):
def close_all(close_dict):
@ -528,7 +542,10 @@ class AppManager(object):
self._close(app)
close_dict.clear()
for app_name in list(self.applications.keys()):
self.uninstantiate(app_name)
assert not self.applications
close_all(self.contexts)
# This semaphore prevents parallel execution of this function,
# as run_apps's finally clause starts another close() call.
with self.close_sem:
for app_name in list(self.applications.keys()):
self.uninstantiate(app_name)
assert not self.applications
close_all(self.contexts)

View File

@ -1,7 +1,9 @@
import sys
_orig_sys_path = None
def update_module_path():
# Adjust module loading path for third party libraries
import os
@ -16,6 +18,7 @@ def update_module_path():
sys.path.remove(path)
sys.path.insert(0, path) # prioritize our own copy than system's
def restore_module_path():
global _orig_sys_path

View File

@ -1 +0,0 @@
# This file intentionally left blank.

View File

@ -1,537 +0,0 @@
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import resource
import signal
import sys
import time
import ovs.dirs
import ovs.fatal_signal
#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog
vlog = ovs.vlog.Vlog("daemon")
# --detach: Should we run in the background?
_detach = False
# --pidfile: Name of pidfile (null if none).
_pidfile = None
# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None
# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False
# --no-chdir: Should we chdir to "/"?
_chdir = True
# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False
# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None
RESTART_EXIT_CODE = 5
def make_pidfile_name(name):
    """Return the pidfile path that a set_pidfile() call with 'name'
    would configure.

    A null or empty name falls back to "<RUNDIR>/<PROGRAM_NAME>.pid";
    anything else is resolved relative to ovs.dirs.RUNDIR.
    """
    if name is None or name == "":
        return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
    return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
def set_pidfile(name):
    """Arrange for a later daemonize() call to create a pidfile named
    'name'.

    A name beginning with '/' is used as an absolute path; any other
    name is taken relative to ovs.dirs.RUNDIR.  A null name means
    ovs.util.PROGRAM_NAME plus a ".pid" suffix.
    """
    global _pidfile
    _pidfile = make_pidfile_name(name)
def get_pidfile():
    """Return the configured pidfile's absolute path, or None when no
    pidfile has been configured."""
    return _pidfile
def set_no_chdir():
    """Disable the chdir to "/" that daemonizing performs by default."""
    global _chdir
    _chdir = False
def is_chdir_enabled():
    """Return True if daemonizing will chdir to "/"."""
    return _chdir
def ignore_existing_pidfile():
    """Allow an existing, locked pidfile to be overwritten.

    Normally daemonize()/daemonize_start() terminates the program with a
    message when a locked pidfile already exists; after this call the
    pidfile is replaced, with only a warning.
    """
    global _overwrite_pidfile
    _overwrite_pidfile = True
def set_detach():
    """Make a later daemonize() call detach from the foreground session
    and run this process in the background."""
    global _detach
    _detach = True
def get_detach():
    """Return True if daemonize() will really detach."""
    return _detach
def set_monitor():
    """Make a later daemonize() call fork a supervisory process that
    monitors the daemon and restarts it if it dies due to an error
    signal."""
    global _monitor
    _monitor = True
def _fatal(msg):
    """Log 'msg', echo it on stderr, and exit unsuccessfully."""
    vlog.err(msg)
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
def _make_pidfile():
    """If a pidfile has been configured, creates it and stores the running
    process's pid in it.  Ensures that the pidfile will be deleted when the
    process exits."""
    pid = os.getpid()

    # Create a temporary pidfile.
    tmpfile = "%s.tmp%d" % (_pidfile, pid)
    ovs.fatal_signal.add_file_to_unlink(tmpfile)
    try:
        # This is global to keep Python from garbage-collecting and
        # therefore closing our file after this function exits.  That would
        # unlock the lock for us, and we don't want that.
        global file_handle

        file_handle = open(tmpfile, "w")
    except IOError, e:
        _fatal("%s: create failed (%s)" % (tmpfile, e.strerror))

    try:
        s = os.fstat(file_handle.fileno())
    except IOError, e:
        _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))

    try:
        file_handle.write("%s\n" % pid)
        file_handle.flush()
    except OSError, e:
        _fatal("%s: write failed: %s" % (tmpfile, e.strerror))

    try:
        # Non-blocking exclusive lock; it stays held (via the global file
        # handle above) for the life of the process.
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, e:
        _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))

    # Rename or link it to the correct name.
    if _overwrite_pidfile:
        try:
            os.rename(tmpfile, _pidfile)
        except OSError, e:
            _fatal("failed to rename \"%s\" to \"%s\" (%s)"
                   % (tmpfile, _pidfile, e.strerror))
    else:
        # os.link() fails with EEXIST when another instance already owns
        # the pidfile; retry only on EINTR.
        while True:
            try:
                os.link(tmpfile, _pidfile)
                error = 0
            except OSError, e:
                error = e.errno
            if error == errno.EEXIST:
                _check_already_running()
            elif error != errno.EINTR:
                break
        if error:
            _fatal("failed to link \"%s\" as \"%s\" (%s)"
                   % (tmpfile, _pidfile, os.strerror(error)))

    # Ensure that the pidfile will get deleted on exit.
    ovs.fatal_signal.add_file_to_unlink(_pidfile)

    # Delete the temporary pidfile if it still exists.
    if not _overwrite_pidfile:
        error = ovs.fatal_signal.unlink_file_now(tmpfile)
        if error:
            _fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error)))

    # Record the pidfile's identity so it can be recognized later.
    global _pidfile_dev
    global _pidfile_ino
    _pidfile_dev = s.st_dev
    _pidfile_ino = s.st_ino
def daemonize():
    """Create the pidfile and detach from the foreground session, as
    previously configured with set_pidfile() and/or set_detach()."""
    daemonize_start()
    daemonize_complete()
def _waitpid(pid, options):
    # Wrapper around os.waitpid() that reports failure as a negative
    # errno in the first element of the returned tuple instead of
    # raising OSError.
    while True:
        try:
            return os.waitpid(pid, options)
        except OSError, e:
            if e.errno == errno.EINTR:
                pass
            # NOTE(review): EINTR also falls through to this return, so
            # the surrounding retry loop never actually repeats -- verify
            # the intended behavior against upstream ovs.
            return -e.errno, 0
def _fork_and_wait_for_startup():
    """Fork; the parent then blocks until the child signals startup.

    Returns the child's pid in the parent and 0 in the child (mirroring
    os.fork()).  The child keeps the pipe's write end in _daemonize_fd so
    that _fork_notify_startup() can later wake the waiting parent.
    """
    try:
        rfd, wfd = os.pipe()
    except OSError, e:
        sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    try:
        pid = os.fork()
    except OSError, e:
        sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    if pid > 0:
        # Running in parent process.
        os.close(wfd)
        ovs.fatal_signal.fork()
        # Wait for one byte from the child, retrying on EINTR.
        while True:
            try:
                s = os.read(rfd, 1)
                error = 0
            except OSError, e:
                s = ""
                error = e.errno
            if error != errno.EINTR:
                break
        if len(s) != 1:
            # Pipe closed without a byte: the child died during startup.
            retval, status = _waitpid(pid, 0)
            if retval == pid:
                if os.WIFEXITED(status) and os.WEXITSTATUS(status):
                    # Child exited with an error.  Convey the same error to
                    # our parent process as a courtesy.
                    sys.exit(os.WEXITSTATUS(status))
                else:
                    sys.stderr.write("fork child failed to signal "
                                     "startup (%s)\n"
                                     % ovs.process.status_msg(status))
            else:
                assert retval < 0
                sys.stderr.write("waitpid failed (%s)\n"
                                 % os.strerror(-retval))
                sys.exit(1)

        os.close(rfd)
    else:
        # Running in child process.  (The original comment said "parent",
        # but os.fork() returns 0 only in the child.)
        os.close(rfd)
        ovs.timeval.postfork()
        #ovs.lockfile.postfork()

        global _daemonize_fd
        _daemonize_fd = wfd
    return pid
def _fork_notify_startup(fd):
    """Tell the process waiting in _fork_and_wait_for_startup() that we
    started up successfully, by writing a single byte to 'fd'.  A no-op
    when 'fd' is None."""
    if fd is None:
        return
    error, bytes_written = ovs.socket_util.write_fully(fd, "0")
    if error:
        sys.stderr.write("could not write to pipe\n")
        sys.exit(1)
    os.close(fd)
def _should_restart(status):
    """Decide whether the monitor should restart a dead daemon.

    Restart when the daemon asked for it by exiting with
    RESTART_EXIT_CODE, or when it was killed by one of the error signals
    listed below.
    """
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
        return True
    if os.WIFSIGNALED(status):
        error_signals = ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
                         "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ")
        termsig = os.WTERMSIG(status)
        return any(termsig == getattr(signal, name, None)
                   for name in error_signals)
    return False
def _monitor_daemon(daemon_pid):
    """Runs in the monitor process: babysits 'daemon_pid', restarting it
    when it crashes or asks to be restarted, and exiting when it exits
    normally.  Returns only in a freshly forked daemon child after a
    restart."""
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))
            if _should_restart(status):
                if os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")
                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                    ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        # BUGFIX: use print as a function so this also
                        # parses under Python 3 (output is unchanged).
                        print("sleep %f" % ((wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()
                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)

    # Running in new daemon process.
def _close_standard_fds():
    """Close stdin, stdout, stderr.  If we're started from e.g. an SSH
    session, then this keeps us from holding that session open
    artificially."""
    null_fd = ovs.socket_util.get_null_fd()
    if null_fd >= 0:
        # Point each standard descriptor at /dev/null.
        for std_fd in (0, 1, 2):
            os.dup2(null_fd, std_fd)
def daemonize_start():
    """If daemonization is configured, then starts daemonization, by forking
    and returning in the child process.  The parent process hangs around
    until the child lets it know either that it completed startup
    successfully (by calling daemonize_complete()) or that it failed to
    start up (by exiting with a nonzero exit code)."""
    if _detach:
        if _fork_and_wait_for_startup() > 0:
            # Running in parent process.
            sys.exit(0)
        # Running in daemon or monitor process.
    if _monitor:
        # _fork_and_wait_for_startup() overwrites the global _daemonize_fd
        # in its child, so save the monitor's own copy first: the monitor
        # must notify *its* parent once the daemon signals readiness.
        saved_daemonize_fd = _daemonize_fd
        daemon_pid = _fork_and_wait_for_startup()
        if daemon_pid > 0:
            # Running in monitor process.
            _fork_notify_startup(saved_daemonize_fd)
            _close_standard_fds()
            _monitor_daemon(daemon_pid)
        # Running in daemon process
    if _pidfile:
        _make_pidfile()
def daemonize_complete():
    """Tells the parent process (if any) that this process completed
    startup successfully, then finishes detaching from the controlling
    session if detaching was requested."""
    _fork_notify_startup(_daemonize_fd)

    if not _detach:
        return
    os.setsid()
    if _chdir:
        os.chdir("/")
    _close_standard_fds()
def usage():
    """Prints a usage summary of the daemon command-line options to
    stdout."""
    sys.stdout.write("""
Daemon options:
   --detach                run in background as daemon
   --no-chdir              do not chdir to '/'
   --pidfile[=FILE]        create pidfile (default: %s/%s.pid)
   --overwrite-pidfile     with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
def __read_pidfile(pidfile, delete_if_stale):
    """Reads the pid recorded in 'pidfile'.

    Returns the positive pid of the running process on success, 0 if
    'delete_if_stale' is true and the pidfile was missing or stale (and
    could be removed), or a negative errno value on error."""
    if _pidfile_dev is not None:
        try:
            s = os.stat(pidfile)
            if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
                # It's our own pidfile.  We can't afford to open it,
                # because closing *any* fd for a file that a process
                # has locked also releases all the locks on that file.
                #
                # Fortunately, we know the associated pid anyhow.
                return os.getpid()
        except OSError:
            pass

    try:
        file_handle = open(pidfile, "r+")
    except IOError as e:
        if e.errno == errno.ENOENT and delete_if_stale:
            return 0
        vlog.warn("%s: open: %s" % (pidfile, e.strerror))
        return -e.errno

    # Python fcntl doesn't directly support F_GETLK so we have to just try
    # to lock it.
    try:
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)

        # pidfile exists but wasn't locked by anyone.  Now we have the lock.
        if not delete_if_stale:
            file_handle.close()
            vlog.warn("%s: pid file is stale" % pidfile)
            return -errno.ESRCH

        # Is the file we have locked still named 'pidfile'?
        try:
            raced = False
            s = os.stat(pidfile)
            s2 = os.fstat(file_handle.fileno())
            if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
                raced = True
        except (IOError, OSError):
            # BUGFIX: os.stat()/os.fstat() raise OSError on Python 2, which
            # "except IOError" alone could never catch.
            raced = True
        if raced:
            vlog.warn("%s: lost race to delete pidfile" % pidfile)
            return -errno.EALREADY

        # We won the right to delete the stale pidfile.
        try:
            os.unlink(pidfile)
        except (IOError, OSError) as e:
            # BUGFIX: os.unlink() raises OSError on Python 2 as well.
            vlog.warn("%s: failed to delete stale pidfile (%s)"
                      % (pidfile, e.strerror))
            return -e.errno
        else:
            vlog.dbg("%s: deleted stale pidfile" % pidfile)
            file_handle.close()
            return 0
    except IOError as e:
        if e.errno not in [errno.EACCES, errno.EAGAIN]:
            vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
            return -e.errno

        # Someone else has the pidfile locked.
        try:
            try:
                error = int(file_handle.readline())
            except IOError as e:
                vlog.warn("%s: read: %s" % (pidfile, e.strerror))
                error = -e.errno
            except ValueError:
                vlog.warn("%s does not contain a pid" % pidfile)
                error = -errno.EINVAL
            return error
        finally:
            try:
                file_handle.close()
            except IOError:
                pass
def read_pidfile(pidfile):
    """Opens and reads a PID from 'pidfile'.

    Returns the positive PID if successful, otherwise a negative errno
    value."""
    return __read_pidfile(pidfile, False)
def _check_already_running():
    """Dies with an error message if the configured pidfile shows that some
    other process is already running, or if the pidfile check failed."""
    pid = __read_pidfile(_pidfile, True)
    if pid > 0:
        _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
    elif pid < 0:
        # BUGFIX: 'pid' is a negative errno value here; os.strerror()
        # expects the positive errno.
        _fatal("%s: pidfile check failed (%s), aborting"
               % (_pidfile, os.strerror(-pid)))
def add_args(parser):
    """Populates 'parser', an ArgumentParser allocated using the argparse
    module, with the command line arguments required by the daemon module."""
    pidfile = make_pidfile_name(None)
    group = parser.add_argument_group(title="Daemon Options")
    add = group.add_argument
    add("--detach", action="store_true",
        help="Run in background as a daemon.")
    add("--no-chdir", action="store_true",
        help="Do not chdir to '/'.")
    add("--monitor", action="store_true",
        help="Monitor %s process." % ovs.util.PROGRAM_NAME)
    add("--pidfile", nargs="?", const=pidfile,
        help="Create pidfile (default %s)." % pidfile)
    add("--overwrite-pidfile", action="store_true",
        help="With --pidfile, start even if already running.")
def handle_args(args):
    """Handles daemon module settings in 'args'.  'args' is an object
    containing values parsed by the parse_args() method of ArgumentParser.
    The parent ArgumentParser should have been prepared by add_args()
    before calling parse_args()."""
    # Each enabled option toggles the corresponding module-level setting,
    # in the same order as before.
    option_actions = [
        (args.detach, set_detach, ()),
        (args.no_chdir, set_no_chdir, ()),
        (args.pidfile, set_pidfile, (args.pidfile,)),
        (args.overwrite_pidfile, ignore_existing_pidfile, ()),
        (args.monitor, set_monitor, ()),
    ]
    for enabled, action, action_args in option_actions:
        if enabled:
            action(*action_args)

View File

@ -1 +0,0 @@
# This file intentionally left blank.

View File

@ -1,547 +0,0 @@
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
import ovs.poller
import ovs.socket_util
import ovs.json
import ovs.jsonrpc
import ovs.ovsuuid
import ovs.db.parser
from ovs.db import error
import ovs.db.types
class ConstraintViolation(error.Error):
    """An OVSDB error raised when a value fails a column's declared
    constraints (enum membership, numeric range, or string length)."""
    def __init__(self, msg, json=None):
        # Constraint violations always carry a fixed tag.
        error.Error.__init__(self, msg, json, tag="constraint violation")
def escapeCString(src):
    """Returns 'src' escaped for use inside a C string literal: backslash
    and double-quote are backslash-escaped, well-known control characters
    get their mnemonic escapes, and any other control character becomes a
    three-digit octal escape."""
    named = {'\n': '\\n', '\r': '\\r', '\a': '\\a', '\b': '\\b',
             '\f': '\\f', '\t': '\\t', '\v': '\\v'}
    out = []
    for ch in src:
        if ch == '\\' or ch == '"':
            out.append('\\' + ch)
        elif ord(ch) < 32:
            out.append(named.get(ch, '\\%03o' % ord(ch)))
        else:
            out.append(ch)
    return ''.join(out)
def returnUnchanged(x):
    """Identity function: returns 'x' untouched.  Used as the default
    literal-escaping hook for Atom.toEnglish()."""
    return x
class Atom(object):
    """A single OVSDB scalar value (integer, real, boolean, string, or
    UUID) in 'self.value', tagged with its ovs.db.types.AtomicType in
    'self.type'."""
    def __init__(self, type_, value=None):
        self.type = type_
        if value is not None:
            self.value = value
        else:
            self.value = type_.default_atom()
    def __cmp__(self, other):
        # Python 2 three-way comparison; atoms of different atomic types
        # are not comparable.
        if not isinstance(other, Atom) or self.type != other.type:
            return NotImplemented
        elif self.value < other.value:
            return -1
        elif self.value > other.value:
            return 1
        else:
            return 0
    def __hash__(self):
        return hash(self.value)
    @staticmethod
    def default(type_):
        """Returns the default value for the given type_, which must be an
        instance of ovs.db.types.AtomicType.
        The default value for each atomic type is;
        - 0, for integer or real atoms.
        - False, for a boolean atom.
        - "", for a string atom.
        - The all-zeros UUID, for a UUID atom."""
        return Atom(type_)
    def is_default(self):
        return self == self.default(self.type)
    @staticmethod
    def from_json(base, json, symtab=None):
        """Parses 'json' as an atom of ovs.db.types.BaseType 'base', checks
        it against 'base's constraints, and returns the new Atom.  Raises
        error.Error on type mismatch."""
        type_ = base.type
        json = ovs.db.parser.float_to_int(json)
        if ((type_ == ovs.db.types.IntegerType and type(json) in [int, long])
                or (type_ == ovs.db.types.RealType
                    and type(json) in [int, long, float])
                or (type_ == ovs.db.types.BooleanType and type(json) == bool)
                or (type_ == ovs.db.types.StringType
                    and type(json) in [str, unicode])):
            atom = Atom(type_, json)
        elif type_ == ovs.db.types.UuidType:
            atom = Atom(type_, ovs.ovsuuid.from_json(json, symtab))
        else:
            raise error.Error("expected %s" % type_.to_string(), json)
        atom.check_constraints(base)
        return atom
    @staticmethod
    def from_python(base, value):
        """Like from_json(), but 'value' is a natural Python value rather
        than a JSON representation."""
        value = ovs.db.parser.float_to_int(value)
        if type(value) in base.type.python_types:
            atom = Atom(base.type, value)
        else:
            raise error.Error("expected %s, got %s" % (base.type, type(value)))
        atom.check_constraints(base)
        return atom
    def check_constraints(self, base):
        """Checks whether 'atom' meets the constraints (if any) defined in
        'base' and raises an ovs.db.error.Error if any constraint is
        violated.  'base' and 'atom' must have the same type.
        Checking UUID constraints is deferred to transaction commit time, so
        this function does nothing for UUID constraints."""
        assert base.type == self.type
        if base.enum is not None and self not in base.enum:
            raise ConstraintViolation(
                "%s is not one of the allowed values (%s)"
                % (self.to_string(), base.enum.to_string()))
        elif base.type in [ovs.db.types.IntegerType, ovs.db.types.RealType]:
            if ((base.min is None or self.value >= base.min) and
                    (base.max is None or self.value <= base.max)):
                pass
            elif base.min is not None and base.max is not None:
                raise ConstraintViolation(
                    "%s is not in the valid range %.15g to %.15g (inclusive)"
                    % (self.to_string(), base.min, base.max))
            elif base.min is not None:
                raise ConstraintViolation(
                    "%s is less than minimum allowed value %.15g"
                    % (self.to_string(), base.min))
            else:
                raise ConstraintViolation(
                    "%s is greater than maximum allowed value %.15g"
                    % (self.to_string(), base.max))
        elif base.type == ovs.db.types.StringType:
            # XXX The C version validates that the string is valid UTF-8 here.
            # Do we need to do that in Python too?
            s = self.value
            length = len(s)
            if length < base.min_length:
                raise ConstraintViolation(
                    '"%s" length %d is less than minimum allowed length %d'
                    % (s, length, base.min_length))
            elif length > base.max_length:
                raise ConstraintViolation(
                    '"%s" length %d is greater than maximum allowed '
                    'length %d' % (s, length, base.max_length))
    def to_json(self):
        if self.type == ovs.db.types.UuidType:
            return ovs.ovsuuid.to_json(self.value)
        else:
            return self.value
    def cInitAtom(self, var):
        """Returns a list of C statements that initialize 'var' to this
        atom's value."""
        if self.type == ovs.db.types.IntegerType:
            return ['%s.integer = %d;' % (var, self.value)]
        elif self.type == ovs.db.types.RealType:
            return ['%s.real = %.15g;' % (var, self.value)]
        elif self.type == ovs.db.types.BooleanType:
            # BUGFIX: the boolean branches previously returned the bare
            # "%s" format string without substituting 'var'.
            if self.value:
                return ['%s.boolean = true;' % var]
            else:
                return ['%s.boolean = false;' % var]
        elif self.type == ovs.db.types.StringType:
            return ['%s.string = xstrdup("%s");'
                    % (var, escapeCString(self.value))]
        elif self.type == ovs.db.types.UuidType:
            return ovs.ovsuuid.to_c_assignment(self.value, var)
    def toEnglish(self, escapeLiteral=returnUnchanged):
        """Returns a human-readable English rendering of this atom; string
        atoms are passed through 'escapeLiteral'."""
        if self.type == ovs.db.types.IntegerType:
            return '%d' % self.value
        elif self.type == ovs.db.types.RealType:
            return '%.15g' % self.value
        elif self.type == ovs.db.types.BooleanType:
            if self.value:
                return 'true'
            else:
                return 'false'
        elif self.type == ovs.db.types.StringType:
            return escapeLiteral(self.value)
        elif self.type == ovs.db.types.UuidType:
            return self.value.value
    # Strings that could be confused with other literals (or contain
    # characters outside the safe set) must be quoted by to_string().
    __need_quotes_re = re.compile("$|true|false|[^_a-zA-Z]|.*[^-._a-zA-Z]")
    @staticmethod
    def __string_needs_quotes(s):
        return Atom.__need_quotes_re.match(s)
    def to_string(self):
        if self.type == ovs.db.types.IntegerType:
            return '%d' % self.value
        elif self.type == ovs.db.types.RealType:
            return '%.15g' % self.value
        elif self.type == ovs.db.types.BooleanType:
            if self.value:
                return 'true'
            else:
                return 'false'
        elif self.type == ovs.db.types.StringType:
            if Atom.__string_needs_quotes(self.value):
                return ovs.json.to_string(self.value)
            else:
                return self.value
        elif self.type == ovs.db.types.UuidType:
            return str(self.value)
    @staticmethod
    def new(x):
        """Wraps the plain Python value 'x' in a new Atom, inferring the
        atomic type.  Raises TypeError for unsupported types."""
        if type(x) in [int, long]:
            t = ovs.db.types.IntegerType
        elif type(x) == float:
            t = ovs.db.types.RealType
        elif x in [False, True]:
            t = ovs.db.types.BooleanType
        elif type(x) in [str, unicode]:
            t = ovs.db.types.StringType
        elif isinstance(x, uuid.UUID):
            # BUGFIX: isinstance() needs the uuid.UUID class; passing the
            # 'uuid' module itself raised TypeError for every UUID.
            t = ovs.db.types.UuidType
        else:
            raise TypeError
        return Atom(t, x)
class Datum(object):
    """An instance of an OVSDB type: a dict mapping Atom keys to Atom
    values (the values are None for sets and scalars), tagged with its
    ovs.db.types.Type in 'self.type'."""
    def __init__(self, type_, values=None):
        self.type = type_
        # BUGFIX: default to a fresh dict per instance instead of one
        # mutable {} shared by every Datum constructed without 'values'.
        self.values = {} if values is None else values
    def __cmp__(self, other):
        # Python 2 three-way comparison on the underlying dicts.
        if not isinstance(other, Datum):
            return NotImplemented
        elif self.values < other.values:
            return -1
        elif self.values > other.values:
            return 1
        else:
            return 0
    # Datums are mutable containers, hence unhashable.
    __hash__ = None
    def __contains__(self, item):
        return item in self.values
    def copy(self):
        return Datum(self.type, dict(self.values))
    @staticmethod
    def default(type_):
        """Returns the default Datum for 'type_': empty when allowed,
        otherwise a single default key (and default value for maps)."""
        if type_.n_min == 0:
            values = {}
        elif type_.is_map():
            values = {type_.key.default(): type_.value.default()}
        else:
            values = {type_.key.default(): None}
        return Datum(type_, values)
    def is_default(self):
        return self == Datum.default(self.type)
    def check_constraints(self):
        """Checks that each of the atoms in 'datum' conforms to the constraints
        specified by its 'type' and raises an ovs.db.error.Error.
        This function is not commonly useful because the most ordinary way to
        obtain a datum is ultimately via Datum.from_json() or Atom.from_json(),
        which check constraints themselves."""
        for keyAtom, valueAtom in self.values.iteritems():
            keyAtom.check_constraints(self.type.key)
            if valueAtom is not None:
                valueAtom.check_constraints(self.type.value)
    @staticmethod
    def from_json(type_, json, symtab=None):
        """Parses 'json' as a datum of the type described by 'type'.  If
        successful, returns a new datum.  On failure, raises an
        ovs.db.error.Error.
        Violations of constraints expressed by 'type' are treated as errors.
        If 'symtab' is nonnull, then named UUIDs in 'symtab' are accepted.
        Refer to ovsdb/SPECS for information about this, and for the syntax
        that this function accepts."""
        is_map = type_.is_map()
        if (is_map or
                (type(json) == list and len(json) > 0 and json[0] == "set")):
            if is_map:
                class_ = "map"
            else:
                class_ = "set"
            inner = ovs.db.parser.unwrap_json(json, class_, [list, tuple],
                                              "array")
            n = len(inner)
            if n < type_.n_min or n > type_.n_max:
                raise error.Error("%s must have %d to %d members but %d are "
                                  "present" % (class_, type_.n_min,
                                               type_.n_max, n),
                                  json)
            values = {}
            for element in inner:
                if is_map:
                    key, value = ovs.db.parser.parse_json_pair(element)
                    keyAtom = Atom.from_json(type_.key, key, symtab)
                    valueAtom = Atom.from_json(type_.value, value, symtab)
                else:
                    keyAtom = Atom.from_json(type_.key, element, symtab)
                    valueAtom = None
                if keyAtom in values:
                    if is_map:
                        raise error.Error("map contains duplicate key")
                    else:
                        raise error.Error("set contains duplicate")
                values[keyAtom] = valueAtom
            return Datum(type_, values)
        else:
            keyAtom = Atom.from_json(type_.key, json, symtab)
            return Datum(type_, {keyAtom: None})
    def to_json(self):
        if self.type.is_map():
            return ["map", [[k.to_json(), v.to_json()]
                            for k, v in sorted(self.values.items())]]
        elif len(self.values) == 1:
            key = self.values.keys()[0]
            return key.to_json()
        else:
            return ["set", [k.to_json() for k in sorted(self.values.keys())]]
    def to_string(self):
        # Wrap in {}/[] only for maps/sets that can hold != 1 element.
        head = tail = None
        if self.type.n_max > 1 or len(self.values) == 0:
            if self.type.is_map():
                head = "{"
                tail = "}"
            else:
                head = "["
                tail = "]"
        s = []
        if head:
            s.append(head)
        for i, key in enumerate(sorted(self.values)):
            if i:
                s.append(", ")
            s.append(key.to_string())
            if self.type.is_map():
                s.append("=")
                s.append(self.values[key].to_string())
        if tail:
            s.append(tail)
        return ''.join(s)
    def as_list(self):
        if self.type.is_map():
            return [[k.value, v.value] for k, v in self.values.iteritems()]
        else:
            return [k.value for k in self.values.iterkeys()]
    def as_dict(self):
        return dict(self.values)
    def as_scalar(self):
        """Returns the single plain value (a [key, value] pair for maps),
        or None when this datum does not have exactly one element."""
        if len(self.values) == 1:
            if self.type.is_map():
                # BUGFIX: dict.iteritems() returns an iterator, which is
                # not subscriptable; items() returns an indexable list on
                # Python 2.
                k, v = self.values.items()[0]
                return [k.value, v.value]
            else:
                return self.values.keys()[0].value
        else:
            return None
    def to_python(self, uuid_to_row):
        """Returns this datum's value converted into a natural Python
        representation of this datum's type, according to the following
        rules:
        - If the type has exactly one value and it is not a map (that is,
          self.type.is_scalar() returns True), then the value is:
            * An int or long, for an integer column.
            * An int or long or float, for a real column.
            * A bool, for a boolean column.
            * A str or unicode object, for a string column.
            * A uuid.UUID object, for a UUID column without a ref_table.
            * An object represented the referenced row, for a UUID column with
              a ref_table.  (For the Idl, this object will be an ovs.db.idl.Row
              object.)
          If some error occurs (e.g. the database server's idea of the column
          is different from the IDL's idea), then the default value for the
          scalar type is used (see Atom.default()).
        - Otherwise, if the type is not a map, then the value is a Python list
          whose elements have the types described above.
        - Otherwise, the type is a map, and the value is a Python dict that
          maps from key to value, with key and value types determined as
          described above.
        'uuid_to_row' must be a function that takes a value and an
        ovs.db.types.BaseType and translates UUIDs into row objects."""
        if self.type.is_scalar():
            value = uuid_to_row(self.as_scalar(), self.type.key)
            if value is None:
                return self.type.key.default()
            else:
                return value
        elif self.type.is_map():
            value = {}
            for k, v in self.values.iteritems():
                dk = uuid_to_row(k.value, self.type.key)
                dv = uuid_to_row(v.value, self.type.value)
                if dk is not None and dv is not None:
                    value[dk] = dv
            return value
        else:
            s = set()
            for k in self.values:
                dk = uuid_to_row(k.value, self.type.key)
                if dk is not None:
                    s.add(dk)
            return sorted(s)
    @staticmethod
    def from_python(type_, value, row_to_uuid):
        """Returns a new Datum with the given ovs.db.types.Type 'type_'.  The
        new datum's value is taken from 'value', which must take the form
        described as a valid return value from Datum.to_python() for 'type'.
        Each scalar value within 'value' is initally passed through
        'row_to_uuid', which should convert objects that represent rows (if
        any) into uuid.UUID objects and return other data unchanged.
        Raises ovs.db.error.Error if 'value' is not in an appropriate form for
        'type_'."""
        d = {}
        if type(value) == dict:
            for k, v in value.iteritems():
                ka = Atom.from_python(type_.key, row_to_uuid(k))
                va = Atom.from_python(type_.value, row_to_uuid(v))
                d[ka] = va
        elif type(value) in (list, tuple):
            for k in value:
                ka = Atom.from_python(type_.key, row_to_uuid(k))
                d[ka] = None
        else:
            ka = Atom.from_python(type_.key, row_to_uuid(value))
            d[ka] = None
        datum = Datum(type_, d)
        datum.check_constraints()
        if not datum.conforms_to_type():
            raise error.Error("%d values when type requires between %d and %d"
                              % (len(d), type_.n_min, type_.n_max))
        return datum
    def __getitem__(self, key):
        if not isinstance(key, Atom):
            key = Atom.new(key)
        if not self.type.is_map():
            raise IndexError
        elif key not in self.values:
            raise KeyError
        else:
            return self.values[key].value
    def get(self, key, default=None):
        """Map-style lookup: returns the plain value for 'key', or
        'default' when 'key' is absent."""
        if not isinstance(key, Atom):
            key = Atom.new(key)
        if key in self.values:
            return self.values[key].value
        else:
            return default
    def __str__(self):
        return self.to_string()
    def conforms_to_type(self):
        n = len(self.values)
        return self.type.n_min <= n <= self.type.n_max
    def cInitDatum(self, var):
        """Returns a list of C statements that initialize '*var' to this
        datum's keys (and values, for maps)."""
        if len(self.values) == 0:
            return ["ovsdb_datum_init_empty(%s);" % var]
        s = ["%s->n = %d;" % (var, len(self.values))]
        s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);"
              % (var, len(self.values), var)]
        for i, key in enumerate(sorted(self.values)):
            s += key.cInitAtom("%s->keys[%d]" % (var, i))
        if self.type.value:
            s += ["%s->values = xmalloc(%d * sizeof *%s->values);"
                  % (var, len(self.values), var)]
            for i, (key, value) in enumerate(sorted(self.values.items())):
                s += value.cInitAtom("%s->values[%d]" % (var, i))
        else:
            s += ["%s->values = NULL;" % var]
        if len(self.values) > 1:
            s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);"
                  % (var, self.type.key.type.to_string().upper())]
        return s

View File

@ -1,34 +0,0 @@
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ovs.json
class Error(Exception):
    """Base class for OVSDB errors.

    Carries the human-readable message in 'msg', the offending JSON (if
    any) in 'json', and a classification string in 'tag'."""
    def __init__(self, msg, json=None, tag=None):
        self.msg = msg
        self.json = json
        # The default tag depends on whether offending JSON was supplied.
        if tag is not None:
            self.tag = tag
        elif json is not None:
            self.tag = "syntax error"
        else:
            self.tag = "ovsdb error"

        # Compose message.
        syntax = ""
        if self.json is not None:
            syntax = 'syntax "%s": ' % ovs.json.to_string(self.json)
        Exception.__init__(self, "%s%s: %s" % (syntax, self.tag, self.msg))

File diff suppressed because it is too large Load Diff

View File

@ -1,109 +0,0 @@
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ovs.db import error
class Parser(object):
    """Helper for picking typed members out of a JSON object (a dict)
    while tracking which members were consumed, so that finish() can
    reject any that were never requested."""
    def __init__(self, json, name):
        # 'name' describes what is being parsed, for error messages.
        self.name = name
        self.json = json
        if type(json) != dict:
            self.__raise_error("Object expected.")
        self.used = set()
    def __get(self, name, types, optional, default=None):
        # Returns member 'name' checked against 'types'.  The pseudo-type
        # "id" accepts any identifier-shaped string (see is_identifier()).
        if name in self.json:
            self.used.add(name)
            member = float_to_int(self.json[name])
            if is_identifier(member) and "id" in types:
                return member
            # An empty 'types' list means any type is acceptable.
            if len(types) and type(member) not in types:
                self.__raise_error("Type mismatch for member '%s'." % name)
            return member
        else:
            if not optional:
                self.__raise_error("Required '%s' member is missing." % name)
            return default
    def get(self, name, types):
        """Returns required member 'name'; raises if it is absent."""
        return self.__get(name, types, False)
    def get_optional(self, name, types, default=None):
        """Returns member 'name', or 'default' if it is absent."""
        return self.__get(name, types, True, default)
    def __raise_error(self, message):
        raise error.Error("Parsing %s failed: %s" % (self.name, message),
                          self.json)
    def finish(self):
        """Raises an error naming one leftover member if the JSON object
        contained members that were never requested via get()."""
        missing = set(self.json) - set(self.used)
        if missing:
            name = missing.pop()
            if len(missing) > 1:
                present = "and %d other members are" % len(missing)
            elif missing:
                present = "and 1 other member are"
            else:
                present = "is"
            self.__raise_error("Member '%s' %s present but not allowed here" %
                               (name, present))
def float_to_int(x):
    """Returns 'x' converted to an int when it is a float with an exactly
    representable integer value (within +/- 2**53); any other value is
    returned unchanged."""
    # XXX still needed?
    if type(x) != float:
        return x
    as_int = int(x)
    if as_int == x and -2 ** 53 <= as_int < 2 ** 53:
        return as_int
    return x
# Pattern for a C-style identifier; the trailing "$" makes match() consume
# the whole string.
id_re = re.compile("[_a-zA-Z][_a-zA-Z0-9]*$")


def is_identifier(s):
    """Returns a truthy value (a match object) when 's' is a string shaped
    like a C identifier, otherwise a falsy value."""
    if type(s) not in [str, unicode]:
        return False
    return id_re.match(s)
def json_type_to_string(type_):
    """Returns the JSON type name corresponding to the Python type 'type_'
    (e.g. dict -> "object"), or "<invalid>" for an unrecognized type."""
    if type_ is None:
        # BUGFIX/idiom: compare against the None singleton with "is".
        return "null"
    elif type_ == bool:
        return "boolean"
    elif type_ == dict:
        return "object"
    elif type_ == list:
        return "array"
    elif type_ in [int, long, float]:
        return "number"
    elif type_ in [str, unicode]:
        return "string"
    else:
        return "<invalid>"
def unwrap_json(json, name, types, desc):
    """Checks that 'json' is a two-element array whose first element equals
    'name' and whose second element has one of the Python types in 'types';
    returns the second element.  Raises error.Error (using 'desc' in the
    message) otherwise."""
    well_formed = (type(json) in (list, tuple) and len(json) == 2
                   and json[0] == name and type(json[1]) in types)
    if not well_formed:
        raise error.Error('expected ["%s", <%s>]' % (name, desc), json)
    return json[1]
def parse_json_pair(json):
    """Returns 'json' when it is a two-element list; raises error.Error
    otherwise."""
    if type(json) == list and len(json) == 2:
        return json
    raise error.Error("expected 2-element array", json)

View File

@ -1,271 +0,0 @@
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from ovs.db import error
import ovs.db.parser
from ovs.db import types
def _check_id(name, json):
    """Raises error.Error unless 'name' is a plain identifier that does not
    begin with "_" (leading-underscore names are reserved)."""
    if name.startswith('_'):
        raise error.Error('names beginning with "_" are reserved', json)
    if not ovs.db.parser.is_identifier(name):
        raise error.Error("name must be a valid id", json)
class DbSchema(object):
    """Schema for an OVSDB database."""
    def __init__(self, name, version, tables):
        # 'tables' maps table name to TableSchema.
        self.name = name
        self.version = version
        self.tables = tables
        # "isRoot" was not part of the original schema definition.  Before it
        # was added, there was no support for garbage collection.  So, for
        # backward compatibility, if the root set is empty then assume that
        # every table is in the root set.
        if self.__root_set_size() == 0:
            for table in self.tables.itervalues():
                table.is_root = True
        # Find the "ref_table"s referenced by "ref_table_name"s.
        #
        # Also force certain columns to be persistent, as explained in
        # __follow_ref_table().  This requires 'is_root' to be known, so this
        # must follow the loop updating 'is_root' above.
        for table in self.tables.itervalues():
            for column in table.columns.itervalues():
                self.__follow_ref_table(column, column.type.key, "key")
                self.__follow_ref_table(column, column.type.value, "value")
    def __root_set_size(self):
        """Returns the number of tables in the schema's root set."""
        n_root = 0
        for table in self.tables.itervalues():
            if table.is_root:
                n_root += 1
        return n_root
    @staticmethod
    def from_json(json):
        """Parses 'json' as a database schema; raises error.Error on
        malformed input."""
        parser = ovs.db.parser.Parser(json, "database schema")
        name = parser.get("name", ['id'])
        version = parser.get_optional("version", [str, unicode])
        parser.get_optional("cksum", [str, unicode])
        tablesJson = parser.get("tables", [dict])
        parser.finish()
        if (version is not None and
            not re.match('[0-9]+\.[0-9]+\.[0-9]+$', version)):
            raise error.Error('schema version "%s" not in format x.y.z'
                              % version)
        tables = {}
        for tableName, tableJson in tablesJson.iteritems():
            _check_id(tableName, json)
            tables[tableName] = TableSchema.from_json(tableJson, tableName)
        return DbSchema(name, version, tables)
    def to_json(self):
        """Serializes this schema back into JSON form."""
        # "isRoot" was not part of the original schema definition.  Before it
        # was added, there was no support for garbage collection.  So, for
        # backward compatibility, if every table is in the root set then do not
        # output "isRoot" in table schemas.
        default_is_root = self.__root_set_size() == len(self.tables)
        tables = {}
        for table in self.tables.itervalues():
            tables[table.name] = table.to_json(default_is_root)
        json = {"name": self.name, "tables": tables}
        if self.version:
            json["version"] = self.version
        return json
    def copy(self):
        # Round-tripping through JSON yields an independent deep copy.
        return DbSchema.from_json(self.to_json())
    def __follow_ref_table(self, column, base, base_name):
        # Resolves base.ref_table_name into the TableSchema it names.
        if not base or base.type != types.UuidType or not base.ref_table_name:
            return
        base.ref_table = self.tables.get(base.ref_table_name)
        if not base.ref_table:
            raise error.Error("column %s %s refers to undefined table %s"
                              % (column.name, base_name, base.ref_table_name),
                              tag="syntax error")
        if base.is_strong_ref() and not base.ref_table.is_root:
            # We cannot allow a strong reference to a non-root table to be
            # ephemeral: if it is the only reference to a row, then replaying
            # the database log from disk will cause the referenced row to be
            # deleted, even though it did exist in memory.  If there are
            # references to that row later in the log (to modify it, to delete
            # it, or just to point to it), then this will yield a transaction
            # error.
            column.persistent = True
class IdlSchema(DbSchema):
    """A DbSchema extended with the IDL-specific "idlPrefix" and
    "idlHeader" members."""
    def __init__(self, name, version, tables, idlPrefix, idlHeader):
        DbSchema.__init__(self, name, version, tables)
        self.idlPrefix = idlPrefix
        self.idlHeader = idlHeader

    @staticmethod
    def from_json(json):
        """Parses 'json' as an IDL schema: the IDL-only members are pulled
        out first, and the remainder is parsed as an ordinary DbSchema."""
        parser = ovs.db.parser.Parser(json, "IDL schema")
        prefix = parser.get("idlPrefix", [str, unicode])
        header = parser.get("idlHeader", [str, unicode])

        rest = dict(json)
        for idl_key in ("idlPrefix", "idlHeader"):
            del rest[idl_key]

        schema = DbSchema.from_json(rest)
        return IdlSchema(schema.name, schema.version, schema.tables,
                         prefix, header)
def column_set_from_json(json, columns):
    """Parses 'json' as an array of distinct column names drawn from the
    dict 'columns' and returns the matching ColumnSchemas as a tuple.  A
    None 'json' selects every column.  Raises error.Error on bad input."""
    if json is None:
        return tuple(columns)
    if type(json) != list:
        raise error.Error("array of distinct column names expected", json)
    for column_name in json:
        if type(column_name) not in [str, unicode]:
            raise error.Error("array of distinct column names expected",
                              json)
        if column_name not in columns:
            raise error.Error("%s is not a valid column name"
                              % column_name, json)
    if len(set(json)) != len(json):
        # Duplicate.
        raise error.Error("array of distinct column names expected", json)
    return tuple([columns[column_name] for column_name in json])
class TableSchema(object):
    """Schema for one OVSDB table: its named columns plus table-level
    options (mutability, row limit, root-set membership, indexes)."""
    def __init__(self, name, columns, mutable=True, max_rows=sys.maxint,
                 is_root=True, indexes=None):
        self.name = name
        self.columns = columns
        self.mutable = mutable
        self.max_rows = max_rows
        self.is_root = is_root
        # BUGFIX: default to a fresh list per instance instead of a single
        # mutable [] shared by every TableSchema constructed without
        # 'indexes'.
        self.indexes = [] if indexes is None else indexes
    @staticmethod
    def from_json(json, name):
        """Parses 'json' as the schema for the table called 'name'; raises
        error.Error on malformed input."""
        parser = ovs.db.parser.Parser(json, "table schema for table %s" % name)
        columns_json = parser.get("columns", [dict])
        mutable = parser.get_optional("mutable", [bool], True)
        max_rows = parser.get_optional("maxRows", [int])
        is_root = parser.get_optional("isRoot", [bool], False)
        indexes_json = parser.get_optional("indexes", [list], [])
        parser.finish()
        if max_rows is None:
            max_rows = sys.maxint
        elif max_rows <= 0:
            raise error.Error("maxRows must be at least 1", json)
        if not columns_json:
            raise error.Error("table must have at least one column", json)
        columns = {}
        for column_name, column_json in columns_json.iteritems():
            _check_id(column_name, json)
            columns[column_name] = ColumnSchema.from_json(column_json,
                                                          column_name)
        indexes = []
        for index_json in indexes_json:
            index = column_set_from_json(index_json, columns)
            if not index:
                raise error.Error("index must have at least one column", json)
            elif len(index) == 1:
                # A one-column index is a uniqueness constraint on that
                # column.
                index[0].unique = True
            for column in index:
                if not column.persistent:
                    raise error.Error("ephemeral columns (such as %s) may "
                                      "not be indexed" % column.name, json)
            indexes.append(index)
        return TableSchema(name, columns, mutable, max_rows, is_root, indexes)
    def to_json(self, default_is_root=False):
        """Returns this table schema serialized into JSON.
        The "isRoot" member is included in the JSON only if its value would
        differ from 'default_is_root'.  Ordinarily 'default_is_root' should
        be false, because ordinarily a table would be not be part of the root
        set if its "isRoot" member is omitted.  However, garbage collection
        was not orginally included in OVSDB, so in older schemas that do not
        include any "isRoot" members, every table is implicitly part of the
        root set.  To serialize such a schema in a way that can be read by
        older OVSDB tools, specify 'default_is_root' as True.
        """
        json = {}
        if not self.mutable:
            json["mutable"] = False
        if default_is_root != self.is_root:
            json["isRoot"] = self.is_root
        json["columns"] = columns = {}
        for column in self.columns.itervalues():
            # Names starting with "_" are internal and never serialized.
            if not column.name.startswith("_"):
                columns[column.name] = column.to_json()
        if self.max_rows != sys.maxint:
            json["maxRows"] = self.max_rows
        if self.indexes:
            json["indexes"] = []
            for index in self.indexes:
                json["indexes"].append([column.name for column in index])
        return json
class ColumnSchema(object):
    """Schema for a single OVSDB column: its name, mutability, persistence,
    and value type."""

    def __init__(self, name, mutable, persistent, type_):
        self.name = name
        self.mutable = mutable
        self.persistent = persistent
        self.type = type_
        # Set to True later when a one-column index makes this column unique.
        self.unique = False

    @staticmethod
    def from_json(json, name):
        """Parses 'json' as the schema for a column named 'name'."""
        parser = ovs.db.parser.Parser(json, "schema for column %s" % name)
        is_mutable = parser.get_optional("mutable", [bool], True)
        is_ephemeral = parser.get_optional("ephemeral", [bool], False)
        column_type = types.Type.from_json(
            parser.get("type", [dict, str, unicode]))
        parser.finish()
        return ColumnSchema(name, is_mutable, not is_ephemeral, column_type)

    def to_json(self):
        """Returns this column schema serialized into a JSON dict; only
        non-default members are emitted."""
        serialized = {"type": self.type.to_json()}
        if not self.mutable:
            serialized["mutable"] = False
        if not self.persistent:
            serialized["ephemeral"] = True
        return serialized

View File

@ -1,587 +0,0 @@
# Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from ovs.db import error
import ovs.db.parser
import ovs.db.data
import ovs.ovsuuid
class AtomicType(object):
    """One of the primitive OVSDB value types (integer, real, string, ...).

    An instance carries the type's name, its default value, and the Python
    types that may represent values of the type."""

    def __init__(self, name, default, python_types):
        self.name = name
        self.default = default
        self.python_types = python_types

    @staticmethod
    def from_string(s):
        """Returns the AtomicType whose name is 's'.

        Raises error.Error if 's' does not name an atomic type; "void" is
        deliberately rejected as well."""
        if s != "void":
            for atomic_type in ATOMIC_TYPES:
                if s == atomic_type.name:
                    return atomic_type
        raise error.Error('"%s" is not an atomic-type' % s, s)

    @staticmethod
    def from_json(json):
        """Parses a JSON string into an AtomicType."""
        if type(json) not in [str, unicode]:
            raise error.Error("atomic-type expected", json)
        return AtomicType.from_string(json)

    def __str__(self):
        return self.name

    def to_string(self):
        return self.name

    def to_json(self):
        return self.name

    def default_atom(self):
        """Returns an Atom holding this type's default value."""
        return ovs.db.data.Atom(self, self.default)
# Singleton instances for each OVSDB atomic type.  The third argument lists
# the Python types that may represent values of that type (Python 2, hence
# int/long and str/unicode).
VoidType = AtomicType("void", None, ())
IntegerType = AtomicType("integer", 0, (int, long))
RealType = AtomicType("real", 0.0, (int, long, float))
BooleanType = AtomicType("boolean", False, (bool,))
StringType = AtomicType("string", "", (str, unicode))
UuidType = AtomicType("uuid", ovs.ovsuuid.zero(), (uuid.UUID,))

# Every atomic type, used by AtomicType.from_string() for lookup.
ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType,
                UuidType]
def escapeCString(src):
    """Returns 'src' with backslash, double quote, and control characters
    escaped the way they would be written in a C string literal."""
    # Named escapes for the characters C spells with a backslash sequence.
    special = {'\\': '\\\\', '"': '\\"',
               '\n': '\\n', '\r': '\\r', '\a': '\\a', '\b': '\\b',
               '\f': '\\f', '\t': '\\t', '\v': '\\v'}
    pieces = []
    for ch in src:
        if ch in special:
            pieces.append(special[ch])
        elif ord(ch) < 32:
            # Remaining control characters become octal escapes.
            pieces.append('\\%03o' % ord(ch))
        else:
            pieces.append(ch)
    return ''.join(pieces)
def commafy(x):
    """Returns integer x formatted in decimal with thousands set off by
    commas."""
    return _commafy("%d" % x)


def _commafy(s):
    # Inserts commas into decimal-digit string 's', handling a leading
    # minus sign by recursing on the rest of the string.
    if s.startswith('-'):
        return '-' + _commafy(s[1:])
    groups = []
    while len(s) > 3:
        groups.insert(0, s[-3:])
        s = s[:-3]
    groups.insert(0, s)
    return ','.join(groups)
def returnUnchanged(x):
    """Identity function: returns its argument unmodified.  Used as the
    default escaping callback in the to-English helpers."""
    return x
class BaseType(object):
    """Type and constraints for one OVSDB atom: an atomic type plus
    optional enum, numeric range, string length, and uuid reference
    constraints."""

    def __init__(self, type_, enum=None, min=None, max=None,
                 min_length=0, max_length=sys.maxint, ref_table_name=None):
        assert isinstance(type_, AtomicType)
        self.type = type_
        self.enum = enum                      # Datum of allowed values, or None
        self.min = min                        # numeric lower bound, or None
        self.max = max                        # numeric upper bound, or None
        self.min_length = min_length          # string length bounds
        self.max_length = max_length
        self.ref_table_name = ref_table_name  # referenced table name (uuid)
        if ref_table_name:
            self.ref_type = 'strong'          # default reference strength
        else:
            self.ref_type = None
        self.ref_table = None                 # not assigned here; stays None
                                              # unless set externally

    def default(self):
        """Returns the default Atom for this type."""
        return ovs.db.data.Atom.default(self.type)

    def __eq__(self, other):
        # ref_type and ref_table deliberately do not participate in
        # equality; only the members compared below do.
        if not isinstance(other, BaseType):
            return NotImplemented
        return (self.type == other.type and self.enum == other.enum and
                self.min == other.min and self.max == other.max and
                self.min_length == other.min_length and
                self.max_length == other.max_length and
                self.ref_table_name == other.ref_table_name)

    def __ne__(self, other):
        if not isinstance(other, BaseType):
            return NotImplemented
        else:
            return not (self == other)

    @staticmethod
    def __parse_uint(parser, name, default):
        # Parses optional member 'name' as an unsigned 32-bit integer,
        # substituting 'default' if it is absent.
        value = parser.get_optional(name, [int, long])
        if value is None:
            value = default
        else:
            max_value = 2 ** 32 - 1
            if not (0 <= value <= max_value):
                raise error.Error("%s out of valid range 0 to %d"
                                  % (name, max_value), value)
        return value

    @staticmethod
    def from_json(json):
        """Parses 'json' (a type-name string or a dict) into a BaseType,
        raising error.Error on invalid input."""
        if type(json) in [str, unicode]:
            # Shorthand form: a bare type name with no constraints.
            return BaseType(AtomicType.from_json(json))

        parser = ovs.db.parser.Parser(json, "ovsdb type")
        atomic_type = AtomicType.from_json(parser.get("type", [str, unicode]))

        base = BaseType(atomic_type)

        enum = parser.get_optional("enum", [])
        if enum is not None:
            base.enum = ovs.db.data.Datum.from_json(
                BaseType.get_enum_type(base.type), enum)
        elif base.type == IntegerType:
            base.min = parser.get_optional("minInteger", [int, long])
            base.max = parser.get_optional("maxInteger", [int, long])
            if (base.min is not None and base.max is not None
                    and base.min > base.max):
                raise error.Error("minInteger exceeds maxInteger", json)
        elif base.type == RealType:
            base.min = parser.get_optional("minReal", [int, long, float])
            base.max = parser.get_optional("maxReal", [int, long, float])
            if (base.min is not None and base.max is not None
                    and base.min > base.max):
                raise error.Error("minReal exceeds maxReal", json)
        elif base.type == StringType:
            base.min_length = BaseType.__parse_uint(parser, "minLength", 0)
            base.max_length = BaseType.__parse_uint(parser, "maxLength",
                                                    sys.maxint)
            if base.min_length > base.max_length:
                raise error.Error("minLength exceeds maxLength", json)
        elif base.type == UuidType:
            base.ref_table_name = parser.get_optional("refTable", ['id'])
            if base.ref_table_name:
                base.ref_type = parser.get_optional("refType", [str, unicode],
                                                    "strong")
                if base.ref_type not in ['strong', 'weak']:
                    raise error.Error('refType must be "strong" or "weak" '
                                      '(not "%s")' % base.ref_type)
        parser.finish()

        return base

    def to_json(self):
        """Serializes this type into JSON: a bare type name if there are no
        constraints, otherwise a dict of the non-default members."""
        if not self.has_constraints():
            return self.type.to_json()

        json = {'type': self.type.to_json()}

        if self.enum:
            json['enum'] = self.enum.to_json()

        if self.type == IntegerType:
            if self.min is not None:
                json['minInteger'] = self.min
            if self.max is not None:
                json['maxInteger'] = self.max
        elif self.type == RealType:
            if self.min is not None:
                json['minReal'] = self.min
            if self.max is not None:
                json['maxReal'] = self.max
        elif self.type == StringType:
            if self.min_length != 0:
                json['minLength'] = self.min_length
            if self.max_length != sys.maxint:
                json['maxLength'] = self.max_length
        elif self.type == UuidType:
            if self.ref_table_name:
                json['refTable'] = self.ref_table_name
                if self.ref_type != 'strong':
                    json['refType'] = self.ref_type

        return json

    def copy(self):
        # NOTE(review): self.enum.copy() raises AttributeError when enum is
        # None, so copy() only works on enum-constrained types as written —
        # confirm callers before relying on it for unconstrained types.
        base = BaseType(self.type, self.enum.copy(), self.min, self.max,
                        self.min_length, self.max_length, self.ref_table_name)
        base.ref_table = self.ref_table
        return base

    def is_valid(self):
        """Returns True if this type's constraints are self-consistent."""
        if self.type in (VoidType, BooleanType, UuidType):
            return True
        elif self.type in (IntegerType, RealType):
            return self.min is None or self.max is None or self.min <= self.max
        elif self.type == StringType:
            return self.min_length <= self.max_length
        else:
            return False

    def has_constraints(self):
        """Returns True if any constraint (enum, numeric range, string
        length, or reference) is present."""
        return (self.enum is not None or self.min is not None or
                self.max is not None or
                self.min_length != 0 or self.max_length != sys.maxint or
                self.ref_table_name is not None)

    def without_constraints(self):
        """Returns a new BaseType with the same atomic type but none of the
        constraints."""
        return BaseType(self.type)

    @staticmethod
    def get_enum_type(atomic_type):
        """Returns the type of the 'enum' member for a BaseType whose
        'type' is 'atomic_type'."""
        return Type(BaseType(atomic_type), None, 1, sys.maxint)

    def is_ref(self):
        # True for a uuid that references rows in another table.
        return self.type == UuidType and self.ref_table_name is not None

    def is_strong_ref(self):
        return self.is_ref() and self.ref_type == 'strong'

    def is_weak_ref(self):
        return self.is_ref() and self.ref_type == 'weak'

    def toEnglish(self, escapeLiteral=returnUnchanged):
        """Returns a human-readable English name for this type."""
        if self.type == UuidType and self.ref_table_name:
            s = escapeLiteral(self.ref_table_name)
            if self.ref_type == 'weak':
                s = "weak reference to " + s
            return s
        else:
            return self.type.to_string()

    def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
                             escapeNumber=returnUnchanged):
        """Returns an English description of this type's constraints, or ''
        if there are none."""
        if self.enum:
            literals = [value.toEnglish(escapeLiteral)
                        for value in self.enum.values]
            if len(literals) == 2:
                english = 'either %s or %s' % (literals[0], literals[1])
            else:
                english = 'one of %s, %s, or %s' % (literals[0],
                                                    ', '.join(literals[1:-1]),
                                                    literals[-1])
        elif self.min is not None and self.max is not None:
            if self.type == IntegerType:
                english = 'in range %s to %s' % (
                    escapeNumber(commafy(self.min)),
                    escapeNumber(commafy(self.max)))
            else:
                english = 'in range %s to %s' % (
                    escapeNumber("%g" % self.min),
                    escapeNumber("%g" % self.max))
        elif self.min is not None:
            if self.type == IntegerType:
                english = 'at least %s' % escapeNumber(commafy(self.min))
            else:
                english = 'at least %s' % escapeNumber("%g" % self.min)
        elif self.max is not None:
            if self.type == IntegerType:
                english = 'at most %s' % escapeNumber(commafy(self.max))
            else:
                english = 'at most %s' % escapeNumber("%g" % self.max)
        elif self.min_length != 0 and self.max_length != sys.maxint:
            if self.min_length == self.max_length:
                english = ('exactly %s characters long'
                           % commafy(self.min_length))
            else:
                english = ('between %s and %s characters long'
                           % (commafy(self.min_length),
                              commafy(self.max_length)))
        elif self.min_length != 0:
            return 'at least %s characters long' % commafy(self.min_length)
        elif self.max_length != sys.maxint:
            english = 'at most %s characters long' % commafy(self.max_length)
        else:
            english = ''

        return english

    def toCType(self, prefix):
        # Returns the C type used for this atom in generated IDL code;
        # 'prefix' qualifies struct names for referenced tables.
        if self.ref_table_name:
            return "struct %s%s *" % (prefix, self.ref_table_name.lower())
        else:
            return {IntegerType: 'int64_t ',
                    RealType: 'double ',
                    UuidType: 'struct uuid ',
                    BooleanType: 'bool ',
                    StringType: 'char *'}[self.type]

    def toAtomicType(self):
        # C enum constant for this atomic type, e.g. OVSDB_TYPE_INTEGER.
        return "OVSDB_TYPE_%s" % self.type.to_string().upper()

    def copyCValue(self, dst, src):
        # Returns a C statement copying 'src' into 'dst' for this type
        # (strings are duplicated, references copy the row's uuid).
        args = {'dst': dst, 'src': src}
        if self.ref_table_name:
            return ("%(dst)s = %(src)s->header_.uuid;") % args
        elif self.type == StringType:
            return "%(dst)s = xstrdup(%(src)s);" % args
        else:
            return "%(dst)s = %(src)s;" % args

    def initCDefault(self, var, is_optional):
        # Returns a C statement initializing 'var' to this type's default.
        if self.ref_table_name:
            return "%s = NULL;" % var
        elif self.type == StringType and not is_optional:
            return '%s = "";' % var
        else:
            pattern = {IntegerType: '%s = 0;',
                       RealType: '%s = 0.0;',
                       UuidType: 'uuid_zero(&%s);',
                       BooleanType: '%s = false;',
                       StringType: '%s = NULL;'}[self.type]
            return pattern % var

    def cInitBaseType(self, indent, var):
        """Returns C code, one statement per line each prefixed by 'indent',
        that initializes an ovsdb_base_type variable named 'var' to match
        this type and its constraints."""
        stmts = []
        stmts.append('ovsdb_base_type_init(&%s, %s);' % (
            var, self.toAtomicType()))
        if self.enum:
            stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);"
                         % (var, var))
            stmts += self.enum.cInitDatum("%s.enum_" % var)
        if self.type == IntegerType:
            if self.min is not None:
                stmts.append('%s.u.integer.min = INT64_C(%d);'
                             % (var, self.min))
            if self.max is not None:
                stmts.append('%s.u.integer.max = INT64_C(%d);'
                             % (var, self.max))
        elif self.type == RealType:
            if self.min is not None:
                stmts.append('%s.u.real.min = %d;' % (var, self.min))
            if self.max is not None:
                stmts.append('%s.u.real.max = %d;' % (var, self.max))
        elif self.type == StringType:
            if self.min_length is not None:
                stmts.append('%s.u.string.minLen = %d;'
                             % (var, self.min_length))
            if self.max_length != sys.maxint:
                stmts.append('%s.u.string.maxLen = %d;'
                             % (var, self.max_length))
        elif self.type == UuidType:
            if self.ref_table_name is not None:
                stmts.append('%s.u.uuid.refTableName = "%s";'
                             % (var, escapeCString(self.ref_table_name)))
                stmts.append('%s.u.uuid.refType = OVSDB_REF_%s;'
                             % (var, self.ref_type.upper()))
        return '\n'.join([indent + stmt for stmt in stmts])
class Type(object):
    """A full OVSDB column type: a key BaseType, an optional value BaseType
    (present only for maps), and the allowed number of elements, in the
    inclusive range [n_min, n_max]."""

    DEFAULT_MIN = 1
    DEFAULT_MAX = 1

    def __init__(self, key, value=None, n_min=DEFAULT_MIN, n_max=DEFAULT_MAX):
        self.key = key
        self.value = value
        self.n_min = n_min
        self.n_max = n_max

    def copy(self):
        """Returns a copy of this Type with copied key/value BaseTypes."""
        if self.value is None:
            value = None
        else:
            value = self.value.copy()
        return Type(self.key.copy(), value, self.n_min, self.n_max)

    def __eq__(self, other):
        if not isinstance(other, Type):
            return NotImplemented
        return (self.key == other.key and self.value == other.value and
                self.n_min == other.n_min and self.n_max == other.n_max)

    def __ne__(self, other):
        if not isinstance(other, Type):
            return NotImplemented
        else:
            return not (self == other)

    def is_valid(self):
        """Returns True if this is a well-formed type: non-void, valid
        key/value types, and an element-count range that includes 1."""
        return (self.key.type != VoidType and self.key.is_valid() and
                (self.value is None or
                 (self.value.type != VoidType and self.value.is_valid())) and
                self.n_min <= 1 <= self.n_max)

    def is_scalar(self):
        # Exactly one element and no value type.
        return self.n_min == 1 and self.n_max == 1 and not self.value

    def is_optional(self):
        # Zero or one element.
        return self.n_min == 0 and self.n_max == 1

    def is_composite(self):
        return self.n_max > 1

    def is_set(self):
        return self.value is None and (self.n_min != 1 or self.n_max != 1)

    def is_map(self):
        return self.value is not None

    def is_smap(self):
        # A map from string to string.
        return (self.is_map()
                and self.key.type == StringType
                and self.value.type == StringType)

    def is_optional_pointer(self):
        # Optional scalars that generated C code represents as a
        # possibly-NULL pointer (strings and row references).
        return (self.is_optional() and not self.value
                and (self.key.type == StringType or self.key.ref_table_name))

    @staticmethod
    def __n_from_json(json, default):
        # Parses an optional "min"/"max" member, substituting 'default'.
        if json is None:
            return default
        elif type(json) == int and 0 <= json <= sys.maxint:
            return json
        else:
            raise error.Error("bad min or max value", json)

    @staticmethod
    def from_json(json):
        """Parses 'json' (a type-name string or a dict) into a Type,
        raising error.Error on invalid input."""
        if type(json) in [str, unicode]:
            return Type(BaseType.from_json(json))

        parser = ovs.db.parser.Parser(json, "ovsdb type")
        key_json = parser.get("key", [dict, str, unicode])
        value_json = parser.get_optional("value", [dict, str, unicode])
        min_json = parser.get_optional("min", [int])
        max_json = parser.get_optional("max", [int, str, unicode])
        parser.finish()

        key = BaseType.from_json(key_json)
        if value_json:
            value = BaseType.from_json(value_json)
        else:
            value = None

        n_min = Type.__n_from_json(min_json, Type.DEFAULT_MIN)

        if max_json == 'unlimited':
            n_max = sys.maxint
        else:
            n_max = Type.__n_from_json(max_json, Type.DEFAULT_MAX)

        type_ = Type(key, value, n_min, n_max)
        if not type_.is_valid():
            raise error.Error("ovsdb type fails constraint checks", json)
        return type_

    def to_json(self):
        """Serializes this type into JSON, using the compact bare-key form
        when the type is an unconstrained scalar."""
        if self.is_scalar() and not self.key.has_constraints():
            return self.key.to_json()

        json = {"key": self.key.to_json()}
        if self.value is not None:
            json["value"] = self.value.to_json()
        if self.n_min != Type.DEFAULT_MIN:
            json["min"] = self.n_min
        if self.n_max == sys.maxint:
            json["max"] = "unlimited"
        elif self.n_max != Type.DEFAULT_MAX:
            json["max"] = self.n_max
        return json

    def toEnglish(self, escapeLiteral=returnUnchanged):
        """Returns an English phrase describing this type, e.g.
        "optional string" or "map of 1 to 3 string-integer pairs"."""
        keyName = self.key.toEnglish(escapeLiteral)
        if self.value:
            valueName = self.value.toEnglish(escapeLiteral)

        if self.is_scalar():
            return keyName
        elif self.is_optional():
            if self.value:
                return "optional %s-%s pair" % (keyName, valueName)
            else:
                return "optional %s" % keyName
        else:
            if self.n_max == sys.maxint:
                if self.n_min:
                    quantity = "%s or more " % commafy(self.n_min)
                else:
                    quantity = ""
            elif self.n_min:
                quantity = "%s to %s " % (commafy(self.n_min),
                                          commafy(self.n_max))
            else:
                quantity = "up to %s " % commafy(self.n_max)

            if self.value:
                return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
            else:
                # Naive pluralization of the key type's English name.
                if keyName.endswith('s'):
                    plural = keyName + "es"
                else:
                    plural = keyName + "s"
                return "set of %s%s" % (quantity, plural)

    def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
                             escapeNumber=returnUnchanged):
        """Returns an English description of the key and value constraints,
        or '' if there are none."""
        constraints = []
        keyConstraints = self.key.constraintsToEnglish(escapeLiteral,
                                                       escapeNumber)
        if keyConstraints:
            if self.value:
                constraints.append('key %s' % keyConstraints)
            else:
                constraints.append(keyConstraints)

        if self.value:
            valueConstraints = self.value.constraintsToEnglish(escapeLiteral,
                                                               escapeNumber)
            if valueConstraints:
                constraints.append('value %s' % valueConstraints)

        return ', '.join(constraints)

    def cDeclComment(self):
        # Comment appended to generated C declarations for mandatory
        # scalar strings.
        if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType:
            return "\t/* Always nonnull. */"
        else:
            return ""

    def cInitType(self, indent, var):
        """Returns C code, prefixed with 'indent', initializing an
        ovsdb_type variable named 'var' to match this type."""
        initKey = self.key.cInitBaseType(indent, "%s.key" % var)
        if self.value:
            initValue = self.value.cInitBaseType(indent, "%s.value" % var)
        else:
            initValue = ('%sovsdb_base_type_init(&%s.value, '
                         'OVSDB_TYPE_VOID);' % (indent, var))
        initMin = "%s%s.n_min = %s;" % (indent, var, self.n_min)
        if self.n_max == sys.maxint:
            # "unlimited" maps to UINT_MAX on the C side.
            n_max = "UINT_MAX"
        else:
            n_max = self.n_max
        initMax = "%s%s.n_max = %s;" % (indent, var, n_max)
        return "\n".join((initKey, initValue, initMin, initMax))

View File

@ -1,13 +0,0 @@
import os
# Open vSwitch installation directories.  Each build-time default can be
# overridden at runtime through the corresponding OVS_* environment variable.
PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """/usr/share/openvswitch""")
RUNDIR = os.environ.get("OVS_RUNDIR", """/var/run/openvswitch""")
LOGDIR = os.environ.get("OVS_LOGDIR", """/var/log/openvswitch""")
BINDIR = os.environ.get("OVS_BINDIR", """/usr/bin""")

DBDIR = os.environ.get("OVS_DBDIR")
if not DBDIR:
    # No explicit DB dir: derive it from OVS_SYSCONFDIR if set, otherwise
    # fall back to the built-in default.
    sysconfdir = os.environ.get("OVS_SYSCONFDIR")
    if sysconfdir:
        DBDIR = "%s/openvswitch" % sysconfdir
    else:
        DBDIR = """/etc/openvswitch"""

View File

@ -1,136 +0,0 @@
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import signal
import ovs.vlog
# Registered (hook, cancel, run_at_exit) triples; see add_hook().
_hooks = []

vlog = ovs.vlog.Vlog("fatal-signal")
def add_hook(hook, cancel, run_at_exit):
    """Registers 'hook' to be called when a fatal signal is delivered and,
    if 'run_at_exit' is true, at normal process termination too.  'cancel',
    if not None, is called by fork() to release the hook's resources
    instead of running it."""
    _init()
    _hooks.append((hook, cancel, run_at_exit))
def fork():
    """Clears all of the fatal signal hooks without executing them.  If any of
    the hooks passed a 'cancel' function to add_hook(), then those functions
    will be called, allowing them to free resources, etc.

    Following a fork, one of the resulting processes can call this function to
    allow it to terminate without calling the hooks registered before calling
    this function.  New hooks registered after calling this function will take
    effect normally."""
    global _hooks
    for hook, cancel, run_at_exit in _hooks:
        if cancel:
            cancel()

    # Rebind (rather than mutate) the module-level list.
    _hooks = []
# State for the file-unlink hook: whether the hook has been registered yet,
# and the file names to unlink at termination (stored as dict keys).
_added_hook = False
_files = {}
def add_file_to_unlink(file):
    """Registers 'file' to be unlinked when the program terminates via
    sys.exit() or a fatal signal."""
    global _added_hook
    if not _added_hook:
        # Install the unlink hook lazily, on first use.
        _added_hook = True
        add_hook(_unlink_files, _cancel_files, True)
    _files[file] = None
def remove_file_to_unlink(file):
    """Unregisters 'file' from being unlinked when the program terminates via
    sys.exit() or a fatal signal."""
    # pop() with a default removes the entry if present, quietly doing
    # nothing otherwise.
    _files.pop(file, None)
def unlink_file_now(file):
    """Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'.
    Returns 0 if successful, otherwise a positive errno value."""
    error = _unlink(file)
    if error:
        vlog.warn("could not unlink \"%s\" (%s)" % (file, os.strerror(error)))
    # Unregister regardless of whether the unlink succeeded.
    remove_file_to_unlink(file)
    return error
def _unlink_files():
    # Termination hook: unlink every registered file.  Errors are ignored
    # here (the return value of _unlink() is discarded).
    for file_ in _files:
        _unlink(file_)
def _cancel_files():
    # Cancel callback for add_hook(): forget all registered files so that
    # fork() can discard them without unlinking anything.
    global _added_hook
    global _files
    _added_hook = False
    _files = {}
def _unlink(file_):
try:
os.unlink(file_)
return 0
except OSError, e:
return e.errno
def _signal_handler(signr, _):
    # Runs the registered hooks, then re-delivers the signal with its
    # default disposition.
    _call_hooks(signr)

    # Re-raise the signal with the default handling so that the program
    # termination status reflects that we were killed by this signal.
    signal.signal(signr, signal.SIG_DFL)
    os.kill(os.getpid(), signr)
def _atexit_handler():
    # Signal number 0 means "normal exit": only run_at_exit hooks fire.
    _call_hooks(0)
# Reentrancy guard for _call_hooks().
recurse = False


def _call_hooks(signr):
    """Runs each registered hook once.  'signr' is the delivering signal
    number, or 0 for a normal (atexit) termination, in which case only
    hooks registered with run_at_exit=True are run."""
    global recurse
    if recurse:
        return
    recurse = True

    for hook, cancel, run_at_exit in _hooks:
        if signr != 0 or run_at_exit:
            hook()
# Whether _init() has already installed the signal/atexit handlers.
_inited = False


def _init():
    """Installs the fatal-signal handlers and the atexit hook, once."""
    global _inited
    if not _inited:
        _inited = True

        for signr in (signal.SIGTERM, signal.SIGINT,
                      signal.SIGHUP, signal.SIGALRM):
            # Do not override a handler the application installed itself.
            if signal.getsignal(signr) == signal.SIG_DFL:
                signal.signal(signr, _signal_handler)
        atexit.register(_atexit_handler)

View File

@ -1,586 +0,0 @@
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import StringIO
import sys
# Suppress a pychecker warning about iterating over strings.
__pychecker__ = 'no-stringiter'

# Map from code point ordinal to its JSON escape sequence, covering the
# characters that must be escaped inside a JSON string.
escapes = {ord('"'): u"\\\"",
           ord("\\"): u"\\\\",
           ord("\b"): u"\\b",
           ord("\f"): u"\\f",
           ord("\n"): u"\\n",
           ord("\r"): u"\\r",
           ord("\t"): u"\\t"}
for esc in range(32):
    if esc not in escapes:
        # Remaining control characters use the generic \uXXXX form.
        escapes[esc] = u"\\u%04x" % esc

# Indentation width used by pretty-printed output.
SPACES_PER_LEVEL = 2
class _Serializer(object):
    """Writes a Python object tree to a stream as JSON text (Python 2
    flavor: handles str/unicode and int/long)."""

    def __init__(self, stream, pretty, sort_keys):
        self.stream = stream
        self.pretty = pretty          # insert newlines and indentation
        self.sort_keys = sort_keys    # emit object members in sorted order
        self.depth = 0                # current nesting depth, for indent

    def __serialize_string(self, s):
        # Quote 's', escaping via the module-level 'escapes' table.
        self.stream.write(u'"%s"' % ''.join(escapes.get(ord(c), c) for c in s))

    def __indent_line(self):
        # In pretty mode, start a new line indented to the current depth.
        if self.pretty:
            self.stream.write('\n')
            self.stream.write(' ' * (SPACES_PER_LEVEL * self.depth))

    def serialize(self, obj):
        """Writes 'obj' to the stream as JSON.  Raises Exception for values
        that have no JSON representation."""
        if obj is None:
            self.stream.write(u"null")
        elif obj is False:
            self.stream.write(u"false")
        elif obj is True:
            self.stream.write(u"true")
        elif type(obj) in (int, long):
            self.stream.write(u"%d" % obj)
        elif type(obj) == float:
            # %.15g: compact decimal form for doubles.
            self.stream.write("%.15g" % obj)
        elif type(obj) == unicode:
            self.__serialize_string(obj)
        elif type(obj) == str:
            self.__serialize_string(unicode(obj))
        elif type(obj) == dict:
            self.stream.write(u"{")

            self.depth += 1
            self.__indent_line()

            if self.sort_keys:
                items = sorted(obj.items())
            else:
                items = obj.iteritems()
            for i, (key, value) in enumerate(items):
                if i > 0:
                    self.stream.write(u",")
                    self.__indent_line()
                self.__serialize_string(unicode(key))
                self.stream.write(u":")
                if self.pretty:
                    self.stream.write(u' ')
                self.serialize(value)

            self.stream.write(u"}")
            self.depth -= 1
        elif type(obj) in (list, tuple):
            self.stream.write(u"[")
            self.depth += 1
            if obj:
                self.__indent_line()

                for i, value in enumerate(obj):
                    if i > 0:
                        self.stream.write(u",")
                        self.__indent_line()
                    self.serialize(value)
            self.depth -= 1
            self.stream.write(u"]")
        else:
            raise Exception("can't serialize %s as JSON" % obj)
def to_stream(obj, stream, pretty=False, sort_keys=True):
    """Serializes 'obj' as JSON, writing the output to 'stream'."""
    _Serializer(stream, pretty, sort_keys).serialize(obj)
def to_file(obj, name, pretty=False, sort_keys=True):
    """Serializes 'obj' as JSON to a file named 'name'."""
    # 'with' guarantees the file is closed even if to_stream() raises,
    # replacing the original open/try/finally sequence.
    with open(name, "w") as stream:
        to_stream(obj, stream, pretty, sort_keys)
def to_string(obj, pretty=False, sort_keys=True):
    """Serializes 'obj' as JSON and returns the result as a string."""
    buf = StringIO.StringIO()
    try:
        to_stream(obj, buf, pretty, sort_keys)
        return buf.getvalue()
    finally:
        buf.close()
def from_stream(stream):
    """Reads all of 'stream' and parses it as JSON, returning whatever
    Parser.finish() yields (the parsed value, or an error string)."""
    p = Parser(check_trailer=True)
    while True:
        buf = stream.read(4096)
        if buf == "" or p.feed(buf) != len(buf):
            # EOF, or the parser stopped consuming input.
            break
    return p.finish()
def from_file(name):
    """Parses the contents of the file named 'name' as JSON, returning the
    parsed value or an error string."""
    # 'with' guarantees the file is closed even if from_stream() raises,
    # replacing the original open/try/finally sequence.
    with open(name, "r") as stream:
        return from_stream(stream)
def from_string(s):
    """Parses byte string 's' as JSON and returns the parsed value.

    Returns an error-message string instead if 's' is not valid UTF-8."""
    try:
        s = unicode(s, 'utf-8')
    except UnicodeDecodeError as e:
        # 'except E as e' replaces the legacy 'except E, e' comma form; the
        # 'as' syntax is valid from Python 2.6 onward and required on 3.x.
        # Show the offending non-ASCII byte sequence in the message.
        seq = ' '.join(["0x%2x" % ord(c)
                        for c in e.object[e.start:e.end] if ord(c) >= 0x80])
        return ("not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq)
    p = Parser(check_trailer=True)
    p.feed(s)
    return p.finish()
class Parser(object):
## Maximum height of parsing stack. ##
MAX_HEIGHT = 1000
    def __init__(self, check_trailer=False):
        """Creates a streaming JSON parser.  If 'check_trailer' is true,
        finish() reports an error when non-whitespace input follows the
        top-level value."""
        self.check_trailer = check_trailer

        # Lexical analysis.
        self.lex_state = Parser.__lex_start
        self.buffer = ""                # accumulates the current token text
        self.line_number = 0
        self.column_number = 0
        self.byte_number = 0

        # Parsing.
        self.parse_state = Parser.__parse_start
        self.stack = []                 # partially built containers
        self.member_name = None         # pending object key

        # Parse status.
        self.done = False
        self.error = None               # first error message, or None
    # __lex_start_* handlers: dispatched on the first character of a token.

    def __lex_start_space(self, c):
        # Whitespace between tokens is ignored.
        pass

    def __lex_start_alpha(self, c):
        self.buffer = c
        self.lex_state = Parser.__lex_keyword

    def __lex_start_token(self, c):
        # Punctuation ([{}]:,) is a complete token by itself.
        self.__parser_input(c)

    def __lex_start_number(self, c):
        self.buffer = c
        self.lex_state = Parser.__lex_number

    def __lex_start_string(self, _):
        # The opening quote is not part of the string's value.
        self.lex_state = Parser.__lex_string

    def __lex_start_error(self, c):
        if ord(c) >= 32 and ord(c) < 128:
            self.__error("invalid character '%s'" % c)
        else:
            self.__error("invalid character U+%04x" % ord(c))
    # Dispatch table mapping a token's first character to its handler.
    __lex_start_actions = {}
    for c in " \t\n\r":
        __lex_start_actions[c] = __lex_start_space
    for c in "abcdefghijklmnopqrstuvwxyz":
        __lex_start_actions[c] = __lex_start_alpha
    for c in "[{]}:,":
        __lex_start_actions[c] = __lex_start_token
    for c in "-0123456789":
        __lex_start_actions[c] = __lex_start_number
    __lex_start_actions['"'] = __lex_start_string

    def __lex_start(self, c):
        # Initial lexer state; always consumes the character.
        Parser.__lex_start_actions.get(
            c, Parser.__lex_start_error)(self, c)
        return True
    # Characters that may appear in a keyword (true/false/null).
    __lex_alpha = {}
    for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
        __lex_alpha[c] = True

    def __lex_finish_keyword(self):
        # Converts the buffered keyword into its value.
        if self.buffer == "false":
            self.__parser_input(False)
        elif self.buffer == "true":
            self.__parser_input(True)
        elif self.buffer == "null":
            self.__parser_input(None)
        else:
            self.__error("invalid keyword '%s'" % self.buffer)

    def __lex_keyword(self, c):
        if c in Parser.__lex_alpha:
            self.buffer += c
            return True
        else:
            # Not part of the keyword: finish it and re-lex 'c'.
            self.__lex_finish_keyword()
            return False
    # Matches a syntactically valid JSON number.
    __number_re = re.compile("(-)?(0|[1-9][0-9]*)"
                             "(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")

    def __lex_finish_number(self):
        # Converts self.buffer into an int when the value is exactly
        # representable in a signed 64-bit integer, otherwise into a float;
        # reports a specific error message for each malformed-number case.
        s = self.buffer
        m = Parser.__number_re.match(s)
        if m:
            sign, integer, fraction, exp = m.groups()
            if (exp is not None and
                    (long(exp) > sys.maxint or long(exp) < -sys.maxint - 1)):
                self.__error("exponent outside valid range")
                return

            if fraction is not None and len(fraction.lstrip('0')) == 0:
                # A fraction of all zeros contributes nothing.
                fraction = None

            sig_string = integer
            if fraction is not None:
                sig_string += fraction
            significand = int(sig_string)

            pow10 = 0
            if fraction is not None:
                pow10 -= len(fraction)
            if exp is not None:
                pow10 += long(exp)

            if significand == 0:
                self.__parser_input(0)
                return
            elif significand <= 2 ** 63:
                # Scale the significand to absorb the power of ten; if the
                # result fits in a signed 64-bit integer, deliver an int.
                while pow10 > 0 and significand <= 2 ** 63:
                    significand *= 10
                    pow10 -= 1
                while pow10 < 0 and significand % 10 == 0:
                    significand /= 10
                    pow10 += 1
                if (pow10 == 0 and
                        ((not sign and significand < 2 ** 63) or
                         (sign and significand <= 2 ** 63))):
                    if sign:
                        self.__parser_input(-significand)
                    else:
                        self.__parser_input(significand)
                    return

            value = float(s)
            if value == float("inf") or value == float("-inf"):
                self.__error("number outside valid range")
                return
            if value == 0:
                # Suppress negative zero.
                value = 0
            self.__parser_input(value)
        elif re.match("-?0[0-9]", s):
            self.__error("leading zeros not allowed")
        elif re.match("-([^0-9]|$)", s):
            self.__error("'-' must be followed by digit")
        elif re.match("-?(0|[1-9][0-9]*)\.([^0-9]|$)", s):
            self.__error("decimal point must be followed by digit")
        elif re.search("e[-+]?([^0-9]|$)", s):
            self.__error("exponent must contain at least one digit")
        else:
            self.__error("syntax error in number")

    def __lex_number(self, c):
        if c in ".0123456789eE-+":
            self.buffer += c
            return True
        else:
            # Not part of the number: finish it and re-lex 'c'.
            self.__lex_finish_number()
            return False
    # Exactly four hexadecimal digits, as required after \u in a string.
    __4hex_re = re.compile("[0-9a-fA-F]{4}")

    def __lex_4hex(self, s):
        # Parses the four hex digits of a \u escape; returns the code
        # point, or falls through (returning None) after reporting an error.
        if len(s) < 4:
            self.__error("quoted string ends within \\u escape")
        elif not Parser.__4hex_re.match(s):
            self.__error("malformed \\u escape")
        elif s == "0000":
            self.__error("null bytes not supported in quoted strings")
        else:
            return int(s, 16)

    @staticmethod
    def __is_leading_surrogate(c):
        """Returns true if 'c' is a Unicode code point for a leading
        surrogate."""
        return c >= 0xd800 and c <= 0xdbff

    @staticmethod
    def __is_trailing_surrogate(c):
        """Returns true if 'c' is a Unicode code point for a trailing
        surrogate."""
        return c >= 0xdc00 and c <= 0xdfff

    @staticmethod
    def __utf16_decode_surrogate_pair(leading, trailing):
        """Returns the unicode code point corresponding to leading surrogate
        'leading' and trailing surrogate 'trailing'.  The return value will
        not make any sense if 'leading' or 'trailing' are not in the correct
        ranges for leading or trailing surrogates."""
        #  Leading surrogate:  110110wwwwxxxxxx
        # Trailing surrogate:  110111xxxxxxxxxx
        #         Code point:  000uuuuuxxxxxxxxxxxxxxxx
        w = (leading >> 6) & 0xf
        u = w + 1
        x0 = leading & 0x3f
        x1 = trailing & 0x3ff
        return (u << 16) | (x0 << 10) | x1

    # Map from the character after a backslash to its unescaped value.
    __unescape = {'"': u'"',
                  "\\": u"\\",
                  "/": u"/",
                  "b": u"\b",
                  "f": u"\f",
                  "n": u"\n",
                  "r": u"\r",
                  "t": u"\t"}
    def __lex_finish_string(self):
        # Translates the raw (still-escaped) contents of self.buffer into
        # the string's value and delivers it to the parser.
        inp = self.buffer
        out = u""
        while len(inp):
            backslash = inp.find('\\')
            if backslash == -1:
                out += inp
                break
            out += inp[:backslash]
            inp = inp[backslash + 1:]
            if inp == "":
                self.__error("quoted string may not end with backslash")
                return

            replacement = Parser.__unescape.get(inp[0])
            if replacement is not None:
                out += replacement
                inp = inp[1:]
                continue
            elif inp[0] != u'u':
                self.__error("bad escape \\%s" % inp[0])
                return

            c0 = self.__lex_4hex(inp[1:5])
            if c0 is None:
                return
            inp = inp[5:]

            if Parser.__is_leading_surrogate(c0):
                # A leading surrogate must be followed by an escaped
                # trailing surrogate; decode the pair together.
                if inp[:2] != u'\\u':
                    self.__error("malformed escaped surrogate pair")
                    return

                c1 = self.__lex_4hex(inp[2:6])
                if c1 is None:
                    return

                if not Parser.__is_trailing_surrogate(c1):
                    self.__error("second half of escaped surrogate pair is "
                                 "not trailing surrogate")
                    return

                code_point = Parser.__utf16_decode_surrogate_pair(c0, c1)
                inp = inp[6:]
            else:
                code_point = c0

            out += unichr(code_point)

        self.__parser_input('string', out)

    def __lex_string_escape(self, c):
        # The character after a backslash is buffered verbatim; it is
        # interpreted later by __lex_finish_string().
        self.buffer += c
        self.lex_state = Parser.__lex_string
        return True

    def __lex_string(self, c):
        if c == '\\':
            self.buffer += c
            self.lex_state = Parser.__lex_string_escape
        elif c == '"':
            # Closing quote: the string token is complete.
            self.__lex_finish_string()
        elif ord(c) >= 0x20:
            self.buffer += c
        else:
            self.__error("U+%04X must be escaped in quoted string" % ord(c))
        return True
    def __lex_input(self, c):
        # Feeds one character to the current lexer state; returns whether
        # the character was consumed.
        eat = self.lex_state(self, c)
        assert eat is True or eat is False
        return eat
def __parse_start(self, token, unused_string):
if token == '{':
self.__push_object()
elif token == '[':
self.__push_array()
else:
self.__error("syntax error at beginning of input")
def __parse_end(self, unused_token, unused_string):
self.__error("trailing garbage at end of input")
def __parse_object_init(self, token, string):
if token == '}':
self.__parser_pop()
else:
self.__parse_object_name(token, string)
def __parse_object_name(self, token, string):
if token == 'string':
self.member_name = string
self.parse_state = Parser.__parse_object_colon
else:
self.__error("syntax error parsing object expecting string")
def __parse_object_colon(self, token, unused_string):
if token == ":":
self.parse_state = Parser.__parse_object_value
else:
self.__error("syntax error parsing object expecting ':'")
def __parse_object_value(self, token, string):
self.__parse_value(token, string, Parser.__parse_object_next)
def __parse_object_next(self, token, unused_string):
if token == ",":
self.parse_state = Parser.__parse_object_name
elif token == "}":
self.__parser_pop()
else:
self.__error("syntax error expecting '}' or ','")
def __parse_array_init(self, token, string):
if token == ']':
self.__parser_pop()
else:
self.__parse_array_value(token, string)
    def __parse_array_value(self, token, string):
        # An element may be any JSON value; afterward look for ',' or ']'.
        self.__parse_value(token, string, Parser.__parse_array_next)
def __parse_array_next(self, token, unused_string):
if token == ",":
self.parse_state = Parser.__parse_array_value
elif token == "]":
self.__parser_pop()
else:
self.__error("syntax error expecting ']' or ','")
    def __parser_input(self, token, string=None):
        # Hand one complete token to the parser FSM, resetting the lexer
        # so it is ready for the next token.
        self.lex_state = Parser.__lex_start
        self.buffer = ""
        self.parse_state(self, token, string)
def __put_value(self, value):
top = self.stack[-1]
if type(top) == dict:
top[self.member_name] = value
else:
top.append(value)
def __parser_push(self, new_json, next_state):
if len(self.stack) < Parser.MAX_HEIGHT:
if len(self.stack) > 0:
self.__put_value(new_json)
self.stack.append(new_json)
self.parse_state = next_state
else:
self.__error("input exceeds maximum nesting depth %d" %
Parser.MAX_HEIGHT)
    def __push_object(self):
        # Open a JSON object; members are parsed next.
        self.__parser_push({}, Parser.__parse_object_init)
    def __push_array(self):
        # Open a JSON array; elements are parsed next.
        self.__parser_push([], Parser.__parse_array_init)
def __parser_pop(self):
if len(self.stack) == 1:
self.parse_state = Parser.__parse_end
if not self.check_trailer:
self.done = True
else:
self.stack.pop()
top = self.stack[-1]
if type(top) == list:
self.parse_state = Parser.__parse_array_next
else:
self.parse_state = Parser.__parse_object_next
    def __parse_value(self, token, string, next_state):
        # Dispatch a token expected to be a JSON value: a literal
        # (null/true/false or a number), a string, or the opening of a
        # nested container.  (The 'long' reference makes this Python 2
        # only.)
        if token in [False, None, True] or type(token) in [int, long, float]:
            self.__put_value(token)
        elif token == 'string':
            self.__put_value(string)
        else:
            if token == '{':
                self.__push_object()
            elif token == '[':
                self.__push_array()
            else:
                self.__error("syntax error expecting value")
                return
        self.parse_state = next_state
def __error(self, message):
if self.error is None:
self.error = ("line %d, column %d, byte %d: %s"
% (self.line_number, self.column_number,
self.byte_number, message))
self.done = True
def feed(self, s):
i = 0
while True:
if self.done or i >= len(s):
return i
c = s[i]
if self.__lex_input(c):
self.byte_number += 1
if c == '\n':
self.column_number = 0
self.line_number += 1
else:
self.column_number += 1
i += 1
    def is_done(self):
        # True once a complete value has been parsed or an error seen.
        return self.done
def finish(self):
if self.lex_state == Parser.__lex_start:
pass
elif self.lex_state in (Parser.__lex_string,
Parser.__lex_string_escape):
self.__error("unexpected end of input in quoted string")
else:
self.__lex_input(" ")
if self.parse_state == Parser.__parse_start:
self.__error("empty input stream")
elif self.parse_state != Parser.__parse_end:
self.__error("unexpected end of input")
if self.error == None:
assert len(self.stack) == 1
return self.stack.pop()
else:
return self.error

View File

@ -1,560 +0,0 @@
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import ovs.json
import ovs.poller
import ovs.reconnect
import ovs.stream
import ovs.timeval
import ovs.util
import ovs.vlog
# Sentinel returned by recv() when the peer closed the connection.
EOF = ovs.util.EOF
vlog = ovs.vlog.Vlog("jsonrpc")
class Message(object):
    """A JSON-RPC 1.0 protocol message: request, notification,
    successful reply, or error reply."""
    # Message type codes.
    T_REQUEST = 0 # Request.
    T_NOTIFY = 1 # Notification.
    T_REPLY = 2 # Successful reply.
    T_ERROR = 3 # Error reply.
    # Human-readable names for the type codes.
    __types = {T_REQUEST: "request",
               T_NOTIFY: "notification",
               T_REPLY: "reply",
               T_ERROR: "error"}
    def __init__(self, type_, method, params, result, error, id):
        self.type = type_
        self.method = method
        self.params = params
        self.result = result
        self.error = error
        self.id = id
    # Process-wide counter used to allocate request ids.
    _next_id = 0
    @staticmethod
    def _create_id():
        # Allocate and return the next request id.
        this_id = Message._next_id
        Message._next_id += 1
        return this_id
    @staticmethod
    def create_request(method, params):
        """Build a request for 'method' with a freshly allocated id."""
        return Message(Message.T_REQUEST, method, params, None, None,
                       Message._create_id())
    @staticmethod
    def create_notify(method, params):
        """Build a notification: a request with no id, hence no reply."""
        return Message(Message.T_NOTIFY, method, params, None, None,
                       None)
    @staticmethod
    def create_reply(result, id):
        """Build a successful reply to the request with the given 'id'."""
        return Message(Message.T_REPLY, None, None, result, None, id)
    @staticmethod
    def create_error(error, id):
        """Build an error reply to the request with the given 'id'."""
        return Message(Message.T_ERROR, None, None, None, error, id)
    @staticmethod
    def type_to_string(type_):
        """Return the human-readable name for message type code 'type_'."""
        return Message.__types[type_]
    def __validate_arg(self, value, name, must_have):
        # Returns None when 'value' is present exactly as 'must_have'
        # requires, otherwise an explanatory string.
        if (value is not None) == (must_have != 0):
            return None
        else:
            type_name = Message.type_to_string(self.type)
            if must_have:
                verb = "must"
            else:
                verb = "must not"
            return "%s %s have \"%s\"" % (type_name, verb, name)
    def is_valid(self):
        """Returns None if this message is well-formed, otherwise a
        string describing the problem."""
        if self.params is not None and type(self.params) != list:
            return "\"params\" must be JSON array"
        # Each hex digit of the pattern states whether the corresponding
        # member (method, params, result, error, id) is required (1) or
        # forbidden (0) for the message type.
        pattern = {Message.T_REQUEST: 0x11001,
                   Message.T_NOTIFY: 0x11000,
                   Message.T_REPLY: 0x00101,
                   Message.T_ERROR: 0x00011}.get(self.type)
        if pattern is None:
            return "invalid JSON-RPC message type %s" % self.type
        return (
            self.__validate_arg(self.method, "method", pattern & 0x10000) or
            self.__validate_arg(self.params, "params", pattern & 0x1000) or
            self.__validate_arg(self.result, "result", pattern & 0x100) or
            self.__validate_arg(self.error, "error", pattern & 0x10) or
            self.__validate_arg(self.id, "id", pattern & 0x1))
    @staticmethod
    def from_json(json):
        """Convert a parsed JSON object into a Message.  Returns the new
        Message on success, an error string on failure.  ('unicode'
        makes this Python 2 only.)"""
        if type(json) != dict:
            return "message is not a JSON object"
        # Make a copy to avoid modifying the caller's dict.
        json = dict(json)
        if "method" in json:
            method = json.pop("method")
            if type(method) not in [str, unicode]:
                return "method is not a JSON string"
        else:
            method = None
        params = json.pop("params", None)
        result = json.pop("result", None)
        error = json.pop("error", None)
        id_ = json.pop("id", None)
        if len(json):
            return "message has unexpected member \"%s\"" % json.popitem()[0]
        # Infer the message type from which members are non-null.
        if result is not None:
            msg_type = Message.T_REPLY
        elif error is not None:
            msg_type = Message.T_ERROR
        elif id_ is not None:
            msg_type = Message.T_REQUEST
        else:
            msg_type = Message.T_NOTIFY
        msg = Message(msg_type, method, params, result, error, id_)
        validation_error = msg.is_valid()
        if validation_error is not None:
            return validation_error
        else:
            return msg
    def to_json(self):
        """Return this message as a JSON-serializable dict.  Note that
        error replies also carry a null "result", successful replies a
        null "error", and notifications a null "id" (JSON-RPC 1.0
        style)."""
        json = {}
        if self.method is not None:
            json["method"] = self.method
        if self.params is not None:
            json["params"] = self.params
        if self.result is not None or self.type == Message.T_ERROR:
            json["result"] = self.result
        if self.error is not None or self.type == Message.T_REPLY:
            json["error"] = self.error
        if self.id is not None or self.type == Message.T_NOTIFY:
            json["id"] = self.id
        return json
    def __str__(self):
        # Debug representation listing only the members that are set.
        s = [Message.type_to_string(self.type)]
        if self.method is not None:
            s.append("method=\"%s\"" % self.method)
        if self.params is not None:
            s.append("params=" + ovs.json.to_string(self.params))
        if self.result is not None:
            s.append("result=" + ovs.json.to_string(self.result))
        if self.error is not None:
            s.append("error=" + ovs.json.to_string(self.error))
        if self.id is not None:
            s.append("id=" + ovs.json.to_string(self.id))
        return ", ".join(s)
class Connection(object):
    """A JSON-RPC connection layered over an ovs.stream.Stream: buffers
    outbound messages and incrementally parses inbound ones."""
    def __init__(self, stream):
        self.name = stream.name
        self.stream = stream
        self.status = 0  # First fatal error code, or 0 while healthy.
        self.input = ""  # Received bytes not yet consumed by the parser.
        self.output = ""  # Serialized messages not yet written.
        self.parser = None  # In-progress ovs.json.Parser, if any.
        self.received_bytes = 0
    def close(self):
        self.stream.close()
        self.stream = None
    def run(self):
        """Write as much buffered output as the stream will accept."""
        if self.status:
            return
        while len(self.output):
            retval = self.stream.send(self.output)
            if retval >= 0:
                self.output = self.output[retval:]
            else:
                if retval != -errno.EAGAIN:
                    vlog.warn("%s: send error: %s" %
                              (self.name, os.strerror(-retval)))
                    self.error(-retval)
                break
    def wait(self, poller):
        # Register with 'poller' for whatever this connection is
        # currently waiting on.
        if not self.status:
            self.stream.run_wait(poller)
            if len(self.output):
                self.stream.send_wait(poller)
    def get_status(self):
        return self.status
    def get_backlog(self):
        # Bytes queued for transmission; 0 after an error.
        if self.status != 0:
            return 0
        else:
            return len(self.output)
    def get_received_bytes(self):
        return self.received_bytes
    def __log_msg(self, title, msg):
        vlog.dbg("%s: %s %s" % (self.name, title, msg))
    def send(self, msg):
        """Queue 'msg' (a Message) and try to send it without blocking;
        returns the connection status."""
        if self.status:
            return self.status
        self.__log_msg("send", msg)
        was_empty = len(self.output) == 0
        self.output += ovs.json.to_string(msg.to_json())
        if was_empty:
            self.run()
        return self.status
    def send_block(self, msg):
        """Send 'msg', blocking until it is fully written or the
        connection fails; returns the final status."""
        error = self.send(msg)
        if error:
            return error
        while True:
            self.run()
            if not self.get_backlog() or self.get_status():
                return self.status
            poller = ovs.poller.Poller()
            self.wait(poller)
            poller.block()
    def recv(self):
        """Try to receive one message without blocking.  Returns
        (0, Message) on success, otherwise (error, None) where error is
        errno.EAGAIN when no complete message is available yet."""
        if self.status:
            return self.status, None
        while True:
            if not self.input:
                error, data = self.stream.recv(4096)
                if error:
                    if error == errno.EAGAIN:
                        return error, None
                    else:
                        # XXX rate-limit
                        vlog.warn("%s: receive error: %s"
                                  % (self.name, os.strerror(error)))
                        self.error(error)
                        return self.status, None
                elif not data:
                    # Orderly shutdown by the peer.
                    self.error(EOF)
                    return EOF, None
                else:
                    self.input += data
                    self.received_bytes += len(data)
            else:
                if self.parser is None:
                    self.parser = ovs.json.Parser()
                # Feed buffered input; keep whatever the parser did not
                # consume (the start of the next message).
                self.input = self.input[self.parser.feed(self.input):]
                if self.parser.is_done():
                    msg = self.__process_msg()
                    if msg:
                        return 0, msg
                    else:
                        return self.status, None
    def recv_block(self):
        """Block until a message arrives or the connection fails."""
        while True:
            error, msg = self.recv()
            if error != errno.EAGAIN:
                return error, msg
            self.run()
            poller = ovs.poller.Poller()
            self.wait(poller)
            self.recv_wait(poller)
            poller.block()
    def transact_block(self, request):
        """Send 'request' and block until a reply or error with a
        matching id arrives; returns (error, reply)."""
        id_ = request.id
        error = self.send(request)
        reply = None
        while not error:
            error, reply = self.recv_block()
            if (reply
                and (reply.type == Message.T_REPLY
                     or reply.type == Message.T_ERROR)
                and reply.id == id_):
                break
        return error, reply
    def __process_msg(self):
        # Convert a completed JSON parse into a Message, flagging parse
        # and protocol errors as EPROTO.  ('unicode' is Python 2 only.)
        json = self.parser.finish()
        self.parser = None
        if type(json) in [str, unicode]:
            # The parser returns a string to report an error.
            # XXX rate-limit
            vlog.warn("%s: error parsing stream: %s" % (self.name, json))
            self.error(errno.EPROTO)
            return
        msg = Message.from_json(json)
        if not isinstance(msg, Message):
            # from_json returns a string to report an error.
            # XXX rate-limit
            vlog.warn("%s: received bad JSON-RPC message: %s"
                      % (self.name, msg))
            self.error(errno.EPROTO)
            return
        self.__log_msg("received", msg)
        return msg
    def recv_wait(self, poller):
        # Wake immediately when data is already buffered or the
        # connection has failed.
        if self.status or self.input:
            poller.immediate_wake()
        else:
            self.stream.recv_wait(poller)
    def error(self, error):
        # Record only the first error; drop the stream and any pending
        # output.
        if self.status == 0:
            self.status = error
            self.stream.close()
            self.output = ""
class Session(object):
    """A JSON-RPC session with reconnection."""
    def __init__(self, reconnect, rpc):
        self.reconnect = reconnect  # ovs.reconnect.Reconnect FSM.
        self.rpc = rpc  # Established Connection, if any.
        self.stream = None  # Stream still connecting, if any.
        self.pstream = None  # Listening PassiveStream, if any.
        self.seqno = 0  # Bumped on every connect/disconnect.
    @staticmethod
    def open(name):
        """Creates and returns a Session that maintains a JSON-RPC session to
        'name', which should be a string acceptable to ovs.stream.Stream or
        ovs.stream.PassiveStream's initializer.
        If 'name' is an active connection method, e.g. "tcp:127.1.2.3", the new
        session connects and reconnects, with back-off, to 'name'.
        If 'name' is a passive connection method, e.g. "ptcp:", the new session
        listens for connections to 'name'.  It maintains at most one connection
        at any given time.  Any new connection causes the previous one (if any)
        to be dropped."""
        reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
        reconnect.set_name(name)
        reconnect.enable(ovs.timeval.msec())
        if ovs.stream.PassiveStream.is_valid_name(name):
            reconnect.set_passive(True, ovs.timeval.msec())
        if ovs.stream.stream_or_pstream_needs_probes(name):
            reconnect.set_probe_interval(0)
        return Session(reconnect, None)
    @staticmethod
    def open_unreliably(jsonrpc):
        # Wrap an existing connection 'jsonrpc'; no reconnection is
        # attempted once it drops (max_tries=0) and logging is quieted.
        reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
        reconnect.set_quiet(True)
        reconnect.set_name(jsonrpc.name)
        reconnect.set_max_tries(0)
        reconnect.connected(ovs.timeval.msec())
        return Session(reconnect, jsonrpc)
    def close(self):
        if self.rpc is not None:
            self.rpc.close()
            self.rpc = None
        if self.stream is not None:
            self.stream.close()
            self.stream = None
        if self.pstream is not None:
            self.pstream.close()
            self.pstream = None
    def __disconnect(self):
        # Drop the active connection or the in-progress stream,
        # bumping seqno so clients can notice the change.
        if self.rpc is not None:
            self.rpc.error(EOF)
            self.rpc.close()
            self.rpc = None
            self.seqno += 1
        elif self.stream is not None:
            self.stream.close()
            self.stream = None
            self.seqno += 1
    def __connect(self):
        # Start a new connection attempt (active) or (re)open the
        # listener (passive), informing the reconnect FSM of the result.
        self.__disconnect()
        name = self.reconnect.get_name()
        if not self.reconnect.is_passive():
            error, self.stream = ovs.stream.Stream.open(name)
            if not error:
                self.reconnect.connecting(ovs.timeval.msec())
            else:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
        elif self.pstream is not None:
            error, self.pstream = ovs.stream.PassiveStream.open(name)
            if not error:
                self.reconnect.listening(ovs.timeval.msec())
            else:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
        self.seqno += 1
    def run(self):
        """Drive the session: accept incoming connections, push queued
        output, and perform whatever action the reconnect FSM asks for."""
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None
        if self.rpc:
            backlog = self.rpc.get_backlog()
            self.rpc.run()
            if self.rpc.get_backlog() < backlog:
                # Data previously caught in a queue was successfully sent (or
                # there's an error, which we'll catch below).
                #
                # We don't count data that is successfully sent immediately as
                # activity, because there's a lot of queuing downstream from
                # us, which means that we can push a lot of data into a
                # connection that has stalled and won't ever recover.
                self.reconnect.activity(ovs.timeval.msec())
            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
        elif self.stream is not None:
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.stream.close()
                self.stream = None
        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            if self.rpc:
                # Inactivity probe: an "echo" request whose reply is
                # suppressed in recv().
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action == None
    def wait(self, poller):
        if self.rpc is not None:
            self.rpc.wait(poller)
        elif self.stream is not None:
            self.stream.run_wait(poller)
            self.stream.connect_wait(poller)
        if self.pstream is not None:
            self.pstream.wait(poller)
        self.reconnect.wait(poller, ovs.timeval.msec())
    def get_backlog(self):
        # Bytes queued on the active connection, 0 when disconnected.
        if self.rpc is not None:
            return self.rpc.get_backlog()
        else:
            return 0
    def get_name(self):
        return self.reconnect.get_name()
    def send(self, msg):
        """Send 'msg' on the active connection; returns errno.ENOTCONN
        when there is none."""
        if self.rpc is not None:
            return self.rpc.send(msg)
        else:
            return errno.ENOTCONN
    def recv(self):
        """Try to receive one message; returns it, or None when nothing
        is available.  Echo probes are answered/suppressed internally."""
        if self.rpc is not None:
            received_bytes = self.rpc.get_received_bytes()
            error, msg = self.rpc.recv()
            if received_bytes != self.rpc.get_received_bytes():
                # Data was successfully received.
                #
                # Previously we only counted receiving a full message as
                # activity, but with large messages or a slow connection that
                # policy could time out the session mid-message.
                self.reconnect.activity(ovs.timeval.msec())
            if not error:
                if msg.type == Message.T_REQUEST and msg.method == "echo":
                    # Echo request.  Send reply.
                    self.send(Message.create_reply(msg.params, msg.id))
                elif msg.type == Message.T_REPLY and msg.id == "echo":
                    # It's a reply to our echo request.  Suppress it.
                    pass
                else:
                    return msg
        return None
    def recv_wait(self, poller):
        if self.rpc is not None:
            self.rpc.recv_wait(poller)
    def is_alive(self):
        """True while connected, connecting, or still allowed to retry."""
        if self.rpc is not None or self.stream is not None:
            return True
        else:
            max_tries = self.reconnect.get_max_tries()
            return max_tries is None or max_tries > 0
    def is_connected(self):
        return self.rpc is not None
    def get_seqno(self):
        # Changes whenever the session connects or disconnects.
        return self.seqno
    def force_reconnect(self):
        self.reconnect.force_reconnect(ovs.timeval.msec())

View File

@ -1,70 +0,0 @@
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
from ovs.db import error
import ovs.db.parser
# Matches the textual form of a UUID: 8-4-4-4-12 case-insensitive hex
# digits, e.g. "123e4567-e89b-12d3-a456-426614174000".
uuidRE = re.compile("^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$"
                    .replace('x', '[0-9a-fA-F]'))
def zero():
    """Return the all-zeros UUID."""
    return uuid.UUID("00000000-0000-0000-0000-000000000000")
def is_valid_string(s):
    """Return True if 's' is the textual form of a UUID."""
    return bool(uuidRE.match(s))
def from_string(s):
    """Parse textual UUID 's' into a uuid.UUID, raising error.Error when
    it is malformed."""
    if is_valid_string(s):
        return uuid.UUID(s)
    raise error.Error("%s is not a valid UUID" % s)
def from_json(json, symtab=None):
    # Parse OVSDB ["uuid", "..."] JSON notation into a uuid.UUID.  When
    # a 'symtab' dict is supplied, ["named-uuid", name] is also
    # accepted: each name is bound to a fresh random UUID on first use.
    # Raises error.Error on malformed input.  (The "except E, e" syntax
    # is Python 2 only.)
    try:
        s = ovs.db.parser.unwrap_json(json, "uuid", [str, unicode], "string")
        if not uuidRE.match(s):
            raise error.Error("\"%s\" is not a valid UUID" % s, json)
        return uuid.UUID(s)
    except error.Error, e:
        if not symtab:
            raise e
        try:
            name = ovs.db.parser.unwrap_json(json, "named-uuid",
                                             [str, unicode], "string")
        except error.Error:
            # Neither form matched; report the original "uuid" error.
            raise e
        if name not in symtab:
            symtab[name] = uuid.uuid4()
        return symtab[name]
def to_json(uuid_):
    """Wrap 'uuid_' in the OVSDB JSON notation for a UUID."""
    return ["uuid", "%s" % uuid_]
def to_c_assignment(uuid_, var):
    """Return a list of four C statements that store 'uuid_' into the
    "struct uuid" (Open vSwitch lib/uuid.h) named 'var'."""
    hex_digits = uuid_.hex
    statements = []
    for part in range(4):
        chunk = hex_digits[part * 8:(part + 1) * 8]
        statements.append("%s.parts[%d] = 0x%s;" % (var, part, chunk))
    return statements

View File

@ -1,203 +0,0 @@
# Copyright (c) 2010 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import ovs.timeval
import ovs.vlog
import select
import socket
try:
import eventlet.patcher
def _using_eventlet_green_select():
return eventlet.patcher.is_monkey_patched(select)
except:
def _using_eventlet_green_select():
return False
vlog = ovs.vlog.Vlog("poller")
# Event bits, mirroring the values select.poll uses on Linux.
POLLIN = 0x001
POLLOUT = 0x004
POLLERR = 0x008
POLLHUP = 0x010
POLLNVAL = 0x020
# eventlet/gevent doesn't support select.poll. If select.poll is used,
# python interpreter is blocked as a whole instead of switching from the
# current thread that is about to block to other runnable thread.
# So emulate select.poll by select.select because using python means that
# performance isn't so important.
class _SelectSelect(object):
    """ select.poll emulation by using select.select.
    Only register and poll are needed at the moment.
    """
    def __init__(self):
        self.rlist = []
        self.wlist = []
        self.xlist = []

    def register(self, fd, events):
        """Register 'fd' for the given POLL* 'events'.  'fd' may be an
        integer file descriptor or any object with a fileno() method
        (socket, file, pipe, ...), matching the contract documented by
        Poller.fd_wait()."""
        if not isinstance(fd, int):
            # Previously only socket.socket was converted, so other
            # file-like objects tripped the assertion below even though
            # Poller.fd_wait() promises to accept them.
            fd = fd.fileno()
        assert isinstance(fd, int)
        if events & POLLIN:
            self.rlist.append(fd)
            events &= ~POLLIN
        if events & POLLOUT:
            self.wlist.append(fd)
            events &= ~POLLOUT
        if events:
            self.xlist.append(fd)

    def poll(self, timeout):
        """Wait up to 'timeout' ms (-1 means forever) and return a list
        of (fd, events) pairs like select.poll().poll()."""
        if timeout == -1:
            # epoll uses -1 for infinite timeout, select uses None.
            timeout = None
        else:
            timeout = float(timeout) / 1000
        # XXX workaround a bug in eventlet
        # see https://github.com/eventlet/eventlet/pull/25
        if timeout == 0 and _using_eventlet_green_select():
            timeout = 0.1
        rlist, wlist, xlist = select.select(self.rlist, self.wlist,
                                            self.xlist, timeout)
        # collections.defaultdict is introduced by python 2.5 and
        # XenServer uses python 2.4.  We don't use it for XenServer.
        # events_dict = collections.defaultdict(int)
        # events_dict[fd] |= event
        events_dict = {}
        for fd in rlist:
            events_dict[fd] = events_dict.get(fd, 0) | POLLIN
        for fd in wlist:
            events_dict[fd] = events_dict.get(fd, 0) | POLLOUT
        for fd in xlist:
            events_dict[fd] = events_dict.get(fd, 0) | (POLLERR |
                                                        POLLHUP |
                                                        POLLNVAL)
        return events_dict.items()
# Poller implementation used below; kept pluggable so select.poll can be
# substituted when no green-thread library is in play.
SelectPoll = _SelectSelect
# If eventlet/gevent isn't used, we can use select.poll by replacing
# _SelectPoll with select.poll class
# _SelectPoll = select.poll
class Poller(object):
    """High-level wrapper around the "poll" system call.
    Intended usage is for the program's main loop to go about its business
    servicing whatever events it needs to.  Then, when it runs out of immediate
    tasks, it calls each subordinate module or object's "wait" function, which
    in turn calls one (or more) of the functions Poller.fd_wait(),
    Poller.immediate_wake(), and Poller.timer_wait() to register to be awakened
    when the appropriate event occurs.  Then the main loop calls
    Poller.block(), which blocks until one of the registered events happens."""
    def __init__(self):
        self.__reset()
    def fd_wait(self, fd, events):
        """Registers 'fd' as waiting for the specified 'events' (which should
        be select.POLLIN or select.POLLOUT or their bitwise-OR).  The following
        call to self.block() will wake up when 'fd' becomes ready for one or
        more of the requested events.
        The event registration is one-shot: only the following call to
        self.block() is affected.  The event will need to be re-registered
        after self.block() is called if it is to persist.
        'fd' may be an integer file descriptor or an object with a fileno()
        method that returns an integer file descriptor."""
        self.poll.register(fd, events)
    def __timer_wait(self, msec):
        # Keep only the nearest (smallest) pending timeout.
        if self.timeout < 0 or msec < self.timeout:
            self.timeout = msec
    def timer_wait(self, msec):
        """Causes the following call to self.block() to block for no more than
        'msec' milliseconds.  If 'msec' is nonpositive, the following call to
        self.block() will not block at all.
        The timer registration is one-shot: only the following call to
        self.block() is affected.  The timer will need to be re-registered
        after self.block() is called if it is to persist."""
        if msec <= 0:
            self.immediate_wake()
        else:
            self.__timer_wait(msec)
    def timer_wait_until(self, msec):
        """Causes the following call to self.block() to wake up when the
        current time, as returned by ovs.timeval.msec(), reaches 'msec' or
        later.  If 'msec' is earlier than the current time, the following call
        to self.block() will not block at all.
        The timer registration is one-shot: only the following call to
        self.block() is affected.  The timer will need to be re-registered
        after self.block() is called if it is to persist."""
        now = ovs.timeval.msec()
        if msec <= now:
            self.immediate_wake()
        else:
            self.__timer_wait(msec - now)
    def immediate_wake(self):
        """Causes the following call to self.block() to wake up immediately,
        without blocking."""
        self.timeout = 0
    def block(self):
        """Blocks until one or more of the events registered with
        self.fd_wait() occurs, or until the minimum duration registered with
        self.timer_wait() elapses, or not at all if self.immediate_wake() has
        been called."""
        try:
            try:
                events = self.poll.poll(self.timeout)
                self.__log_wakeup(events)
            except select.error, e:
                # Python 2 except syntax; 'e' unpacks to (errno, message).
                # EINTR simply means a signal interrupted the wait.
                # XXX rate-limit
                error, msg = e
                if error != errno.EINTR:
                    vlog.err("poll: %s" % e[1])
        finally:
            # Registrations are one-shot: start fresh for the next round.
            self.__reset()
    def __log_wakeup(self, events):
        # Debug-log why block() returned: timeout or per-fd event bits.
        if not events:
            vlog.dbg("%d-ms timeout" % self.timeout)
        else:
            for fd, revents in events:
                if revents != 0:
                    s = ""
                    if revents & POLLIN:
                        s += "[POLLIN]"
                    if revents & POLLOUT:
                        s += "[POLLOUT]"
                    if revents & POLLERR:
                        s += "[POLLERR]"
                    if revents & POLLHUP:
                        s += "[POLLHUP]"
                    if revents & POLLNVAL:
                        s += "[POLLNVAL]"
                    vlog.dbg("%s on fd %d" % (s, fd))
    def __reset(self):
        self.poll = SelectPoll()
        self.timeout = -1

View File

@ -1,41 +0,0 @@
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
def _signal_status_msg(type_, signr):
s = "%s by signal %d" % (type_, signr)
for name in signal.__dict__:
if name.startswith("SIG") and getattr(signal, name) == signr:
return "%s (%s)" % (s, name)
return s
def status_msg(status):
    """Given 'status', a process status in the form reported by
    waitpid(2), return a phrase describing how the process terminated:
    normal exit, death by signal, a stop, or abnormal termination, with
    ", core dumped" appended when a core file was produced."""
    if os.WIFEXITED(status):
        description = "exit status %d" % os.WEXITSTATUS(status)
    elif os.WIFSIGNALED(status):
        description = _signal_status_msg("killed", os.WTERMSIG(status))
    elif os.WIFSTOPPED(status):
        description = _signal_status_msg("stopped", os.WSTOPSIG(status))
    else:
        description = "terminated abnormally (%x)" % status
    if os.WCOREDUMP(status):
        description += ", core dumped"
    return description

View File

@ -1,588 +0,0 @@
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ovs.vlog
import ovs.util
# Values returned by Reconnect.run()
CONNECT = 'connect'
DISCONNECT = 'disconnect'
PROBE = 'probe'
# Sentinel error value meaning "connection closed by peer".
EOF = ovs.util.EOF
vlog = ovs.vlog.Vlog("reconnect")
class Reconnect(object):
"""A finite-state machine for connecting and reconnecting to a network
resource with exponential backoff. It also provides optional support for
detecting a connection on which the peer is no longer responding.
The library does not implement anything networking related, only an FSM for
networking code to use.
Many Reconnect methods take a "now" argument. This makes testing easier
since there is no hidden state. When not testing, just pass the return
value of ovs.time.msec(). (Perhaps this design should be revisited
later.)"""
    class Void(object):
        # State: FSM is disabled; no deadline, nothing to do.
        name = "VOID"
        is_connected = False
        @staticmethod
        def deadline(fsm):
            return None
        @staticmethod
        def run(fsm, now):
            return None
    class Listening(object):
        # State: passive mode, waiting for a peer to connect to us.
        name = "LISTENING"
        is_connected = False
        @staticmethod
        def deadline(fsm):
            return None
        @staticmethod
        def run(fsm, now):
            return None
    class Backoff(object):
        # State: waiting out the backoff delay before the next attempt;
        # once it elapses the client is told to CONNECT.
        name = "BACKOFF"
        is_connected = False
        @staticmethod
        def deadline(fsm):
            return fsm.state_entered + fsm.backoff
        @staticmethod
        def run(fsm, now):
            return CONNECT
    class ConnectInProgress(object):
        # State: a connection attempt is underway; give it at least one
        # second (or the current backoff) before declaring failure.
        name = "CONNECTING"
        is_connected = False
        @staticmethod
        def deadline(fsm):
            return fsm.state_entered + max(1000, fsm.backoff)
        @staticmethod
        def run(fsm, now):
            return DISCONNECT
    class Active(object):
        # State: connected with recent activity.  When probe_interval
        # elapses without activity, send an inactivity probe and move to
        # Idle to await the response.
        name = "ACTIVE"
        is_connected = True
        @staticmethod
        def deadline(fsm):
            if fsm.probe_interval:
                base = max(fsm.last_activity, fsm.state_entered)
                return base + fsm.probe_interval
            return None
        @staticmethod
        def run(fsm, now):
            vlog.dbg("%s: idle %d ms, sending inactivity probe"
                     % (fsm.name,
                        now - max(fsm.last_activity, fsm.state_entered)))
            fsm._transition(now, Reconnect.Idle)
            return PROBE
    class Idle(object):
        # State: connected, inactivity probe sent; if no activity
        # arrives within another probe_interval, give up and disconnect.
        name = "IDLE"
        is_connected = True
        @staticmethod
        def deadline(fsm):
            if fsm.probe_interval:
                return fsm.state_entered + fsm.probe_interval
            return None
        @staticmethod
        def run(fsm, now):
            vlog.err("%s: no response to inactivity probe after %.3g "
                     "seconds, disconnecting"
                     % (fsm.name, (now - fsm.state_entered) / 1000.0))
            return DISCONNECT
    class Reconnect(object):
        # State: an immediate reconnect has been requested (note: this
        # nested class shadows the outer Reconnect FSM class name).
        name = "RECONNECT"
        is_connected = False
        @staticmethod
        def deadline(fsm):
            # Expires immediately.
            return fsm.state_entered
        @staticmethod
        def run(fsm, now):
            return DISCONNECT
    def __init__(self, now):
        """Creates and returns a new reconnect FSM with default settings.  The
        FSM is initially disabled.  The caller will likely want to call
        self.enable() and self.set_name() on the returned object."""
        self.name = "void"
        self.min_backoff = 1000      # Minimum delay between attempts (ms).
        self.max_backoff = 8000      # Maximum delay between attempts (ms).
        self.probe_interval = 5000   # Inactivity probe interval (ms).
        self.passive = False
        self.info_level = vlog.info  # Logger for informational messages.
        self.state = Reconnect.Void
        self.state_entered = now     # Time the current state was entered.
        self.backoff = 0             # Current backoff delay (ms).
        self.last_activity = now
        self.last_connected = None
        self.last_disconnected = None
        self.max_tries = None        # Remaining attempts; None = unlimited.
        self.creation_time = now
        # Statistics.
        self.n_attempted_connections = 0
        self.n_successful_connections = 0
        self.total_connected_duration = 0
        self.seqno = 0
def set_quiet(self, quiet):
"""If 'quiet' is true, this object will log informational messages at
debug level, by default keeping them out of log files. This is
appropriate if the connection is one that is expected to be
short-lived, so that the log messages are merely distracting.
If 'quiet' is false, this object logs informational messages at info
level. This is the default.
This setting has no effect on the log level of debugging, warning, or
error messages."""
if quiet:
self.info_level = vlog.dbg
else:
self.info_level = vlog.info
    def get_name(self):
        # Name set by set_name(); used in log messages.
        return self.name
def set_name(self, name):
"""Sets this object's name to 'name'. If 'name' is None, then "void"
is used instead.
The name is used in log messages."""
if name is None:
self.name = "void"
else:
self.name = name
    def get_min_backoff(self):
        """Return the minimum number of milliseconds to back off between
        consecutive connection attempts.  The default is 1000 ms."""
        return self.min_backoff
    def get_max_backoff(self):
        """Return the maximum number of milliseconds to back off between
        consecutive connection attempts.  The default is 8000 ms."""
        return self.max_backoff
    def get_probe_interval(self):
        """Returns the "probe interval" in milliseconds.  If this is zero, it
        disables the connection keepalive feature.  If it is nonzero, then if
        the interval passes while the FSM is connected and without
        self.activity() being called, self.run() returns ovs.reconnect.PROBE.
        If the interval passes again without self.activity() being called,
        self.run() returns ovs.reconnect.DISCONNECT."""
        return self.probe_interval
    def set_max_tries(self, max_tries):
        """Limits the maximum number of times that this object will ask the
        client to try to reconnect to 'max_tries'.  None (the default) means an
        unlimited number of tries.
        After the number of tries has expired, the FSM will disable itself
        instead of backing off and retrying."""
        self.max_tries = max_tries
    def get_max_tries(self):
        """Returns the current remaining number of connection attempts,
        None if the number is unlimited."""
        return self.max_tries
def set_backoff(self, min_backoff, max_backoff):
"""Configures the backoff parameters for this FSM. 'min_backoff' is
the minimum number of milliseconds, and 'max_backoff' is the maximum,
between connection attempts.
'min_backoff' must be at least 1000, and 'max_backoff' must be greater
than or equal to 'min_backoff'."""
self.min_backoff = max(min_backoff, 1000)
if self.max_backoff:
self.max_backoff = max(max_backoff, 1000)
else:
self.max_backoff = 8000
if self.min_backoff > self.max_backoff:
self.max_backoff = self.min_backoff
if (self.state == Reconnect.Backoff and
self.backoff > self.max_backoff):
self.backoff = self.max_backoff
def set_probe_interval(self, probe_interval):
"""Sets the "probe interval" to 'probe_interval', in milliseconds. If
this is zero, it disables the connection keepalive feature. If it is
nonzero, then if the interval passes while this FSM is connected and
without self.activity() being called, self.run() returns
ovs.reconnect.PROBE. If the interval passes again without
self.activity() being called, self.run() returns
ovs.reconnect.DISCONNECT.
If 'probe_interval' is nonzero, then it will be forced to a value of at
least 1000 ms."""
if probe_interval:
self.probe_interval = max(1000, probe_interval)
else:
self.probe_interval = 0
def is_passive(self):
"""Returns true if 'fsm' is in passive mode, false if 'fsm' is in
active mode (the default)."""
return self.passive
def set_passive(self, passive, now):
"""Configures this FSM for active or passive mode. In active mode (the
default), the FSM is attempting to connect to a remote host. In
passive mode, the FSM is listening for connections from a remote
host."""
if self.passive != passive:
self.passive = passive
if ((passive and self.state in (Reconnect.ConnectInProgress,
Reconnect.Reconnect)) or
(not passive and self.state == Reconnect.Listening
and self.__may_retry())):
self._transition(now, Reconnect.Backoff)
self.backoff = 0
def is_enabled(self):
"""Returns true if this FSM has been enabled with self.enable().
Calling another function that indicates a change in connection state,
such as self.disconnected() or self.force_reconnect(), will also enable
a reconnect FSM."""
return self.state != Reconnect.Void
def enable(self, now):
"""If this FSM is disabled (the default for newly created FSMs),
enables it, so that the next call to reconnect_run() for 'fsm' will
return ovs.reconnect.CONNECT.
If this FSM is not disabled, this function has no effect."""
if self.state == Reconnect.Void and self.__may_retry():
self._transition(now, Reconnect.Backoff)
self.backoff = 0
def disable(self, now):
"""Disables this FSM. Until 'fsm' is enabled again, self.run() will
always return 0."""
if self.state != Reconnect.Void:
self._transition(now, Reconnect.Void)
def force_reconnect(self, now):
"""If this FSM is enabled and currently connected (or attempting to
connect), forces self.run() to return ovs.reconnect.DISCONNECT the next
time it is called, which should cause the client to drop the connection
(or attempt), back off, and then reconnect."""
if self.state in (Reconnect.ConnectInProgress,
Reconnect.Active,
Reconnect.Idle):
self._transition(now, Reconnect.Reconnect)
    def disconnected(self, now, error):
        """Tell this FSM that the connection dropped or that a connection
        attempt failed.  'error' specifies the reason: a positive value
        represents an errno value, EOF indicates that the connection was closed
        by the peer (e.g. read() returned 0), and 0 indicates no specific
        error.

        The FSM will back off, then reconnect."""
        if self.state not in (Reconnect.Backoff, Reconnect.Void):
            # Report what happened
            if self.state in (Reconnect.Active, Reconnect.Idle):
                # We were connected: log a drop, a peer close, or a generic
                # drop depending on 'error'.
                if error > 0:
                    vlog.warn("%s: connection dropped (%s)"
                              % (self.name, os.strerror(error)))
                elif error == EOF:
                    self.info_level("%s: connection closed by peer"
                                    % self.name)
                else:
                    self.info_level("%s: connection dropped" % self.name)
            elif self.state == Reconnect.Listening:
                if error > 0:
                    vlog.warn("%s: error listening for connections (%s)"
                              % (self.name, os.strerror(error)))
                else:
                    self.info_level("%s: error listening for connections"
                                    % self.name)
            else:
                # An attempt (connect or listen) failed before completing.
                if self.passive:
                    type_ = "listen"
                else:
                    type_ = "connection"
                if error > 0:
                    vlog.warn("%s: %s attempt failed (%s)"
                              % (self.name, type_, os.strerror(error)))
                else:
                    self.info_level("%s: %s attempt timed out"
                                    % (self.name, type_))
            if (self.state in (Reconnect.Active, Reconnect.Idle)):
                self.last_disconnected = now
            # Back off: a connection that stayed up at least one backoff
            # period resets the delay to the minimum (or 0 when passive);
            # otherwise the delay doubles, capped at max_backoff.
            if (self.state in (Reconnect.Active, Reconnect.Idle) and
                (self.last_activity - self.last_connected >= self.backoff or
                 self.passive)):
                if self.passive:
                    self.backoff = 0
                else:
                    self.backoff = self.min_backoff
            else:
                if self.backoff < self.min_backoff:
                    self.backoff = self.min_backoff
                elif self.backoff >= self.max_backoff / 2:
                    self.backoff = self.max_backoff
                else:
                    self.backoff *= 2
                if self.passive:
                    self.info_level("%s: waiting %.3g seconds before trying "
                                    "to listen again"
                                    % (self.name, self.backoff / 1000.0))
                else:
                    self.info_level("%s: waiting %.3g seconds before reconnect"
                                    % (self.name, self.backoff / 1000.0))
            # __may_retry() consumes one unit of the retry budget; when it
            # runs out, the FSM disables itself instead of backing off.
            if self.__may_retry():
                self._transition(now, Reconnect.Backoff)
            else:
                self._transition(now, Reconnect.Void)
def connecting(self, now):
"""Tell this FSM that a connection or listening attempt is in progress.
The FSM will start a timer, after which the connection or listening
attempt will be aborted (by returning ovs.reconnect.DISCONNECT from
self.run())."""
if self.state != Reconnect.ConnectInProgress:
if self.passive:
self.info_level("%s: listening..." % self.name)
else:
self.info_level("%s: connecting..." % self.name)
self._transition(now, Reconnect.ConnectInProgress)
def listening(self, now):
"""Tell this FSM that the client is listening for connection attempts.
This state last indefinitely until the client reports some change.
The natural progression from this state is for the client to report
that a connection has been accepted or is in progress of being
accepted, by calling self.connecting() or self.connected().
The client may also report that listening failed (e.g. accept()
returned an unexpected error such as ENOMEM) by calling
self.listen_error(), in which case the FSM will back off and eventually
return ovs.reconnect.CONNECT from self.run() to tell the client to try
listening again."""
if self.state != Reconnect.Listening:
self.info_level("%s: listening..." % self.name)
self._transition(now, Reconnect.Listening)
def listen_error(self, now, error):
"""Tell this FSM that the client's attempt to accept a connection
failed (e.g. accept() returned an unexpected error such as ENOMEM).
If the FSM is currently listening (self.listening() was called), it
will back off and eventually return ovs.reconnect.CONNECT from
self.run() to tell the client to try listening again. If there is an
active connection, this will be delayed until that connection drops."""
if self.state == Reconnect.Listening:
self.disconnected(now, error)
def connected(self, now):
"""Tell this FSM that the connection was successful.
The FSM will start the probe interval timer, which is reset by
self.activity(). If the timer expires, a probe will be sent (by
returning ovs.reconnect.PROBE from self.run(). If the timer expires
again without being reset, the connection will be aborted (by returning
ovs.reconnect.DISCONNECT from self.run()."""
if not self.state.is_connected:
self.connecting(now)
self.info_level("%s: connected" % self.name)
self._transition(now, Reconnect.Active)
self.last_connected = now
def connect_failed(self, now, error):
"""Tell this FSM that the connection attempt failed.
The FSM will back off and attempt to reconnect."""
self.connecting(now)
self.disconnected(now, error)
def activity(self, now):
"""Tell this FSM that some activity occurred on the connection. This
resets the probe interval timer, so that the connection is known not to
be idle."""
if self.state != Reconnect.Active:
self._transition(now, Reconnect.Active)
self.last_activity = now
    def _transition(self, now, state):
        # Internal: move the FSM into 'state' at time 'now', maintaining the
        # attempt/success counters, the cumulative connected time, and the
        # change sequence number along the way.
        if self.state == Reconnect.ConnectInProgress:
            # Leaving ConnectInProgress means one attempt finished; it
            # succeeded only if the new state is Active.
            self.n_attempted_connections += 1
            if state == Reconnect.Active:
                self.n_successful_connections += 1
        connected_before = self.state.is_connected
        connected_now = state.is_connected
        if connected_before != connected_now:
            if connected_before:
                self.total_connected_duration += now - self.last_connected
            # Bump seqno on every connect/disconnect edge so callers can
            # cheaply detect connection-state changes.
            self.seqno += 1
        vlog.dbg("%s: entering %s" % (self.name, state.name))
        self.state = state
        self.state_entered = now
    def run(self, now):
        """Assesses whether any action should be taken on this FSM.  The return
        value is one of:

            - None: The client need not take any action.

            - Active client, ovs.reconnect.CONNECT: The client should start a
              connection attempt and indicate this by calling
              self.connecting().  If the connection attempt has definitely
              succeeded, it should call self.connected().  If the connection
              attempt has definitely failed, it should call
              self.connect_failed().

              The FSM is smart enough to back off correctly after successful
              connections that quickly abort, so it is OK to call
              self.connected() after a low-level successful connection
              (e.g. connect()) even if the connection might soon abort due to a
              failure at a high-level (e.g. SSL negotiation failure).

            - Passive client, ovs.reconnect.CONNECT: The client should try to
              listen for a connection, if it is not already listening.  It
              should call self.listening() if successful, otherwise
              self.connecting() or reconnected_connect_failed() if the attempt
              is in progress or definitely failed, respectively.

              A listening passive client should constantly attempt to accept a
              new connection and report an accepted connection with
              self.connected().

            - ovs.reconnect.DISCONNECT: The client should abort the current
              connection or connection attempt or listen attempt and call
              self.disconnected() or self.connect_failed() to indicate it.

            - ovs.reconnect.PROBE: The client should send some kind of request
              to the peer that will elicit a response, to ensure that the
              connection is indeed in working order.  (This will only be
              returned if the "probe interval" is nonzero--see
              self.set_probe_interval())."""
        # Each state object knows its own deadline and which action to
        # request once that deadline has passed.
        deadline = self.state.deadline(self)
        if deadline is not None and now >= deadline:
            return self.state.run(self, now)
        else:
            return None
def wait(self, poller, now):
"""Causes the next call to poller.block() to wake up when self.run()
should be called."""
timeout = self.timeout(now)
if timeout >= 0:
poller.timer_wait(timeout)
def timeout(self, now):
"""Returns the number of milliseconds after which self.run() should be
called if nothing else notable happens in the meantime, or None if this
is currently unnecessary."""
deadline = self.state.deadline(self)
if deadline is not None:
remaining = deadline - now
return max(0, remaining)
else:
return None
def is_connected(self):
"""Returns True if this FSM is currently believed to be connected, that
is, if self.connected() was called more recently than any call to
self.connect_failed() or self.disconnected() or self.disable(), and
False otherwise."""
return self.state.is_connected
def get_last_connect_elapsed(self, now):
"""Returns the number of milliseconds since 'fsm' was last connected
to its peer. Returns None if never connected."""
if self.last_connected:
return now - self.last_connected
else:
return None
def get_last_disconnect_elapsed(self, now):
"""Returns the number of milliseconds since 'fsm' was last disconnected
from its peer. Returns None if never disconnected."""
if self.last_disconnected:
return now - self.last_disconnected
else:
return None
    def get_stats(self, now):
        """Return a snapshot of this FSM's connection statistics as an
        ad-hoc attribute bag; 'now' is used to compute the elapsed-time
        fields."""
        class Stats(object):
            pass
        stats = Stats()
        stats.creation_time = self.creation_time
        stats.last_connected = self.last_connected
        stats.last_disconnected = self.last_disconnected
        stats.last_activity = self.last_activity
        stats.backoff = self.backoff
        stats.seqno = self.seqno
        stats.is_connected = self.is_connected()
        stats.msec_since_connect = self.get_last_connect_elapsed(now)
        stats.msec_since_disconnect = self.get_last_disconnect_elapsed(now)
        stats.total_connected_duration = self.total_connected_duration
        if self.is_connected():
            # Include the still-open connection's duration, which
            # _transition() only folds in on disconnect.
            stats.total_connected_duration += (
                self.get_last_connect_elapsed(now))
        stats.n_attempted_connections = self.n_attempted_connections
        stats.n_successful_connections = self.n_successful_connections
        stats.state = self.state.name
        stats.state_elapsed = now - self.state_entered
        return stats
def __may_retry(self):
if self.max_tries is None:
return True
elif self.max_tries > 0:
self.max_tries -= 1
return True
else:
return False

View File

@ -1,192 +0,0 @@
# Copyright (c) 2010, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import select
import socket
import sys
import ovs.fatal_signal
import ovs.poller
import ovs.vlog
vlog = ovs.vlog.Vlog("socket_util")  # Module-level logger for this file.
def make_unix_socket(style, nonblock, bind_path, connect_path):
    """Creates a Unix domain socket in the given 'style' (either
    socket.SOCK_DGRAM or socket.SOCK_STREAM) that is bound to 'bind_path' (if
    'bind_path' is not None) and connected to 'connect_path' (if 'connect_path'
    is not None).  If 'nonblock' is true, the socket is made non-blocking.

    Returns (error, socket): on success 'error' is 0 and 'socket' is a new
    socket object, on failure 'error' is a positive errno value and 'socket' is
    None."""
    try:
        sock = socket.socket(socket.AF_UNIX, style)
    except socket.error, e:
        return get_exception_errno(e), None
    try:
        if nonblock:
            set_nonblocking(sock)
        if bind_path is not None:
            # Delete bind_path but ignore ENOENT.
            try:
                os.unlink(bind_path)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    return e.errno, None
            # Register the path for unlink-on-exit BEFORE binding, so a
            # crash between bind() and cleanup still removes the file.
            ovs.fatal_signal.add_file_to_unlink(bind_path)
            sock.bind(bind_path)
            try:
                # Restrict the socket file to its owner.  os.fchmod needs
                # Python >= 2.6; older interpreters fall back to the
                # /dev/fd chmod trick (Linux-specific).
                if sys.hexversion >= 0x02060000:
                    os.fchmod(sock.fileno(), 0700)
                else:
                    os.chmod("/dev/fd/%d" % sock.fileno(), 0700)
            except OSError, e:
                # Best effort only; a failed chmod is not fatal.
                pass
        if connect_path is not None:
            try:
                sock.connect(connect_path)
            except socket.error, e:
                # EINPROGRESS is expected for a nonblocking connect;
                # completion is checked later.
                if get_exception_errno(e) != errno.EINPROGRESS:
                    raise
        return 0, sock
    except socket.error, e:
        sock.close()
        if bind_path is not None:
            ovs.fatal_signal.unlink_file_now(bind_path)
        return get_exception_errno(e), None
def check_connection_completion(sock):
    """Poll 'sock' once for writability to see whether its nonblocking
    connect has finished.  Returns the socket's error status (0 on a
    successful connect, else a positive errno) once writable, or
    errno.EAGAIN while the connect is still in progress."""
    poll = ovs.poller.SelectPoll()
    poll.register(sock, ovs.poller.POLLOUT)
    if not poll.poll(0):
        return errno.EAGAIN
    return get_socket_error(sock)
def inet_parse_active(target, default_port):
    """Parse "HOST[:PORT]" from 'target' into a (host_name, port) tuple.

    When 'target' carries no port, fall back to 'default_port'.  Raises
    ValueError if the host part is empty or no port can be determined."""
    parts = target.split(":")
    host_name = parts[0]
    if not host_name:
        raise ValueError("%s: bad peer name format" % target)
    if len(parts) > 1:
        return (host_name, int(parts[1]))
    if default_port:
        return (host_name, default_port)
    raise ValueError("%s: port number must be specified" % target)
def inet_open_active(style, target, default_port, dscp):
    """Open a nonblocking AF_INET socket of the given 'style' and start
    connecting it to "HOST[:PORT]" 'target' (see inet_parse_active()).

    Returns (error, socket): 0 and the (possibly still-connecting) socket
    on success, else a positive errno value and None.  Completion of the
    connect is checked later via check_connection_completion()."""
    address = inet_parse_active(target, default_port)
    try:
        sock = socket.socket(socket.AF_INET, style, 0)
    except socket.error, e:
        return get_exception_errno(e), None
    try:
        set_nonblocking(sock)
        set_dscp(sock, dscp)
        try:
            sock.connect(address)
        except socket.error, e:
            # EINPROGRESS is the normal outcome of a nonblocking connect.
            if get_exception_errno(e) != errno.EINPROGRESS:
                raise
        return 0, sock
    except socket.error, e:
        sock.close()
        return get_exception_errno(e), None
def get_socket_error(sock):
    """Returns the errno value associated with 'socket' (0 if no error) and
    resets the socket's error status."""
    return sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)


def get_exception_errno(e):
    """Best-effort extraction of an errno from a socket.error.

    socket.error is documented with two completely different argument
    forms: either a plain string or an (errno, string) tuple.  Return the
    errno when one is present, otherwise fall back to errno.EPROTO."""
    args = e.args
    if type(args) == tuple:
        return args[0]
    return errno.EPROTO
null_fd = -1  # Cached process-wide /dev/null fd; see get_null_fd().


def get_null_fd():
    """Returns a readable and writable fd for /dev/null, if successful,
    otherwise a negative errno value.  The caller must not close the returned
    fd (because the same fd will be handed out to subsequent callers)."""
    global null_fd
    if null_fd < 0:
        try:
            null_fd = os.open("/dev/null", os.O_RDWR)
        except OSError, e:
            vlog.err("could not open /dev/null: %s" % os.strerror(e.errno))
            return -e.errno
    return null_fd
def write_fully(fd, buf):
"""Returns an (error, bytes_written) tuple where 'error' is 0 on success,
otherwise a positive errno value, and 'bytes_written' is the number of
bytes that were written before the error occurred. 'error' is 0 if and
only if 'bytes_written' is len(buf)."""
bytes_written = 0
if len(buf) == 0:
return 0, 0
while True:
try:
retval = os.write(fd, buf)
assert retval >= 0
if retval == len(buf):
return 0, bytes_written + len(buf)
elif retval == 0:
vlog.warn("write returned 0")
return errno.EPROTO, bytes_written
else:
bytes_written += retval
buf = buf[:retval]
except OSError, e:
return e.errno, bytes_written
def set_nonblocking(sock):
try:
sock.setblocking(0)
except socket.error, e:
vlog.err("could not set nonblocking mode on socket: %s"
% os.strerror(get_socket_error(e)))
def set_dscp(sock, dscp):
    """Set the IP DSCP bits on 'sock'.  'dscp' must be in [0, 63];
    raises ValueError otherwise."""
    if dscp > 63:
        raise ValueError("Invalid dscp %d" % dscp)
    # DSCP occupies the top six bits of the IP TOS byte.
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, dscp << 2)

View File

@ -1,362 +0,0 @@
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import socket
import ovs.poller
import ovs.socket_util
import ovs.vlog
vlog = ovs.vlog.Vlog("stream")  # Module-level logger for this file.
def stream_or_pstream_needs_probes(name):
    """Return 1 if the stream or pstream specified by 'name' needs periodic
    probes to verify connectivity, 0 if it does not, and -1 if 'name' is not
    a valid stream or pstream name.

    Only unix/punix (and tcp) are supported currently, none of which need
    probes, so every valid name yields 0."""
    if Stream.is_valid_name(name) or PassiveStream.is_valid_name(name):
        return 0
    return -1
class Stream(object):
    """Bidirectional byte stream.  Currently only Unix domain sockets
    are implemented."""

    # States.
    __S_CONNECTING = 0     # Nonblocking connect still in progress.
    __S_CONNECTED = 1      # Connect completed successfully.
    __S_DISCONNECTED = 2   # Connect failed, or stream torn down.

    # Kinds of events that one might wait for.
    W_CONNECT = 0  # Connect complete (success or failure).
    W_RECV = 1  # Data received.
    W_SEND = 2  # Send buffer room available.

    # Maps "METHOD:" name prefixes (e.g. "unix:") to Stream subclasses.
    _SOCKET_METHODS = {}

    @staticmethod
    def register_method(method, cls):
        # Register Stream subclass 'cls' to handle "METHOD:ARGS" names.
        Stream._SOCKET_METHODS[method + ":"] = cls

    @staticmethod
    def _find_method(name):
        # Return the Stream subclass registered for 'name', or None.
        for method, cls in Stream._SOCKET_METHODS.items():
            if name.startswith(method):
                return cls
        return None

    @staticmethod
    def is_valid_name(name):
        """Returns True if 'name' is a stream name in the form "TYPE:ARGS" and
        TYPE is a supported stream type (currently only "unix:" and "tcp:"),
        otherwise False."""
        return bool(Stream._find_method(name))

    def __init__(self, socket, name, status):
        # 'socket' (NB: the parameter shadows the socket module within this
        # method) is the underlying socket object; 'status' is 0 for a
        # completed connect, errno.EAGAIN for one still in progress, or
        # another errno value for a failure.
        self.socket = socket
        self.name = name
        if status == errno.EAGAIN:
            self.state = Stream.__S_CONNECTING
        elif status == 0:
            self.state = Stream.__S_CONNECTED
        else:
            self.state = Stream.__S_DISCONNECTED
        self.error = 0

    # Default value of dscp bits for connection between controller and manager.
    # Value of IPTOS_PREC_INTERNETCONTROL = 0xc0 which is defined
    # in <netinet/ip.h> is used.
    IPTOS_PREC_INTERNETCONTROL = 0xc0
    DSCP_DEFAULT = IPTOS_PREC_INTERNETCONTROL >> 2

    @staticmethod
    def open(name, dscp=DSCP_DEFAULT):
        """Attempts to connect a stream to a remote peer.  'name' is a
        connection name in the form "TYPE:ARGS", where TYPE is an active stream
        class's name and ARGS are stream class-specific.  Currently the only
        supported TYPEs are "unix" and "tcp".

        Returns (error, stream): on success 'error' is 0 and 'stream' is the
        new Stream, on failure 'error' is a positive errno value and 'stream'
        is None.

        Never returns errno.EAGAIN or errno.EINPROGRESS.  Instead, returns 0
        and a new Stream.  The connect() method can be used to check for
        successful connection completion."""
        cls = Stream._find_method(name)
        if not cls:
            return errno.EAFNOSUPPORT, None
        suffix = name.split(":", 1)[1]
        error, sock = cls._open(suffix, dscp)
        if error:
            return error, None
        else:
            status = ovs.socket_util.check_connection_completion(sock)
            return 0, Stream(sock, name, status)

    @staticmethod
    def _open(suffix, dscp):
        # Subclass hook: create a nonblocking socket for 'suffix'.
        raise NotImplementedError("This method must be overrided by subclass")

    @staticmethod
    def open_block((error, stream)):
        # NOTE: Python 2 tuple parameter unpacking; not valid in Python 3.
        """Blocks until a Stream completes its connection attempt, either
        succeeding or failing.  (error, stream) should be the tuple returned by
        Stream.open().  Returns a tuple of the same form.

        Typical usage:
        error, stream = Stream.open_block(Stream.open("unix:/tmp/socket"))"""
        if not error:
            while True:
                error = stream.connect()
                if error != errno.EAGAIN:
                    break
                stream.run()
                poller = ovs.poller.Poller()
                stream.run_wait(poller)
                stream.connect_wait(poller)
                poller.block()
            assert error != errno.EINPROGRESS
        if error and stream:
            stream.close()
            stream = None
        return error, stream

    def close(self):
        # Close the underlying socket (also done by __del__).
        self.socket.close()

    def __scs_connecting(self):
        # Poll the pending nonblocking connect and advance self.state.
        retval = ovs.socket_util.check_connection_completion(self.socket)
        assert retval != errno.EINPROGRESS
        if retval == 0:
            self.state = Stream.__S_CONNECTED
        elif retval != errno.EAGAIN:
            self.state = Stream.__S_DISCONNECTED
            self.error = retval

    def connect(self):
        """Tries to complete the connection on this stream.  If the connection
        is complete, returns 0 if the connection was successful or a positive
        errno value if it failed.  If the connection is still in progress,
        returns errno.EAGAIN."""
        if self.state == Stream.__S_CONNECTING:
            self.__scs_connecting()
        if self.state == Stream.__S_CONNECTING:
            return errno.EAGAIN
        elif self.state == Stream.__S_CONNECTED:
            return 0
        else:
            assert self.state == Stream.__S_DISCONNECTED
            return self.error

    def recv(self, n):
        """Tries to receive up to 'n' bytes from this stream.  Returns a
        (error, string) tuple:

        - If successful, 'error' is zero and 'string' contains between 1
          and 'n' bytes of data.
        - On error, 'error' is a positive errno value.
        - If the connection has been closed in the normal fashion or if 'n'
          is 0, the tuple is (0, "").

        The recv function will not block waiting for data to arrive.  If no
        data have been received, it returns (errno.EAGAIN, "") immediately."""
        retval = self.connect()
        if retval != 0:
            return (retval, "")
        elif n == 0:
            return (0, "")
        try:
            return (0, self.socket.recv(n))
        except socket.error, e:
            return (ovs.socket_util.get_exception_errno(e), "")

    def send(self, buf):
        """Tries to send 'buf' on this stream.

        If successful, returns the number of bytes sent, between 1 and
        len(buf).  0 is only a valid return value if len(buf) is 0.

        On error, returns a negative errno value.

        Will not block.  If no bytes can be immediately accepted for
        transmission, returns -errno.EAGAIN immediately."""
        retval = self.connect()
        if retval != 0:
            return -retval
        elif len(buf) == 0:
            return 0
        try:
            return self.socket.send(buf)
        except socket.error, e:
            return -ovs.socket_util.get_exception_errno(e)

    def run(self):
        # Nothing to do for plain sockets; subclasses may override.
        pass

    def run_wait(self, poller):
        # Nothing to wait on for plain sockets; subclasses may override.
        pass

    def wait(self, poller, wait):
        # Register with 'poller' for the event class 'wait'; an unfinished
        # connect overrides the requested event, and a dead stream wakes
        # the poller immediately.
        assert wait in (Stream.W_CONNECT, Stream.W_RECV, Stream.W_SEND)
        if self.state == Stream.__S_DISCONNECTED:
            poller.immediate_wake()
            return
        if self.state == Stream.__S_CONNECTING:
            wait = Stream.W_CONNECT
        if wait == Stream.W_RECV:
            poller.fd_wait(self.socket, ovs.poller.POLLIN)
        else:
            poller.fd_wait(self.socket, ovs.poller.POLLOUT)

    def connect_wait(self, poller):
        self.wait(poller, Stream.W_CONNECT)

    def recv_wait(self, poller):
        self.wait(poller, Stream.W_RECV)

    def send_wait(self, poller):
        self.wait(poller, Stream.W_SEND)

    def __del__(self):
        # Don't delete the file: we might have forked.
        self.socket.close()
class PassiveStream(object):
@staticmethod
def is_valid_name(name):
"""Returns True if 'name' is a passive stream name in the form
"TYPE:ARGS" and TYPE is a supported passive stream type (currently only
"punix:"), otherwise False."""
return name.startswith("punix:")
def __init__(self, sock, name, bind_path):
self.name = name
self.socket = sock
self.bind_path = bind_path
@staticmethod
def open(name):
"""Attempts to start listening for remote stream connections. 'name'
is a connection name in the form "TYPE:ARGS", where TYPE is an passive
stream class's name and ARGS are stream class-specific. Currently the
only supported TYPE is "punix".
Returns (error, pstream): on success 'error' is 0 and 'pstream' is the
new PassiveStream, on failure 'error' is a positive errno value and
'pstream' is None."""
if not PassiveStream.is_valid_name(name):
return errno.EAFNOSUPPORT, None
bind_path = name[6:]
error, sock = ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
True, bind_path, None)
if error:
return error, None
try:
sock.listen(10)
except socket.error, e:
vlog.err("%s: listen: %s" % (name, os.strerror(e.error)))
sock.close()
return e.error, None
return 0, PassiveStream(sock, name, bind_path)
def close(self):
"""Closes this PassiveStream."""
self.socket.close()
if self.bind_path is not None:
ovs.fatal_signal.unlink_file_now(self.bind_path)
self.bind_path = None
def accept(self):
"""Tries to accept a new connection on this passive stream. Returns
(error, stream): if successful, 'error' is 0 and 'stream' is the new
Stream object, and on failure 'error' is a positive errno value and
'stream' is None.
Will not block waiting for a connection. If no connection is ready to
be accepted, returns (errno.EAGAIN, None) immediately."""
while True:
try:
sock, addr = self.socket.accept()
ovs.socket_util.set_nonblocking(sock)
return 0, Stream(sock, "unix:%s" % addr, 0)
except socket.error, e:
error = ovs.socket_util.get_exception_errno(e)
if error != errno.EAGAIN:
# XXX rate-limit
vlog.dbg("accept: %s" % os.strerror(error))
return error, None
def wait(self, poller):
poller.fd_wait(self.socket, ovs.poller.POLLIN)
def __del__(self):
# Don't delete the file: we might have forked.
self.socket.close()
def usage(name):
    # Build the help blurb listing active and passive connection methods,
    # interpolating 'name' into both headings.
    return """
Active %s connection methods:
  unix:FILE Unix domain socket named FILE
  tcp:IP:PORT TCP socket to IP with port no of PORT
Passive %s connection methods:
  punix:FILE Listen on Unix domain socket FILE""" % (name, name)
class UnixStream(Stream):
    """Active "unix:FILE" stream that connects to a Unix domain socket."""

    @staticmethod
    def _open(suffix, dscp):
        # 'suffix' is the socket path to connect to; no local bind path.
        return ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
                                                True, None, suffix)


Stream.register_method("unix", UnixStream)
class TCPStream(Stream):
    """Active "tcp:IP:PORT" stream over a TCP socket."""

    @staticmethod
    def _open(suffix, dscp):
        error, sock = ovs.socket_util.inet_open_active(socket.SOCK_STREAM,
                                                       suffix, 0, dscp)
        if not error:
            # Control-channel traffic is latency-sensitive; disable Nagle.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return error, sock


Stream.register_method("tcp", TCPStream)

View File

@ -1,83 +0,0 @@
# Copyright (c) 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ovs.util
# Registry of unixctl commands exposed by the server, keyed by command name.
commands = {}
# Python 2 only: tuple of string types for isinstance() checks.
strtypes = types.StringTypes
class _UnixctlCommand(object):
    """Descriptor for a single registered unixctl command."""

    def __init__(self, usage, min_args, max_args, callback, aux):
        self.usage = usage        # Argument summary shown in "help" output.
        self.min_args = min_args  # Fewest arguments the command accepts.
        self.max_args = max_args  # Most arguments the command accepts.
        self.callback = callback  # Handler invoked when the command runs.
        self.aux = aux            # Opaque data forwarded to the handler.
def _unixctl_help(conn, unused_argv, unused_aux):
    # Built-in "help" command: list every registered command, each with its
    # usage string when one was supplied.
    reply = "The available commands are:\n"
    for name in sorted(commands.keys()):
        reply += "  "
        usage = commands[name].usage
        if usage:
            reply += "%-23s %s" % (name, usage)
        else:
            reply += name
        reply += "\n"
    conn.reply(reply)
def command_register(name, usage, min_args, max_args, callback, aux):
    """ Registers a command with the given 'name' to be exposed by the
    UnixctlServer.  'usage' describes the arguments to the command; it is used
    only for presentation to the user in "help" output.

    'callback' is called when the command is received.  It is passed a
    UnixctlConnection object, the list of arguments as unicode strings, and
    'aux'.  Normally 'callback' should reply by calling
    UnixctlConnection.reply() or UnixctlConnection.reply_error() before it
    returns, but if the command cannot be handled immediately, then it can
    defer the reply until later.  A given connection can only process a single
    request at a time, so a reply must be made eventually to avoid blocking
    that connection."""
    assert isinstance(name, strtypes)
    assert isinstance(usage, strtypes)
    assert isinstance(min_args, int)
    assert isinstance(max_args, int)
    assert isinstance(callback, types.FunctionType)

    # First registration wins; silently ignore duplicate names.
    if name in commands:
        return
    commands[name] = _UnixctlCommand(usage, min_args, max_args, callback, aux)
def socket_name_from_target(target):
    """Resolve 'target' to a unixctl control-socket path.

    An absolute path is returned unchanged.  Otherwise 'target' is taken as
    a daemon name whose pidfile under ovs.dirs.RUNDIR identifies the socket.
    Returns (error, path) on success or (errno, message) on failure."""
    # Bug fix: this module never imported ovs.dirs or ovs.daemon, so the
    # lookups below only worked when some other module had already imported
    # them (populating the 'ovs' package attributes).  Import them locally
    # to make this function self-contained.
    import ovs.daemon
    import ovs.dirs

    assert isinstance(target, strtypes)

    if target.startswith("/"):
        return 0, target

    pidfile_name = "%s/%s.pid" % (ovs.dirs.RUNDIR, target)
    pid = ovs.daemon.read_pidfile(pidfile_name)
    if pid < 0:
        return -pid, "cannot read pidfile \"%s\"" % pidfile_name

    return 0, "%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, target, pid)


command_register("help", "", 0, 0, _unixctl_help, None)

View File

@ -1,70 +0,0 @@
# Copyright (c) 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import os
import types
import ovs.jsonrpc
import ovs.stream
import ovs.util
# NOTE(review): ovs.vlog is used here without an explicit import in this
# module; it resolves only because a sibling import (e.g. ovs.stream ->
# ovs.vlog) populates the 'ovs' package attribute first -- confirm and add
# an explicit "import ovs.vlog".
vlog = ovs.vlog.Vlog("unixctl_client")
strtypes = types.StringTypes  # Python 2 string types for isinstance() checks.
class UnixctlClient(object):
    """Client side of a unixctl JSON-RPC control connection."""

    def __init__(self, conn):
        assert isinstance(conn, ovs.jsonrpc.Connection)
        self._conn = conn

    def transact(self, command, argv):
        """Execute 'command' with string arguments 'argv' on the remote
        daemon.  Returns (error, cmd_error, cmd_result): 'error' is a
        positive errno value when the transport failed; otherwise it is 0
        and exactly one of 'cmd_error' (the daemon rejected the command) and
        'cmd_result' (the command's output) is set."""
        assert isinstance(command, strtypes)
        assert isinstance(argv, list)
        for arg in argv:
            assert isinstance(arg, strtypes)

        request = ovs.jsonrpc.Message.create_request(command, argv)
        error, reply = self._conn.transact_block(request)

        if error:
            vlog.warn("error communicating with %s: %s"
                      % (self._conn.name, os.strerror(error)))
            return error, None, None

        if reply.error is not None:
            return 0, str(reply.error), None
        else:
            assert reply.result is not None
            return 0, None, str(reply.result)

    def close(self):
        """Close the underlying connection."""
        self._conn.close()
        # Bug fix: the attribute is '_conn'; the old code assigned to a
        # never-read 'conn' attribute, leaving self._conn pointing at a
        # closed connection object.
        self._conn = None

    @staticmethod
    def create(path):
        """Connect to the unixctl server socket at 'path' (absolute, or
        relative to ovs.dirs.RUNDIR).  Returns (error, client)."""
        assert isinstance(path, str)

        unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
        error, stream = ovs.stream.Stream.open_block(
            ovs.stream.Stream.open(unix))

        if error:
            vlog.warn("failed to connect to %s" % path)
            return error, None

        return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))

View File

@ -1,247 +0,0 @@
# Copyright (c) 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import os
import types
import ovs.dirs
import ovs.jsonrpc
import ovs.stream
import ovs.unixctl
import ovs.util
import ovs.version
import ovs.vlog
Message = ovs.jsonrpc.Message  # Local alias for brevity.
vlog = ovs.vlog.Vlog("unixctl_server")  # Module-level logger for this file.
strtypes = types.StringTypes  # Python 2 string types for isinstance() checks.
class UnixctlConnection(object):
    def __init__(self, rpc):
        # One client connection served over a JSON-RPC transport.
        assert isinstance(rpc, ovs.jsonrpc.Connection)
        self._rpc = rpc
        # Id of the request currently awaiting a reply, or None when idle.
        self._request_id = None
def run(self):
self._rpc.run()
error = self._rpc.get_status()
if error or self._rpc.get_backlog():
return error
for _ in range(10):
if error or self._request_id:
break
error, msg = self._rpc.recv()
if msg:
if msg.type == Message.T_REQUEST:
self._process_command(msg)
else:
# XXX: rate-limit
vlog.warn("%s: received unexpected %s message"
% (self._rpc.name,
Message.type_to_string(msg.type)))
error = errno.EINVAL
if not error:
error = self._rpc.get_status()
return error
def reply(self, body):
self._reply_impl(True, body)
def reply_error(self, body):
self._reply_impl(False, body)
# Called only by unixctl classes.
def _close(self):
self._rpc.close()
self._request_id = None
def _wait(self, poller):
self._rpc.wait(poller)
if not self._rpc.get_backlog():
self._rpc.recv_wait(poller)
def _reply_impl(self, success, body):
assert isinstance(success, bool)
assert body is None or isinstance(body, strtypes)
assert self._request_id is not None
if body is None:
body = ""
if body and not body.endswith("\n"):
body += "\n"
if success:
reply = Message.create_reply(body, self._request_id)
else:
reply = Message.create_error(body, self._request_id)
self._rpc.send(reply)
self._request_id = None
def _process_command(self, request):
assert isinstance(request, ovs.jsonrpc.Message)
assert request.type == ovs.jsonrpc.Message.T_REQUEST
self._request_id = request.id
error = None
params = request.params
method = request.method
command = ovs.unixctl.commands.get(method)
if command is None:
error = '"%s" is not a valid command' % method
elif len(params) < command.min_args:
error = '"%s" command requires at least %d arguments' \
% (method, command.min_args)
elif len(params) > command.max_args:
error = '"%s" command takes at most %d arguments' \
% (method, command.max_args)
else:
for param in params:
if not isinstance(param, strtypes):
error = '"%s" command has non-string argument' % method
break
if error is None:
unicode_params = [unicode(p) for p in params]
command.callback(self, unicode_params, command.aux)
if error:
self.reply_error(error)
def _unixctl_version(conn, unused_argv, version):
    """Built-in handler for the "version" unixctl command.

    Replies with the program name and the 'version' string that was bound
    to the command at registration time."""
    assert isinstance(conn, UnixctlConnection)
    banner = "%s (Open vSwitch) %s" % (ovs.util.PROGRAM_NAME, version)
    conn.reply(banner)
class UnixctlServer(object):
    """Server side of the unixctl protocol: accepts connections on a passive
    unix-domain stream and serves registered commands on each of them.

    Drive it by calling run() from the daemon's main loop and wait() before
    blocking in a poller."""

    def __init__(self, listener):
        assert isinstance(listener, ovs.stream.PassiveStream)
        self._listener = listener
        # Live UnixctlConnection objects, one per accepted client.
        self._conns = []

    def run(self):
        """Accepts up to 10 new connections, then runs every existing
        connection, discarding those that have failed."""
        for _ in range(10):
            error, stream = self._listener.accept()
            if not error:
                rpc = ovs.jsonrpc.Connection(stream)
                self._conns.append(UnixctlConnection(rpc))
            elif error == errno.EAGAIN:
                # No more pending connections right now.
                break
            else:
                # XXX: rate-limit
                vlog.warn("%s: accept failed: %s" % (self._listener.name,
                                                     os.strerror(error)))

        # Iterate over a copy so broken connections can be removed in-place.
        for conn in copy.copy(self._conns):
            error = conn.run()
            if error and error != errno.EAGAIN:
                conn._close()
                self._conns.remove(conn)

    def wait(self, poller):
        """Arranges for 'poller' to wake when run() has work to do."""
        self._listener.wait(poller)
        for conn in self._conns:
            conn._wait(poller)

    def close(self):
        """Closes every connection and the listening socket.  The server
        must not be used after this call."""
        for conn in self._conns:
            conn._close()
        self._conns = None

        self._listener.close()
        self._listener = None

    @staticmethod
    def create(path, version=None):
        """Creates a new UnixctlServer which listens on a unixctl socket
        created at 'path'.  If 'path' is None, the default path is chosen.
        'version' contains the version of the server as reported by the unixctl
        version command.  If None, ovs.version.VERSION is used.

        Returns an (error, server) tuple: (0, UnixctlServer) on success,
        (errno, None) on failure."""
        assert path is None or isinstance(path, strtypes)

        if path is not None:
            path = "punix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
        else:
            # Default: <RUNDIR>/<program>.<pid>.ctl, unique per process.
            path = "punix:%s/%s.%d.ctl" % (ovs.dirs.RUNDIR,
                                           ovs.util.PROGRAM_NAME, os.getpid())

        if version is None:
            version = ovs.version.VERSION

        error, listener = ovs.stream.PassiveStream.open(path)
        if error:
            ovs.util.ovs_error(error, "could not initialize control socket %s"
                               % path)
            return error, None

        # Every server answers the "version" command with the bound version.
        ovs.unixctl.command_register("version", "", 0, 0, _unixctl_version,
                                     version)

        return 0, UnixctlServer(listener)
class UnixctlClient(object):
    """Client side of the unixctl protocol.  Wraps a connected
    ovs.jsonrpc.Connection and issues one command per transact() call."""

    def __init__(self, conn):
        assert isinstance(conn, ovs.jsonrpc.Connection)
        self._conn = conn

    def transact(self, command, argv):
        """Executes 'command' with string arguments 'argv' on the remote
        daemon and blocks for the reply.

        Returns a (status, error, result) tuple: status is 0 on RPC success
        (otherwise an errno value and both other fields are None); exactly
        one of 'error'/'result' then carries the daemon's textual reply."""
        assert isinstance(command, strtypes)
        assert isinstance(argv, list)
        for arg in argv:
            assert isinstance(arg, strtypes)

        request = Message.create_request(command, argv)
        error, reply = self._conn.transact_block(request)

        if error:
            vlog.warn("error communicating with %s: %s"
                      % (self._conn.name, os.strerror(error)))
            return error, None, None

        if reply.error is not None:
            return 0, str(reply.error), None
        else:
            assert reply.result is not None
            return 0, None, str(reply.result)

    def close(self):
        """Closes the underlying connection and drops the reference.

        Bug fix: the original assigned None to 'self.conn' -- a name never
        read anywhere -- leaving 'self._conn' pointing at the closed
        connection.  Clear '_conn' itself so stale use fails fast."""
        self._conn.close()
        self._conn = None

    @staticmethod
    def create(path):
        """Connects to the unixctl socket at 'path' (relative paths resolve
        against ovs.dirs.RUNDIR).  Returns (0, UnixctlClient) on success or
        (errno, None) on failure."""
        assert isinstance(path, str)

        unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
        error, stream = ovs.stream.Stream.open_block(
            ovs.stream.Stream.open(unix))
        if error:
            vlog.warn("failed to connect to %s" % path)
            return error, None

        return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))

View File

@ -1,93 +0,0 @@
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import sys
PROGRAM_NAME = os.path.basename(sys.argv[0])
EOF = -1
def abs_file_name(dir_, file_name):
    """Resolves 'file_name' to an absolute path string.

    An already-absolute 'file_name' (one starting with '/') is returned
    unchanged.  Otherwise it is joined onto 'dir_', which must itself be
    absolute; a None or empty 'dir_' means the current working directory.
    Returns None only when the working directory is needed but getcwd()
    fails.

    Unlike os.path.abspath(), this never reinterprets or normalizes the
    name -- no '..' collapsing, no symlink games."""
    if file_name.startswith('/'):
        return file_name

    base = dir_
    if base is None or base == "":
        try:
            base = os.getcwd()
        except OSError:
            return None

    sep = "" if base.endswith('/') else "/"
    return base + sep + file_name
def ovs_retval_to_string(retval):
    """Renders an OVS-style integer return code as human-readable text.

    Conventions: 0 (or any falsy value) means "no error yet", a positive
    value is an errno, and EOF marks end of file (which is not necessarily
    an error).  Anything else is reported as unknown."""
    if not retval:
        return ""
    if retval > 0:
        return os.strerror(retval)
    return ("End of file" if retval == EOF
            else "***unknown return value: %s***" % retval)
def ovs_error(err_no, message, vlog=None):
    """Reports 'message' on stderr, prefixed with the program name, and
    mirrors it at ERROR level to 'vlog' when one is given.

    A nonzero 'err_no' is rendered with ovs_retval_to_string() and appended
    in parentheses.  'message' should not carry a trailing newline; one is
    added here."""
    pieces = ["%s: %s" % (PROGRAM_NAME, message)]
    if err_no:
        pieces.append(" (%s)" % ovs_retval_to_string(err_no))
    err_msg = "".join(pieces)

    sys.stderr.write(err_msg + "\n")
    if vlog:
        vlog.err(err_msg)
def ovs_fatal(*args, **kwargs):
    """Prints 'message' on stderr and emits an ERROR level log message to
    'vlog' if supplied.  If 'err_no' is nonzero, then it is formatted with
    ovs_retval_to_string() and appended to the message inside parentheses.
    Then, terminates with exit code 1 (indicating a failure).

    'message' should not end with a new-line, because this function will add
    one itself.

    Arguments are forwarded verbatim to ovs_error(err_no, message, vlog)."""
    ovs_error(*args, **kwargs)
    sys.exit(1)

View File

@ -1,2 +0,0 @@
# Generated automatically -- do not modify! -*- buffer-read-only: t -*-
VERSION = "1.7.90"

View File

@ -1,267 +0,0 @@
# Copyright (c) 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import logging.handlers
import re
import socket
import sys
import ovs.dirs
import ovs.unixctl
import ovs.util
# Logging facilities and their default level names.
FACILITIES = {"console": "info", "file": "info", "syslog": "info"}

# OVS --verbose level names mapped onto the stdlib logging levels.  "off"
# deliberately aliases CRITICAL: nothing above CRITICAL is ever emitted.
LEVELS = {
    "dbg": logging.DEBUG,
    "info": logging.INFO,
    "warn": logging.WARNING,
    "err": logging.ERROR,
    "emer": logging.CRITICAL,
    "off": logging.CRITICAL
}


def get_level(level_str):
    """Translates an OVS level name (case-insensitive, e.g. "dbg", "warn")
    into the matching logging module constant, or None when the name is
    unknown."""
    key = level_str.lower()
    return LEVELS.get(key)
class Vlog:
    """OVS-style logging front end over the stdlib logging module.

    Each Vlog instance represents one source module; all routing state is
    class-level and shared:
    """
    # True once Vlog.init() has run; no logging happens before that.
    __inited = False
    # Monotonically increasing sequence number stamped into every message.
    __msg_num = 0
    __mfl = {}  # Module -> facility -> level
    # Path of the log file (if any) and its FileHandler, kept so
    # reopen_log_file() can swap the handler after log rotation.
    __log_file = None
    __file_handler = None

    def __init__(self, name):
        """Creates a new Vlog object representing a module called 'name'.  The
        created Vlog object will do nothing until the Vlog.init() static method
        is called.  Once called, no more Vlog objects may be created."""
        assert not Vlog.__inited
        self.name = name.lower()
        # NOTE(review): membership is tested with the original-case 'name'
        # but the table is keyed by the lowercased 'self.name' -- a module
        # registered under a different case would be re-initialized.
        # Presumably callers always pass lowercase names; confirm upstream.
        if name not in Vlog.__mfl:
            Vlog.__mfl[self.name] = FACILITIES.copy()

    def __log(self, level, message, **kwargs):
        """Formats 'message' with the vlog header (timestamp, sequence
        number, module, level) and forwards it to every facility whose
        configured threshold admits 'level'."""
        if not Vlog.__inited:
            return

        now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        message = ("%s|%s|%s|%s|%s"
                   % (now, Vlog.__msg_num, self.name, level, message))

        # Unknown level names fall back to DEBUG.
        level = LEVELS.get(level.lower(), logging.DEBUG)
        Vlog.__msg_num += 1

        # Python 2 code: dict.iteritems().
        for f, f_level in Vlog.__mfl[self.name].iteritems():
            f_level = LEVELS.get(f_level, logging.CRITICAL)
            if level >= f_level:
                logging.getLogger(f).log(level, message, **kwargs)

    def emer(self, message, **kwargs):
        """Logs 'message' at EMER level."""
        self.__log("EMER", message, **kwargs)

    def err(self, message, **kwargs):
        """Logs 'message' at ERR level."""
        self.__log("ERR", message, **kwargs)

    def warn(self, message, **kwargs):
        """Logs 'message' at WARN level."""
        self.__log("WARN", message, **kwargs)

    def info(self, message, **kwargs):
        """Logs 'message' at INFO level."""
        self.__log("INFO", message, **kwargs)

    def dbg(self, message, **kwargs):
        """Logs 'message' at DBG level."""
        self.__log("DBG", message, **kwargs)

    def exception(self, message):
        """Logs 'message' at ERR log level.  Includes a backtrace when in
        exception context."""
        self.err(message, exc_info=True)

    @staticmethod
    def init(log_file=None):
        """Initializes the Vlog module.  Causes Vlog to write to 'log_file' if
        not None.  Should be called after all Vlog objects have been created.
        No logging will occur until this function is called."""
        if Vlog.__inited:
            return

        Vlog.__inited = True
        # Logging failures must never take the daemon down.
        logging.raiseExceptions = False
        Vlog.__log_file = log_file
        for f in FACILITIES:
            logger = logging.getLogger(f)
            # Per-facility filtering happens in __log(); the stdlib loggers
            # themselves pass everything through.
            logger.setLevel(logging.DEBUG)

            try:
                if f == "console":
                    logger.addHandler(logging.StreamHandler(sys.stderr))
                elif f == "syslog":
                    logger.addHandler(logging.handlers.SysLogHandler(
                        address="/dev/log",
                        facility=logging.handlers.SysLogHandler.LOG_DAEMON))
                elif f == "file" and Vlog.__log_file:
                    Vlog.__file_handler = logging.FileHandler(Vlog.__log_file)
                    logger.addHandler(Vlog.__file_handler)
            except (IOError, socket.error):
                # Facility unavailable (e.g. no /dev/log): mute it.
                logger.setLevel(logging.CRITICAL)

        # Expose runtime log control over unixctl.  Python 2: sys.maxint.
        ovs.unixctl.command_register("vlog/reopen", "", 0, 0,
                                     Vlog._unixctl_vlog_reopen, None)
        ovs.unixctl.command_register("vlog/set", "spec", 1, sys.maxint,
                                     Vlog._unixctl_vlog_set, None)
        ovs.unixctl.command_register("vlog/list", "", 0, 0,
                                     Vlog._unixctl_vlog_list, None)

    @staticmethod
    def set_level(module, facility, level):
        """ Sets the log level of the 'module'-'facility' tuple to 'level'.
        All three arguments are strings which are interpreted the same as
        arguments to the --verbose flag.  Should be called after all Vlog
        objects have already been created.

        Unknown module, facility, or level names are silently ignored.
        "any" wildcards expand over all known modules/facilities."""
        module = module.lower()
        facility = facility.lower()
        level = level.lower()

        if facility != "any" and facility not in FACILITIES:
            return

        if module != "any" and module not in Vlog.__mfl:
            return

        if level not in LEVELS:
            return

        if module == "any":
            modules = Vlog.__mfl.keys()
        else:
            modules = [module]

        if facility == "any":
            facilities = FACILITIES.keys()
        else:
            facilities = [facility]

        for m in modules:
            for f in facilities:
                Vlog.__mfl[m][f] = level

    @staticmethod
    def set_levels_from_string(s):
        """Parses a --verbose spec 's' of the form "module:facility:level"
        (separators may be ' ' or ':', parts may appear in any order, and
        any part may be "any" or omitted) and applies it via set_level().

        Returns an error string on a malformed spec, None on success."""
        module = None
        level = None
        facility = None

        # Classify each word by which table it matches.
        for word in [w.lower() for w in re.split('[ :]', s)]:
            if word == "any":
                pass
            elif word in FACILITIES:
                if facility:
                    return "cannot specify multiple facilities"
                facility = word
            elif word in LEVELS:
                if level:
                    return "cannot specify multiple levels"
                level = word
            elif word in Vlog.__mfl:
                if module:
                    return "cannot specify multiple modules"
                module = word
            else:
                return "no facility, level, or module \"%s\"" % word

        Vlog.set_level(module or "any", facility or "any", level or "any")

    @staticmethod
    def get_levels():
        """Returns a table, as one multi-line string, of the current level
        of every module for each facility (the vlog/list output)."""
        lines = [" console syslog file\n",
                 " ------- ------ ------\n"]
        lines.extend(sorted(["%-16s %4s %4s %4s\n"
                             % (m,
                                Vlog.__mfl[m]["console"],
                                Vlog.__mfl[m]["syslog"],
                                Vlog.__mfl[m]["file"]) for m in Vlog.__mfl]))
        return ''.join(lines)

    @staticmethod
    def reopen_log_file():
        """Closes and then attempts to re-open the current log file.  (This is
        useful just after log rotation, to ensure that the new log file starts
        being used.)"""
        if Vlog.__log_file:
            logger = logging.getLogger("file")
            logger.removeHandler(Vlog.__file_handler)
            Vlog.__file_handler = logging.FileHandler(Vlog.__log_file)
            logger.addHandler(Vlog.__file_handler)

    @staticmethod
    def _unixctl_vlog_reopen(conn, unused_argv, unused_aux):
        # unixctl handler for "vlog/reopen".
        if Vlog.__log_file:
            Vlog.reopen_log_file()
            conn.reply(None)
        else:
            conn.reply("Logging to file not configured")

    @staticmethod
    def _unixctl_vlog_set(conn, argv, unused_aux):
        # unixctl handler for "vlog/set": applies each spec, stopping and
        # reporting the first parse error.
        for arg in argv:
            msg = Vlog.set_levels_from_string(arg)
            if msg:
                conn.reply(msg)
                return
        conn.reply(None)

    @staticmethod
    def _unixctl_vlog_list(conn, unused_argv, unused_aux):
        # unixctl handler for "vlog/list".
        conn.reply(Vlog.get_levels())
def add_args(parser):
    """Registers vlog's command-line options on 'parser', an
    argparse.ArgumentParser.  Pass the parsed namespace to handle_args()
    to activate them."""
    logging_group = parser.add_argument_group(title="Logging Options")
    logging_group.add_argument(
        "--log-file", nargs="?", const="default",
        help=("Enables logging to a file. Default log file"
              " is used if LOG_FILE is omitted."))
    logging_group.add_argument(
        "-v", "--verbose", nargs="*",
        help=("Sets logging levels, see ovs-vswitchd(8)."
              " Defaults to dbg."))
def handle_args(args):
    """ Handles command line arguments ('args') parsed by an ArgumentParser.
    The ArgumentParser should have been primed by add_args().  Also takes care
    of initializing the Vlog module.

    Calls ovs.util.ovs_fatal() (which exits) on a malformed --verbose spec.
    """
    log_file = args.log_file
    if log_file == "default":
        # --log-file given without a value: log to <LOGDIR>/<program>.log.
        log_file = "%s/%s.log" % (ovs.dirs.LOGDIR, ovs.util.PROGRAM_NAME)

    if args.verbose is None:
        args.verbose = []
    elif args.verbose == []:
        # Bare -v with no spec means "everything at dbg".
        args.verbose = ["any:any:dbg"]

    for verbose in args.verbose:
        msg = Vlog.set_levels_from_string(verbose)
        if msg:
            ovs.util.ovs_fatal(0, "processing \"%s\": %s" % (verbose, msg))

    Vlog.init(log_file)

View File

@ -30,7 +30,7 @@ from ryu.lib.hub import StreamServer
import traceback
import random
import ssl
from socket import IPPROTO_TCP, TCP_NODELAY, timeout as SocketTimeout, error as SocketError
from socket import IPPROTO_TCP, TCP_NODELAY, SHUT_RDWR, timeout as SocketTimeout
import warnings
import ryu.base.app_manager
@ -41,8 +41,8 @@ from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import nx_match
from ryu.controller import handler
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER, DEAD_DISPATCHER
from ryu.lib.dpid import dpid_to_str
@ -51,31 +51,56 @@ LOG = logging.getLogger('ryu.controller.controller')
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt('ofp-listen-host', default='', help='openflow listen host'),
cfg.IntOpt('ofp-tcp-listen-port', default=ofproto_common.OFP_TCP_PORT,
help='openflow tcp listen port'),
cfg.IntOpt('ofp-ssl-listen-port', default=ofproto_common.OFP_SSL_PORT,
help='openflow ssl listen port'),
cfg.IntOpt('ofp-tcp-listen-port', default=None,
help='openflow tcp listen port '
'(default: %d)' % ofproto_common.OFP_TCP_PORT),
cfg.IntOpt('ofp-ssl-listen-port', default=None,
help='openflow ssl listen port '
'(default: %d)' % ofproto_common.OFP_SSL_PORT),
cfg.StrOpt('ctl-privkey', default=None, help='controller private key'),
cfg.StrOpt('ctl-cert', default=None, help='controller certificate'),
cfg.StrOpt('ca-certs', default=None, help='CA certificates'),
cfg.FloatOpt('socket-timeout', default=5.0, help='Time, in seconds, to await completion of socket operations.')
cfg.StrOpt('ca-certs', default=None, help='CA certificates')
])
CONF.register_opts([
cfg.FloatOpt('socket-timeout',
default=5.0,
help='Time, in seconds, to await completion of socket operations.'),
cfg.FloatOpt('echo-request-interval',
default=15.0,
help='Time, in seconds, between sending echo requests to a datapath.'),
cfg.IntOpt('maximum-unreplied-echo-requests',
default=0,
min=0,
help='Maximum number of unreplied echo requests before datapath is disconnected.')
])
class OpenFlowController(object):
def __init__(self):
super(OpenFlowController, self).__init__()
if not CONF.ofp_tcp_listen_port and not CONF.ofp_ssl_listen_port:
self.ofp_tcp_listen_port = ofproto_common.OFP_TCP_PORT
self.ofp_ssl_listen_port = ofproto_common.OFP_SSL_PORT
# For the backward compatibility, we spawn a server loop
# listening on the old OpenFlow listen port 6633.
hub.spawn(self.server_loop,
ofproto_common.OFP_TCP_PORT_OLD,
ofproto_common.OFP_SSL_PORT_OLD)
else:
self.ofp_tcp_listen_port = CONF.ofp_tcp_listen_port
self.ofp_ssl_listen_port = CONF.ofp_ssl_listen_port
# entry point
def __call__(self):
# LOG.debug('call')
self.server_loop()
self.server_loop(self.ofp_tcp_listen_port,
self.ofp_ssl_listen_port)
def server_loop(self):
def server_loop(self, ofp_tcp_listen_port, ofp_ssl_listen_port):
if CONF.ctl_privkey is not None and CONF.ctl_cert is not None:
if CONF.ca_certs is not None:
server = StreamServer((CONF.ofp_listen_host,
CONF.ofp_ssl_listen_port),
ofp_ssl_listen_port),
datapath_connection_factory,
keyfile=CONF.ctl_privkey,
certfile=CONF.ctl_cert,
@ -84,14 +109,14 @@ class OpenFlowController(object):
ssl_version=ssl.PROTOCOL_TLSv1)
else:
server = StreamServer((CONF.ofp_listen_host,
CONF.ofp_ssl_listen_port),
ofp_ssl_listen_port),
datapath_connection_factory,
keyfile=CONF.ctl_privkey,
certfile=CONF.ctl_cert,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
server = StreamServer((CONF.ofp_listen_host,
CONF.ofp_tcp_listen_port),
ofp_tcp_listen_port),
datapath_connection_factory)
# LOG.debug('loop')
@ -103,12 +128,67 @@ def _deactivate(method):
try:
method(self)
finally:
self.send_active = False
self.set_state(handler.DEAD_DISPATCHER)
try:
self.socket.shutdown(SHUT_RDWR)
except (EOFError, IOError):
pass
if not self.is_active:
self.socket.close()
return deactivate
class Datapath(ofproto_protocol.ProtocolDesc):
"""
A class to describe an OpenFlow switch connected to this controller.
An instance has the following attributes.
.. tabularcolumns:: |l|L|
==================================== ======================================
Attribute Description
==================================== ======================================
id 64-bit OpenFlow Datapath ID.
Only available for
ryu.controller.handler.MAIN_DISPATCHER
phase.
ofproto A module which exports OpenFlow
definitions, mainly constants appeared
in the specification, for the
negotiated OpenFlow version. For
example, ryu.ofproto.ofproto_v1_0 for
OpenFlow 1.0.
ofproto_parser A module which exports OpenFlow wire
message encoder and decoder for the
negotiated OpenFlow version.
For example,
ryu.ofproto.ofproto_v1_0_parser
for OpenFlow 1.0.
ofproto_parser.OFPxxxx(datapath,...) A callable to prepare an OpenFlow
message for the given switch. It can
be sent with Datapath.send_msg later.
xxxx is a name of the message. For
example OFPFlowMod for flow-mod
message. Arguments depend on the
message.
set_xid(self, msg) Generate an OpenFlow XID and put it
in msg.xid.
send_msg(self, msg) Queue an OpenFlow message to send to
the corresponding switch. If msg.xid
is None, set_xid is automatically
called on the message before queueing.
send_packet_out deprecated
send_flow_mod deprecated
send_flow_del deprecated
send_delete_all_flows deprecated
send_barrier Queue an OpenFlow barrier message to
send to the switch.
send_nxt_set_flow_format deprecated
is_reserved_port deprecated
==================================== ======================================
"""
def __init__(self, socket, address):
super(Datapath, self).__init__()
@ -116,43 +196,28 @@ class Datapath(ofproto_protocol.ProtocolDesc):
self.socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
self.socket.settimeout(CONF.socket_timeout)
self.address = address
self.send_active = True
self.close_requested = False
self.is_active = True
# The limit is arbitrary. We need to limit queue size to
# prevent it from eating memory up
# prevent it from eating memory up.
self.send_q = hub.Queue(16)
self._send_q_sem = hub.BoundedSemaphore(self.send_q.maxsize)
self.echo_request_interval = CONF.echo_request_interval
self.max_unreplied_echo_requests = CONF.maximum_unreplied_echo_requests
self.unreplied_echo_requests = []
self.xid = random.randint(0, self.ofproto.MAX_XID)
self.id = None # datapath_id is unknown yet
self._ports = None
self.flow_format = ofproto_v1_0.NXFF_OPENFLOW10
self.ofp_brick = ryu.base.app_manager.lookup_service_brick('ofp_event')
self.set_state(handler.HANDSHAKE_DISPATCHER)
def _get_ports(self):
if (self.ofproto_parser is not None and
self.ofproto_parser.ofproto.OFP_VERSION >= 0x04):
message = (
'Datapath#ports is kept for compatibility with the previous '
'openflow versions (< 1.3). '
'This not be updated by EventOFPPortStatus message. '
'If you want to be updated, you can use '
'\'ryu.controller.dpset\' or \'ryu.topology.switches\'.'
)
warnings.warn(message, stacklevel=2)
return self._ports
def _set_ports(self, ports):
self._ports = ports
# To show warning when Datapath#ports is read
ports = property(_get_ports, _set_ports)
self.set_state(HANDSHAKE_DISPATCHER)
@_deactivate
def close(self):
self.close_requested = True
if self.state != DEAD_DISPATCHER:
self.set_state(DEAD_DISPATCHER)
def set_state(self, state):
self.state = state
@ -167,19 +232,19 @@ class Datapath(ofproto_protocol.ProtocolDesc):
required_len = ofproto_common.OFP_HEADER_SIZE
count = 0
while True:
ret = ""
while self.state != DEAD_DISPATCHER:
try:
ret = self.socket.recv(required_len)
except SocketTimeout:
if not self.close_requested:
continue
except SocketError:
self.close_requested = True
continue
except ssl.SSLError:
# eventlet throws SSLError (which is a subclass of IOError)
# on SSL socket read timeout; re-try the loop in this case.
continue
except (EOFError, IOError):
break
if (len(ret) == 0) or (self.close_requested):
self.socket.close()
if len(ret) == 0:
break
buf += ret
@ -215,30 +280,45 @@ class Datapath(ofproto_protocol.ProtocolDesc):
count = 0
hub.sleep(0)
@_deactivate
def _send_loop(self):
try:
while self.send_active:
while self.state != DEAD_DISPATCHER:
buf = self.send_q.get()
self._send_q_sem.release()
self.socket.sendall(buf)
except SocketTimeout:
LOG.debug("Socket timed out while sending data to switch at address %s",
self.address)
except IOError as ioe:
LOG.debug("Socket error while sending data to switch at address %s: [%d] %s",
self.address, ioe.errno, ioe.strerror)
# Convert ioe.errno to a string, just in case it was somehow set to None.
errno = "%s" % ioe.errno
LOG.debug("Socket error while sending data to switch at address %s: [%s] %s",
self.address, errno, ioe.strerror)
finally:
q = self.send_q
# first, clear self.send_q to prevent new references.
# First, clear self.send_q to prevent new references.
self.send_q = None
# there might be threads currently blocking in send_q.put().
# unblock them by draining the queue.
# Now, drain the send_q, releasing the associated semaphore for each entry.
# This should release all threads waiting to acquire the semaphore.
try:
while q.get(block=False):
pass
self._send_q_sem.release()
except hub.QueueEmpty:
pass
# Finally, ensure the _recv_loop terminates.
self.close()
def send(self, buf):
msg_enqueued = False
self._send_q_sem.acquire()
if self.send_q:
self.send_q.put(buf)
msg_enqueued = True
else:
self._send_q_sem.release()
if not msg_enqueued:
LOG.debug('Datapath in process of terminating; send() to %s discarded.',
self.address)
def set_xid(self, msg):
self.xid += 1
@ -254,6 +334,23 @@ class Datapath(ofproto_protocol.ProtocolDesc):
# LOG.debug('send_msg %s', msg)
self.send(msg.buf)
def _echo_request_loop(self):
if not self.max_unreplied_echo_requests:
return
while (self.send_q and
(len(self.unreplied_echo_requests) <= self.max_unreplied_echo_requests)):
echo_req = self.ofproto_parser.OFPEchoRequest(self)
self.unreplied_echo_requests.append(self.set_xid(echo_req))
self.send_msg(echo_req)
hub.sleep(self.echo_request_interval)
self.close()
def acknowledge_echo_reply(self, xid):
try:
self.unreplied_echo_requests.remove(xid)
except:
pass
def serve(self):
send_thr = hub.spawn(self._send_loop)
@ -261,11 +358,15 @@ class Datapath(ofproto_protocol.ProtocolDesc):
hello = self.ofproto_parser.OFPHello(self)
self.send_msg(hello)
echo_thr = hub.spawn(self._echo_request_loop)
try:
self._recv_loop()
finally:
hub.kill(send_thr)
hub.joinall([send_thr])
hub.kill(echo_thr)
hub.joinall([send_thr, echo_thr])
self.is_active = False
#
# Utility methods for convenience

View File

@ -44,6 +44,23 @@ class EventDPBase(event.EventBase):
class EventDP(EventDPBase):
"""
An event class to notify connect/disconnect of a switch.
For OpenFlow switches, one can get the same notification by observing
ryu.controller.ofp_event.EventOFPStateChange.
An instance has at least the following attributes.
========= =================================================================
Attribute Description
========= =================================================================
dp A ryu.controller.controller.Datapath instance of the switch
enter True when the switch connected to our controller. False for
disconnect.
ports A list of port instances.
========= =================================================================
"""
def __init__(self, dp, enter_leave):
# enter_leave
# True: dp entered
@ -67,16 +84,64 @@ class EventPortBase(EventDPBase):
class EventPortAdd(EventPortBase):
    """
    An event class for switch port status "ADD" notification.

    This event is generated when a new port is added to a switch.
    For OpenFlow switches, one can get the same notification by observing
    ryu.controller.ofp_event.EventOFPPortStatus.
    An instance has at least the following attributes.

    ========= =================================================================
    Attribute Description
    ========= =================================================================
    dp        A ryu.controller.controller.Datapath instance of the switch
    port      port number
    ========= =================================================================
    """

    def __init__(self, dp, port):
        # All attribute storage is delegated to EventPortBase.
        super(EventPortAdd, self).__init__(dp, port)
class EventPortDelete(EventPortBase):
    """
    An event class for switch port status "DELETE" notification.

    This event is generated when a port is removed from a switch.
    For OpenFlow switches, one can get the same notification by observing
    ryu.controller.ofp_event.EventOFPPortStatus.
    An instance has at least the following attributes.

    ========= =================================================================
    Attribute Description
    ========= =================================================================
    dp        A ryu.controller.controller.Datapath instance of the switch
    port      port number
    ========= =================================================================
    """

    def __init__(self, dp, port):
        # All attribute storage is delegated to EventPortBase.
        super(EventPortDelete, self).__init__(dp, port)
class EventPortModify(EventPortBase):
    """
    An event class for switch port status "MODIFY" notification.

    This event is generated when some attribute of a port is changed.
    For OpenFlow switches, one can get the same notification by observing
    ryu.controller.ofp_event.EventOFPPortStatus.
    An instance has at least the following attributes.

    ========= ====================================================================
    Attribute Description
    ========= ====================================================================
    dp        A ryu.controller.controller.Datapath instance of the switch
    port      port number
    ========= ====================================================================
    """

    def __init__(self, dp, new_port):
        # 'new_port' carries the updated port state; stored by EventPortBase.
        super(EventPortModify, self).__init__(dp, new_port)

View File

@ -16,11 +16,20 @@
class EventBase(object):
# Nothing yet
pass
"""
The base of all event classes.
A Ryu application can define its own event type by creating a subclass.
"""
def __init__(self):
super(EventBase, self).__init__()
class EventRequestBase(EventBase):
"""
The base class for synchronous request for RyuApp.send_request.
"""
def __init__(self):
super(EventRequestBase, self).__init__()
self.dst = None # app.name of provide the event.
@ -30,6 +39,9 @@ class EventRequestBase(EventBase):
class EventReplyBase(EventBase):
"""
The base class for synchronous request reply for RyuApp.send_reply.
"""
def __init__(self, dst):
super(EventReplyBase, self).__init__()
self.dst = dst

View File

@ -47,6 +47,33 @@ class _Caller(object):
# should be named something like 'observe_event'
def set_ev_cls(ev_cls, dispatchers=None):
"""
A decorator for Ryu application to declare an event handler.
Decorated method will become an event handler.
ev_cls is an event class whose instances this RyuApp wants to receive.
dispatchers argument specifies one of the following negotiation phases
(or a list of them) for which events should be generated for this handler.
Note that, in case an event changes the phase, the phase before the change
is used to check the interest.
.. tabularcolumns:: |l|L|
=========================================== ===============================
Negotiation phase Description
=========================================== ===============================
ryu.controller.handler.HANDSHAKE_DISPATCHER Sending and waiting for hello
message
ryu.controller.handler.CONFIG_DISPATCHER Version negotiated and sent
features-request message
ryu.controller.handler.MAIN_DISPATCHER Switch-features message
received and sent set-config
message
ryu.controller.handler.DEAD_DISPATCHER Disconnect from the peer. Or
disconnecting due to some
unrecoverable errors.
=========================================== ===============================
"""
def _set_ev_cls_dec(handler):
if 'callers' not in dir(handler):
handler.callers = {}

View File

@ -49,8 +49,8 @@ class MacToNetwork(object):
# VM-> tap-> ovs-> ext-port-> wire-> ext-port-> ovs-> tap-> VM
return
LOG.warn('duplicated nw_id: mac %s nw old %s new %s',
haddr_to_str(mac), _nw_id, nw_id)
LOG.warning('duplicated nw_id: mac %s nw old %s new %s',
haddr_to_str(mac), _nw_id, nw_id)
raise MacAddressDuplicated(mac=mac)

View File

@ -18,23 +18,55 @@ import collections
from ryu.base import app_manager
import ryu.exception as ryu_exc
from ryu.app.rest_nw_id import NW_ID_UNKNOWN
from ryu.controller import event
from ryu.exception import NetworkNotFound, NetworkAlreadyExist
from ryu.exception import PortAlreadyExist, PortNotFound, PortUnknown
NW_ID_UNKNOWN = '__NW_ID_UNKNOWN__'
class MacAddressAlreadyExist(ryu_exc.RyuException):
message = 'port (%(dpid)s, %(port)s) has already mac %(mac_address)s'
class EventNetworkDel(event.EventBase):
    """
    An event class for network deletion.

    This event is generated when a network is deleted by the REST API.
    An instance has at least the following attributes.

    ========== ===================================================================
    Attribute  Description
    ========== ===================================================================
    network_id Network ID
    ========== ===================================================================
    """

    def __init__(self, network_id):
        super(EventNetworkDel, self).__init__()
        # Identifier of the deleted network.
        self.network_id = network_id
class EventNetworkPort(event.EventBase):
"""
An event class for notification of port arrival and deperture.
This event is generated when a port is introduced to or removed from a
network by the REST API.
An instance has at least the following attributes.
========== ================================================================
Attribute Description
========== ================================================================
network_id Network ID
dpid OpenFlow Datapath ID of the switch to which the port belongs.
port_no OpenFlow port number of the port
add_del True for adding a port. False for removing a port.
========== ================================================================
"""
def __init__(self, network_id, dpid, port_no, add_del):
super(EventNetworkPort, self).__init__()
self.network_id = network_id
@ -44,6 +76,26 @@ class EventNetworkPort(event.EventBase):
class EventMacAddress(event.EventBase):
"""
An event class for end-point MAC address registration.
This event is generated when a end-point MAC address is updated
by the REST API.
An instance has at least the following attributes.
=========== ===============================================================
Attribute Description
=========== ===============================================================
network_id Network ID
dpid OpenFlow Datapath ID of the switch to which the port belongs.
port_no OpenFlow port number of the port
mac_address The old MAC address of the port if add_del is False. Otherwise
the new MAC address.
add_del False if this event is a result of a port removal. Otherwise
True.
=========== ===============================================================
"""
def __init__(self, dpid, port_no, network_id, mac_address, add_del):
super(EventMacAddress, self).__init__()
assert network_id is not None

View File

@ -27,6 +27,25 @@ from . import event
class EventOFPMsgBase(event.EventBase):
    """
    The base class of OpenFlow event class.

    OpenFlow event classes have at least the following attributes.

    .. tabularcolumns:: |l|L|

    ============ ==============================================================
    Attribute    Description
    ============ ==============================================================
    msg          An object which describes the corresponding OpenFlow message.
    msg.datapath A ryu.controller.controller.Datapath instance
                 which describes an OpenFlow switch from which we received
                 this OpenFlow message.
    ============ ==============================================================

    The msg object has some more additional members whose values are extracted
    from the original OpenFlow message.
    """

    def __init__(self, msg):
        super(EventOFPMsgBase, self).__init__()
        # The parsed OpenFlow message this event wraps.
        self.msg = msg
@ -81,9 +100,45 @@ for ofp_mods in ofproto.get_ofp_modules().values():
class EventOFPStateChange(event.EventBase):
    """
    An event class for negotiation phase change notification.

    An instance of this class is sent to observer after changing
    the negotiation phase.
    An instance has at least the following attributes.

    ========= =================================================================
    Attribute Description
    ========= =================================================================
    datapath  ryu.controller.controller.Datapath instance of the switch
    ========= =================================================================
    """

    def __init__(self, dp):
        super(EventOFPStateChange, self).__init__()
        # Datapath whose negotiation phase changed.
        self.datapath = dp
class EventOFPPortStateChange(event.EventBase):
    """
    An event class to notify the port state changes of Datapath instance.

    This event performs like EventOFPPortStatus, but Ryu will
    send this event after updating ``ports`` dict of Datapath instances.
    An instance has at least the following attributes.

    ========= =================================================================
    Attribute Description
    ========= =================================================================
    datapath  ryu.controller.controller.Datapath instance of the switch
    reason    one of OFPPR_*
    port_no   Port number which state was changed
    ========= =================================================================
    """

    def __init__(self, dp, reason, port_no):
        super(EventOFPPortStateChange, self).__init__()
        self.datapath = dp
        self.reason = reason
        self.port_no = port_no
handler.register_service('ryu.controller.ofp_handler')

View File

@ -238,27 +238,59 @@ class OFPHandler(ryu.base.app_manager.RyuApp):
echo_reply.data = msg.data
datapath.send_msg(echo_reply)
@set_ev_handler(ofp_event.EventOFPEchoReply,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_reply_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
datapath.acknowledge_echo_reply(msg.xid)
@set_ev_handler(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
if msg.reason in [ofproto.OFPPR_ADD, ofproto.OFPPR_MODIFY]:
datapath.ports[msg.desc.port_no] = msg.desc
elif msg.reason == ofproto.OFPPR_DELETE:
datapath.ports.pop(msg.desc.port_no, None)
else:
return
self.send_event_to_observers(
ofp_event.EventOFPPortStateChange(
datapath, msg.reason, msg.desc.port_no),
datapath.state)
@set_ev_handler(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
msg = ev.msg
ofp = msg.datapath.ofproto
(version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
self.logger.debug('EventOFPErrorMsg received.')
self.logger.debug(
'version=%s, msg_type=%s, msg_len=%s, xid=%s', hex(msg.version),
hex(msg.msg_type), hex(msg.msg_len), hex(msg.xid))
self.logger.debug(
' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg.msg_type))
self.logger.debug(
"OFPErrorMsg(type=%s, code=%s, data=b'%s')", hex(msg.type),
hex(msg.code), utils.binary_str(msg.data))
self.logger.debug(
' |-- type: %s', ofp.ofp_error_type_to_str(msg.type))
self.logger.debug(
' |-- code: %s', ofp.ofp_error_code_to_str(msg.type, msg.code))
self.logger.debug(
' `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s',
hex(version), hex(msg_type), hex(msg_len), hex(xid))
self.logger.debug(
' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg_type))
"EventOFPErrorMsg received.\n"
"version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
" `-- msg_type: %s\n"
"OFPErrorMsg(type=%s, code=%s, data=b'%s')\n"
" |-- type: %s\n"
" |-- code: %s",
hex(msg.version), hex(msg.msg_type), hex(msg.msg_len),
hex(msg.xid), ofp.ofp_msg_type_to_str(msg.msg_type),
hex(msg.type), hex(msg.code), utils.binary_str(msg.data),
ofp.ofp_error_type_to_str(msg.type),
ofp.ofp_error_code_to_str(msg.type, msg.code))
if len(msg.data) >= ofp.OFP_HEADER_SIZE:
(version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
self.logger.debug(
" `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
" `-- msg_type: %s",
hex(version), hex(msg_type), hex(msg_len), hex(xid),
ofp.ofp_msg_type_to_str(msg_type))
else:
self.logger.warning(
"The data field sent from the switch is too short: "
"len(msg.data) < OFP_HEADER_SIZE\n"
"The OpenFlow Spec says that the data field should contain "
"at least 64 bytes of the failed request.\n"
"Please check the settings or implementation of your switch.")

View File

@ -43,16 +43,61 @@ class EventTunnelKeyBase(event.EventBase):
class EventTunnelKeyAdd(EventTunnelKeyBase):
    """
    An event class for tunnel key registration.

    This event is generated when a tunnel key is registered or updated
    by the REST API.
    An instance has at least the following attributes.

    =========== ===============================================================
    Attribute   Description
    =========== ===============================================================
    network_id  Network ID
    tunnel_key  Tunnel Key
    =========== ===============================================================
    """

    def __init__(self, network_id, tunnel_key):
        # Attribute storage is delegated to EventTunnelKeyBase
        # (defined earlier in this module).
        super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key)
class EventTunnelKeyDel(EventTunnelKeyBase):
    """
    An event class for tunnel key unregistration.

    This event is generated when a tunnel key is removed by the REST API.
    An instance has at least the following attributes.

    =========== ===============================================================
    Attribute   Description
    =========== ===============================================================
    network_id  Network ID
    tunnel_key  Tunnel Key
    =========== ===============================================================
    """

    def __init__(self, network_id, tunnel_key):
        # Attribute storage is delegated to EventTunnelKeyBase.
        super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key)
class EventTunnelPort(event.EventBase):
"""
An event class for tunnel port registration.
This event is generated when a tunnel port is added or removed
by the REST API.
An instance has at least the following attributes.
=========== ===============================================================
Attribute Description
=========== ===============================================================
dpid OpenFlow Datapath ID
port_no OpenFlow port number
remote_dpid OpenFlow port number of the tunnel peer
add_del True for adding a tunnel. False for removal.
=========== ===============================================================
"""
def __init__(self, dpid, port_no, remote_dpid, add_del):
super(EventTunnelPort, self).__init__()
self.dpid = dpid

View File

@ -41,8 +41,6 @@ def setup_hook(config):
metadata = config['metadata']
if sys.platform == 'win32':
requires = metadata.get('requires_dist', '').split('\n')
requires.append('pywin32')
requires.append('wmi')
metadata['requires_dist'] = "\n".join(requires)
config['metadata'] = metadata

View File

@ -18,8 +18,8 @@ import logging
import os
# we don't bother to use cfg.py because monkey patch needs to be
# called very early. instead, we use an environment variable to
# We don't bother to use cfg.py because monkey patch needs to be
# called very early. Instead, we use an environment variable to
# select the type of hub.
HUB_TYPE = os.getenv('RYU_HUB_TYPE', 'eventlet')
@ -45,34 +45,42 @@ if HUB_TYPE == 'eventlet':
connect = eventlet.connect
def spawn(*args, **kwargs):
raise_error = kwargs.pop('raise_error', False)
def _launch(func, *args, **kwargs):
# mimic gevent's default raise_error=False behaviour
# by not propergating an exception to the joiner.
# Mimic gevent's default raise_error=False behaviour
# by not propagating an exception to the joiner.
try:
func(*args, **kwargs)
except greenlet.GreenletExit:
return func(*args, **kwargs)
except TaskExit:
pass
except:
# log uncaught exception.
# note: this is an intentional divergence from gevent
# behaviour. gevent silently ignores such exceptions.
if raise_error:
raise
# Log uncaught exception.
# Note: this is an intentional divergence from gevent
# behaviour; gevent silently ignores such exceptions.
LOG.error('hub: uncaught exception: %s',
traceback.format_exc())
return eventlet.spawn(_launch, *args, **kwargs)
def spawn_after(seconds, *args, **kwargs):
raise_error = kwargs.pop('raise_error', False)
def _launch(func, *args, **kwargs):
# mimic gevent's default raise_error=False behaviour
# by not propergating an exception to the joiner.
# Mimic gevent's default raise_error=False behaviour
# by not propagating an exception to the joiner.
try:
func(*args, **kwargs)
except greenlet.GreenletExit:
return func(*args, **kwargs)
except TaskExit:
pass
except:
# log uncaught exception.
# note: this is an intentional divergence from gevent
# behaviour. gevent silently ignores such exceptions.
if raise_error:
raise
# Log uncaught exception.
# Note: this is an intentional divergence from gevent
# behaviour; gevent silently ignores such exceptions.
LOG.error('hub: uncaught exception: %s',
traceback.format_exc())
@ -83,17 +91,18 @@ if HUB_TYPE == 'eventlet':
def joinall(threads):
for t in threads:
# this try-except is necessary when killing an inactive
# greenthread
# This try-except is necessary when killing an inactive
# greenthread.
try:
t.wait()
except greenlet.GreenletExit:
except TaskExit:
pass
Queue = eventlet.queue.Queue
Queue = eventlet.queue.LightQueue
QueueEmpty = eventlet.queue.Empty
Semaphore = eventlet.semaphore.Semaphore
BoundedSemaphore = eventlet.semaphore.BoundedSemaphore
TaskExit = greenlet.GreenletExit
class StreamServer(object):
def __init__(self, listen_info, handle=None, backlog=None,
@ -144,9 +153,9 @@ if HUB_TYPE == 'eventlet':
def _broadcast(self):
self._ev.send()
# because eventlet Event doesn't allow mutiple send() on an event,
# re-create the underlying event.
# note: _ev.reset() is obsolete.
# Since eventlet Event doesn't allow multiple send() operations
# on an event, re-create the underlying event.
# Note: _ev.reset() is obsolete.
self._ev = eventlet.event.Event()
def is_set(self):

156
ryu/lib/ofctl_nicira_ext.py Normal file
View File

@ -0,0 +1,156 @@
# Copyright (C) 2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from ryu.ofproto import nicira_ext
LOG = logging.getLogger(__name__)
def action_to_str(act, ofctl_action_to_str):
    """Return a human-readable, ofctl-style string for a Nicira
    extension action instance *act*.

    :param act: a parsed NXAction* instance
    :param ofctl_action_to_str: callback used to render the plain
        OpenFlow actions nested inside an NXActionCT
    :return: description string; unknown subtypes fall through to a
        generic NX_UNKNOWN rendering of the raw payload
    """
    sub_type = act.subtype

    if sub_type == nicira_ext.NXAST_RESUBMIT:
        return 'NX_RESUBMIT: {port: %s, table: %s}' % (act.in_port,
                                                       act.table_id)

    elif sub_type == nicira_ext.NXAST_REG_MOVE:
        src_start = act.src_ofs
        dst_start = act.dst_ofs
        src_end = src_start + act.n_bits
        dst_end = dst_start + act.n_bits
        return 'NX_MOVE: {%s[%s..%s]: %s[%s..%s]}' % (act.dst_field, dst_start,
                                                      dst_end, act.src_field,
                                                      src_start, src_end)

    elif sub_type == nicira_ext.NXAST_REG_LOAD:
        start = act.ofs
        end = start + act.nbits
        return 'NX_LOAD: {%s[%s..%s]: %x}' % (act.dst, start, end, act.value)

    elif sub_type == nicira_ext.NXAST_LEARN:
        specs = []
        add_spec = specs.append

        for spec in act.specs:
            dst_type = spec._dst_type

            if dst_type == 0:  # match
                if isinstance(spec.src, (tuple, list)):
                    src = spec.src[0]
                    start = spec.src[1]
                    end = start + spec.n_bits
                    start_end = '%s..%s' % (start, end)
                else:
                    src = spec.src
                    start_end = '[]'
                add_spec('%s[%s]' % (src, start_end))

            elif dst_type == 1:  # load
                if isinstance(spec.src, (tuple, list)):
                    src = spec.src[0]
                    start = spec.src[1]
                    end = start + spec.n_bits
                    src_start_end = '[%s..%s]' % (start, end)
                else:
                    src = spec.src
                    # Bug fix: this branch assigned start_end, leaving
                    # src_start_end unbound (NameError when formatting).
                    src_start_end = ''

                if isinstance(spec.dst, (tuple, list)):
                    dst = spec.dst[0]
                    start = spec.dst[1]
                    end = start + spec.n_bits
                    dst_start_end = '[%s..%s]' % (start, end)
                else:
                    dst = spec.dst
                    # Bug fix: this branch assigned start_end, leaving
                    # dst_start_end unbound (NameError when formatting).
                    dst_start_end = '[]'

                add_spec('NX_LOAD {%s%s: %s%s}' % (dst, dst_start_end,
                                                   src, src_start_end))

            elif dst_type == 2:  # output
                if isinstance(spec.src, (tuple, list)):
                    src = spec.src[0]
                    start = spec.src[1]
                    end = start + spec.n_bits
                    start_end = '%s..%s' % (start, end)
                else:
                    src = spec.src
                    start_end = '[]'
                add_spec('output:%s%s' % (src, start_end))

        # Bug fix: the format string has nine conversions but the original
        # argument tuple omitted act.table_id (TypeError: not enough
        # arguments) and referenced the non-existent
        # act.self.fin_hard_timeout (AttributeError).
        return ('NX_LEARN: {idle_timeout: %s, '
                'hard_timeouts: %s, '
                'priority: %s, '
                'cookie: %s, '
                'flags: %s, '
                'table_id: %s, '
                'fin_idle_timeout: %s, '
                'fin_hard_timeout: %s, '
                'specs: %s}' % (act.idle_timeout, act.hard_timeout,
                                act.priority, act.cookie, act.flags,
                                act.table_id, act.fin_idle_timeout,
                                act.fin_hard_timeout, specs))

    elif sub_type == nicira_ext.NXAST_CONJUNCTION:
        return ('NX_CONJUNCTION: {clause: %s, number_of_clauses: %s, id: %s}' %
                (act.clause, act.n_clauses, act.id))

    elif sub_type == nicira_ext.NXAST_CT:
        if act.zone_ofs_nbits != 0:
            # NOTE(review): treats zone_ofs_nbits as a plain bit offset
            # with a fixed 16-bit width — confirm against nicira_ext.
            start = act.zone_ofs_nbits
            end = start + 16
            zone = act.zone_src + ('[%s..%s]' % (start, end))
        else:
            zone = act.zone_src

        actions = [ofctl_action_to_str(action) for action in act.actions]
        return ('NX_CT: {flags: %s, '
                'zone: %s, '
                'table: %s, '
                'alg: %s, '
                'actions: %s}' % (act.flags, zone, act.recirc_table, act.alg,
                                  actions))

    elif sub_type == nicira_ext.NXAST_NAT:
        return ('NX_NAT: {flags: %s, '
                'range_ipv4_min: %s, '
                'range_ipv4_max: %s, '
                'range_ipv6_min: %s, '
                'range_ipv6_max: %s, '
                'range_proto_min: %s, '
                'range_proto_max: %s}' % (act.flags,
                                          act.range_ipv4_min,
                                          act.range_ipv4_max,
                                          act.range_ipv6_min,
                                          act.range_ipv6_max,
                                          act.range_proto_min,
                                          act.range_proto_max))

    # Unknown subtype: dump the raw payload as base64 text.
    data_str = base64.b64encode(act.data)
    return 'NX_UNKNOWN: {subtype: %s, data: %s}' % (sub_type,
                                                    data_str.decode('utf-8'))

View File

@ -13,10 +13,250 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import netaddr
import six
from ryu.lib import dpid
from ryu.lib import hub
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
# NOTE(jkoelker) Constants for converting actions
OUTPUT = 'OUTPUT'
COPY_TTL_OUT = 'COPY_TTL_OUT'
COPY_TTL_IN = 'COPY_TTL_IN'
SET_MPLS_TTL = 'SET_MPLS_TTL'
DEC_MPLS_TTL = 'DEC_MPLS_TTL'
PUSH_VLAN = 'PUSH_VLAN'
POP_VLAN = 'POP_VLAN'
PUSH_MPLS = 'PUSH_MPLS'
POP_MPLS = 'POP_MPLS'
SET_QUEUE = 'SET_QUEUE'
GROUP = 'GROUP'
SET_NW_TTL = 'SET_NW_TTL'
DEC_NW_TTL = 'DEC_NW_TTL'
SET_FIELD = 'SET_FIELD'
PUSH_PBB = 'PUSH_PBB' # OpenFlow 1.3 or later
POP_PBB = 'POP_PBB' # OpenFlow 1.3 or later
COPY_FIELD = 'COPY_FIELD' # OpenFlow 1.5 or later
METER = 'METER' # OpenFlow 1.5 or later
EXPERIMENTER = 'EXPERIMENTER'
def get_logger(logger=None):
    """Return *logger* when the caller supplied one, else the module LOG."""
    # Honor the caller-provided logger; fall back to this module's default.
    return LOG if logger is None else logger
def match_vid_to_str(value, mask, ofpvid_present):
    """Render a VLAN-ID match as ofctl-style text.

    Masked matches become "0xVVVV/0xMMMM"; unmasked values with the
    PRESENT bit set become the bare decimal VLAN number, otherwise the
    raw value in hex.
    """
    if mask is None:
        if value & ofpvid_present:
            # PRESENT bit set: show just the VLAN number, in decimal.
            return str(value & ~ofpvid_present)
        return '0x%04x' % value
    # Masked match: hex value/mask pair.
    return '0x%04x/0x%04x' % (value, mask)
def to_action(dic, ofp, parser, action_type, util):
    """Build an OFPAction* instance from a JSON-style action dict.

    :param dic: dict with the action's arguments (e.g. {'port': 1})
    :param ofp: ofproto module of the target OpenFlow version
    :param parser: ofproto_parser module of the target version
    :param action_type: one of the action-name constants defined above
    :param util: OFCtlUtil bound to *ofp*, used to decode reserved numbers
    :return: the parser action instance, or None when *action_type* is
        unknown or unsupported by this OpenFlow version
    """
    # Actions that take no argument at all.
    actions = {COPY_TTL_OUT: parser.OFPActionCopyTtlOut,
               COPY_TTL_IN: parser.OFPActionCopyTtlIn,
               DEC_MPLS_TTL: parser.OFPActionDecMplsTtl,
               POP_VLAN: parser.OFPActionPopVlan,
               DEC_NW_TTL: parser.OFPActionDecNwTtl,
               POP_PBB: parser.OFPActionPopPbb}

    # Actions whose single argument is an ethertype.
    need_ethertype = {PUSH_VLAN: parser.OFPActionPushVlan,
                      PUSH_MPLS: parser.OFPActionPushMpls,
                      POP_MPLS: parser.OFPActionPopMpls,
                      PUSH_PBB: parser.OFPActionPushPbb}

    if action_type in actions:
        return actions[action_type]()
    elif action_type in need_ethertype:
        ethertype = int(dic.get('ethertype'))
        return need_ethertype[action_type](ethertype)
    elif action_type == OUTPUT:
        out_port = util.ofp_port_from_user(dic.get('port', ofp.OFPP_ANY))
        max_len = util.ofp_cml_from_user(dic.get('max_len', ofp.OFPCML_MAX))
        return parser.OFPActionOutput(out_port, max_len)
    elif action_type == SET_MPLS_TTL:
        mpls_ttl = int(dic.get('mpls_ttl'))
        return parser.OFPActionSetMplsTtl(mpls_ttl)
    elif action_type == SET_QUEUE:
        queue_id = util.ofp_queue_from_user(dic.get('queue_id'))
        return parser.OFPActionSetQueue(queue_id)
    elif action_type == GROUP:
        group_id = util.ofp_group_from_user(dic.get('group_id'))
        return parser.OFPActionGroup(group_id)
    elif action_type == SET_NW_TTL:
        nw_ttl = int(dic.get('nw_ttl'))
        return parser.OFPActionSetNwTtl(nw_ttl)
    elif action_type == SET_FIELD:
        field = dic.get('field')
        value = dic.get('value')
        return parser.OFPActionSetField(**{field: value})
    elif action_type == COPY_FIELD:
        # Consistency fix: use the COPY_FIELD constant instead of the
        # duplicated string literal 'COPY_FIELD' (same value).
        n_bits = int(dic.get('n_bits'))
        src_offset = int(dic.get('src_offset'))
        dst_offset = int(dic.get('dst_offset'))
        oxm_ids = [parser.OFPOxmId(str(dic.get('src_oxm_id'))),
                   parser.OFPOxmId(str(dic.get('dst_oxm_id')))]
        return parser.OFPActionCopyField(
            n_bits, src_offset, dst_offset, oxm_ids)
    elif action_type == METER:
        # Consistency fix: use the METER constant instead of the
        # duplicated string literal 'METER' (same value).
        if hasattr(parser, 'OFPActionMeter'):
            # OpenFlow 1.5 or later
            meter_id = int(dic.get('meter_id'))
            return parser.OFPActionMeter(meter_id)
        else:
            # OpenFlow 1.4 or earlier has no meter action
            return None
    elif action_type == EXPERIMENTER:
        experimenter = int(dic.get('experimenter'))
        data_type = dic.get('data_type', 'ascii')
        if data_type not in ('ascii', 'base64'):
            LOG.error('Unknown data type: %s', data_type)
            return None
        data = dic.get('data', '')
        if data_type == 'base64':
            data = base64.b64decode(data)
        return parser.OFPActionExperimenterUnknown(experimenter, data)

    return None
def to_match_eth(value):
    """Parse an Ethernet-address match string.

    "addr/mask" yields an (addr, mask) tuple; anything else is
    returned unchanged.
    """
    if '/' not in value:
        return value
    parts = value.split('/')
    return parts[0], parts[1]
def to_match_ip(value):
    """Parse an IP match string.

    "addr/len" (CIDR prefix length) and "addr/mask" both yield an
    (addr, netmask) tuple; a bare address string is returned unchanged.
    """
    if '/' not in value:
        return value
    ip_addr, ip_mask = value.split('/')
    if ip_mask.isdigit():
        # CIDR prefix length: expand it to a dotted netmask via netaddr.
        network = netaddr.ip.IPNetwork(value)
        ip_addr = str(network.ip)
        ip_mask = str(network.netmask)
    return ip_addr, ip_mask
def to_match_vid(value, ofpvid_present):
    """Convert a user-given VLAN-ID match into oxm form.

    A decimal int (or decimal string) is taken as the VLAN tag number
    and the OFPVID_PRESENT bit is OR-ed in automatically.  A hexadecimal
    (or otherwise base-prefixed) string is taken as the raw oxm_value,
    with the PRESENT bit supplied by the caller.  A "value/mask" string
    yields a (value, mask) tuple, both parsed with auto base detection.
    """
    if isinstance(value, six.integer_types):
        # Plain int: VLAN tag number.
        return value | ofpvid_present
    if '/' in value:
        parts = value.split('/')
        # Masked match: parse both halves with automatic base detection.
        return int(parts[0], 0), int(parts[1], 0)
    if value.isdigit():
        # Decimal string: same treatment as a plain int.
        return int(value, 10) | ofpvid_present
    # Hex/prefixed string: raw oxm_value; PRESENT bit NOT added.
    return int(value, 0)
def to_match_masked_int(value):
    """Convert a possibly-masked integer match.

    A "value/mask" string yields a (value, mask) tuple; anything else
    is converted directly via str_to_int.
    """
    if not isinstance(value, str) or '/' not in value:
        return str_to_int(value)
    parts = value.split('/')
    # Convert each side of the mask independently.
    return str_to_int(parts[0]), str_to_int(parts[1])
def to_match_packet_type(value):
    """Convert a packet_type match into its packed integer form.

    A (namespace, ns_type) pair is packed with the namespace in the
    upper 16 bits and the type in the lower 16; a scalar is converted
    directly.
    """
    if not isinstance(value, (list, tuple)):
        return str_to_int(value)
    namespace, ns_type = value[0], value[1]
    return (str_to_int(namespace) << 16) | str_to_int(ns_type)
def send_experimenter(dp, exp, logger=None):
    """Build and send an OFPExperimenter message described by dict *exp*.

    Supported payload encodings are 'ascii' (the default) and 'base64';
    any other data_type is logged as an error and nothing is sent.
    """
    data_type = exp.get('data_type', 'ascii')
    data = exp.get('data', '')
    if data_type == 'ascii':
        data = data.encode('ascii')
    elif data_type == 'base64':
        data = base64.b64decode(data)
    else:
        get_logger(logger).error('Unknown data type: %s', data_type)
        return

    expmsg = dp.ofproto_parser.OFPExperimenter(
        dp, exp.get('experimenter', 0), exp.get('exp_type', 0), data)
    send_msg(dp, expmsg, logger)
def send_msg(dp, msg, logger=None):
    """Assign an xid if needed, debug-log, and send *msg* on datapath *dp*."""
    if msg.xid is None:
        dp.set_xid(msg)
    # Build the format string once and let the logger interpolate the
    # arguments lazily, so no work is done when debug logging is off.
    fmt = ('Sending message with xid(%x) to '
           'datapath(' + dpid._DPID_FMT + '): %s')
    get_logger(logger).debug(fmt, msg.xid, dp.id, msg)
    dp.send_msg(msg)
def send_stats_request(dp, stats, waiters, msgs, logger=None):
    """Send a stats request on *dp* and collect the replies into *msgs*.

    Registers (event, msgs) under waiters[dp.id][xid] — presumably the
    stats-reply handler appends reply parts to msgs and sets the event;
    confirm against the OFP handler — then waits until a DEFAULT_TIMEOUT
    window passes with no new reply part.
    """
    dp.set_xid(stats)
    waiters_per_dp = waiters.setdefault(dp.id, {})
    lock = hub.Event()
    previous_msg_len = len(msgs)
    waiters_per_dp[stats.xid] = (lock, msgs)
    send_msg(dp, stats, logger)
    lock.wait(timeout=DEFAULT_TIMEOUT)
    current_msg_len = len(msgs)
    # Multipart replies can arrive as several messages: keep waiting as
    # long as each timeout window delivered something new.
    while current_msg_len > previous_msg_len:
        previous_msg_len = current_msg_len
        lock.wait(timeout=DEFAULT_TIMEOUT)
        current_msg_len = len(msgs)
    if not lock.is_set():
        # No reply ever arrived: unregister the waiter so the entry
        # does not leak.
        del waiters_per_dp[stats.xid]
def str_to_int(str_num):
@ -27,22 +267,104 @@ class OFCtlUtil(object):
    def __init__(self, ofproto):
        """Bind this helper to a specific ofproto module."""
        # ofproto module whose OFP* constants are searched by the
        # _reserved_num_* helpers.
        self.ofproto = ofproto
        # Constant names skipped when mapping numbers back to user-facing
        # names (see _reserved_num_to_user): deprecated aliases sharing a
        # value with a current name.
        self.deprecated_value = [
            'OFPTFPT_EXPERIMENTER_SLAVE',
            'OFPTFPT_EXPERIMENTER_MASTER',
            'OFPQCFC_EPERM']
def _reserved_num_from_user(self, num, prefix):
if isinstance(num, int):
return num
else:
if num.startswith(prefix):
return getattr(self.ofproto, num)
else:
return getattr(self.ofproto, prefix + num.upper())
try:
return str_to_int(num)
except ValueError:
try:
if num.startswith(prefix):
return getattr(self.ofproto, num.upper())
else:
return getattr(self.ofproto, prefix + num.upper())
except AttributeError:
LOG.warning(
"Cannot convert argument to reserved number: %s", num)
return num
def _reserved_num_to_user(self, num, prefix):
for k, v in self.ofproto.__dict__.items():
if k.startswith(prefix) and v == num:
return k.replace(prefix, '')
if k not in self.deprecated_value and \
k.startswith(prefix) and v == num:
return k.replace(prefix, '')
return num
def ofp_port_features_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPPF_')
def ofp_port_features_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPPF_')
def ofp_port_mod_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPPMPT_')
def ofp_port_mod_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPPMPT_')
def ofp_port_desc_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPPDPT_')
def ofp_port_desc_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPPDPT_')
def ofp_action_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPAT_')
def ofp_action_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPAT_')
def ofp_instruction_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPIT_')
def ofp_instruction_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPIT_')
def ofp_group_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPGT_')
def ofp_group_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPGT_')
def ofp_meter_band_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPMBT_')
def ofp_meter_band_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPMBT_')
def ofp_table_feature_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPTFPT_')
def ofp_table_feature_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPTFPT_')
def ofp_port_stats_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPPSPT_')
def ofp_port_stats_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPPSPT_')
def ofp_queue_desc_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPQDPT_')
def ofp_queue_desc_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPQDPT_')
def ofp_queue_stats_prop_type_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPQSPT_')
def ofp_queue_stats_prop_type_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPQSPT_')
def ofp_meter_flags_from_user(self, act):
return self._reserved_num_from_user(act, 'OFPMF_')
def ofp_meter_flags_to_user(self, act):
return self._reserved_num_to_user(act, 'OFPMF_')
def ofp_port_from_user(self, port):
return self._reserved_num_from_user(port, 'OFPP_')
@ -67,6 +389,18 @@ class OFCtlUtil(object):
def ofp_group_to_user(self, group):
return self._reserved_num_to_user(group, 'OFPG_')
def ofp_group_capabilities_from_user(self, group):
return self._reserved_num_from_user(group, 'OFPGFC_')
def ofp_group_capabilities_to_user(self, group):
return self._reserved_num_to_user(group, 'OFPGFC_')
def ofp_group_bucket_prop_type_from_user(self, group):
return self._reserved_num_from_user(group, 'OFPGBPT_')
def ofp_group_bucket_prop_type_to_user(self, group):
return self._reserved_num_to_user(group, 'OFPGBPT_')
def ofp_buffer_from_user(self, buffer):
if buffer in ['OFP_NO_BUFFER', 'NO_BUFFER']:
return self.ofproto.OFP_NO_BUFFER

View File

@ -18,7 +18,6 @@ import socket
import logging
from ryu.ofproto import ofproto_v1_0
from ryu.lib import hub
from ryu.lib import ofctl_utils
from ryu.lib.mac import haddr_to_bin, haddr_to_str
@ -258,8 +257,8 @@ def match_to_str(m):
def nw_src_to_str(wildcards, addr):
ip = socket.inet_ntoa(struct.pack('!I', addr))
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK)
>> ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK) >>
ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
if mask == 32:
mask = 0
if mask:
@ -269,8 +268,8 @@ def nw_src_to_str(wildcards, addr):
def nw_dst_to_str(wildcards, addr):
ip = socket.inet_ntoa(struct.pack('!I', addr))
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK)
>> ofproto_v1_0.OFPFW_NW_DST_SHIFT)
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK) >>
ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if mask == 32:
mask = 0
if mask:
@ -278,30 +277,11 @@ def nw_dst_to_str(wildcards, addr):
return ip
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
previous_msg_len = len(msgs)
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
while current_msg_len > previous_msg_len:
previous_msg_len = current_msg_len
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = {}
for msg in msgs:
stats = msg.body
@ -314,11 +294,21 @@ def get_desc_stats(dp, waiters):
return desc
def get_queue_stats(dp, waiters):
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, dp.ofproto.OFPP_ALL,
dp.ofproto.OFPQ_ALL)
def get_queue_stats(dp, waiters, port=None, queue_id=None):
if port is None:
port = dp.ofproto.OFPP_ALL
else:
port = int(str(port), 0)
if queue_id is None:
queue_id = dp.ofproto.OFPQ_ALL
else:
queue_id = int(str(queue_id), 0)
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port,
queue_id)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = []
for msg in msgs:
@ -345,7 +335,7 @@ def get_flow_stats(dp, waiters, flow=None):
dp, 0, match, table_id, out_port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
@ -381,7 +371,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None):
dp, 0, match, table_id, out_port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
@ -390,7 +380,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None):
s = {'packet_count': st.packet_count,
'byte_count': st.byte_count,
'flow_count': st.flow_count}
flows.append(s)
flows.append(s)
flows = {str(dp.id): flows}
return flows
@ -400,7 +390,7 @@ def get_table_stats(dp, waiters):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
ofp = dp.ofproto
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
match_convert = {ofp.OFPFW_IN_PORT: 'IN_PORT',
ofp.OFPFW_DL_VLAN: 'DL_VLAN',
@ -447,11 +437,16 @@ def get_table_stats(dp, waiters):
return desc
def get_port_stats(dp, waiters):
def get_port_stats(dp, waiters, port=None):
if port is None:
port = dp.ofproto.OFPP_NONE
else:
port = int(str(port), 0)
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_NONE)
dp, 0, port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
ports = []
for msg in msgs:
@ -478,7 +473,7 @@ def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
@ -521,7 +516,7 @@ def mod_flow_entry(dp, flow, cmd):
flags=flags,
actions=actions)
dp.send_msg(flow_mod)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def delete_flow_entry(dp):
@ -532,7 +527,7 @@ def delete_flow_entry(dp):
datapath=dp, match=match, cookie=0,
command=dp.ofproto.OFPFC_DELETE)
dp.send_msg(flow_mod)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_port_behavior(dp, port_config):
@ -545,4 +540,4 @@ def mod_port_behavior(dp, port_config):
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
ofctl_utils.send_msg(dp, port_mod, LOG)

View File

@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import netaddr
@ -21,7 +20,6 @@ from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import hub
from ryu.lib import ofctl_utils
@ -104,8 +102,9 @@ def to_actions(dp, acts):
else:
LOG.error('Unknown action type: %s', action_type)
if write_actions:
inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
inst.append(
parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
elif action_type == 'CLEAR_ACTIONS':
inst.append(parser.OFPInstructionActions(
ofp.OFPIT_CLEAR_ACTIONS, []))
@ -395,30 +394,10 @@ def match_vid_to_str(value, mask):
return value
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
previous_msg_len = len(msgs)
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
while current_msg_len > previous_msg_len:
previous_msg_len = current_msg_len
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = {}
for msg in msgs:
@ -432,12 +411,23 @@ def get_desc_stats(dp, waiters):
return desc
def get_queue_stats(dp, waiters):
def get_queue_stats(dp, waiters, port=None, queue_id=None):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, ofp.OFPP_ANY,
ofp.OFPQ_ALL, 0)
if port is None:
port = ofp.OFPP_ANY
else:
port = int(str(port), 0)
if queue_id is None:
queue_id = ofp.OFPQ_ALL
else:
queue_id = int(str(queue_id), 0)
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, port,
queue_id, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = []
for msg in msgs:
@ -452,12 +442,15 @@ def get_queue_stats(dp, waiters):
return desc
def get_queue_config(dp, port, waiters):
def get_queue_config(dp, waiters, port=None):
ofp = dp.ofproto
port = UTIL.ofp_port_from_user(port)
if port is None:
port = ofp.OFPP_ANY
else:
port = UTIL.ofp_port_from_user(int(str(port), 0))
stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE',
dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE',
@ -506,7 +499,7 @@ def get_flow_stats(dp, waiters, flow=None):
dp, table_id, out_port, out_group, cookie, cookie_mask, match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
@ -547,7 +540,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None):
dp, table_id, out_port, out_group, cookie, cookie_mask, match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
@ -565,7 +558,7 @@ def get_table_stats(dp, waiters):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp)
ofp = dp.ofproto
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
oxm_type_convert = {ofp.OFPXMT_OFB_IN_PORT: 'IN_PORT',
ofp.OFPXMT_OFB_IN_PHY_PORT: 'IN_PHY_PORT',
@ -686,11 +679,16 @@ def get_table_stats(dp, waiters):
return desc
def get_port_stats(dp, waiters):
def get_port_stats(dp, waiters, port=None):
if port is None:
port = dp.ofproto.OFPP_ANY
else:
port = int(str(port), 0)
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, dp.ofproto.OFPP_ANY, 0)
dp, port, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
ports = []
for msg in msgs:
@ -713,11 +711,16 @@ def get_port_stats(dp, waiters):
return ports
def get_group_stats(dp, waiters):
def get_group_stats(dp, waiters, group_id=None):
if group_id is None:
group_id = dp.ofproto.OFPG_ALL
else:
group_id = int(str(group_id), 0)
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, dp.ofproto.OFPG_ALL, 0)
dp, group_id, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
groups = []
for msg in msgs:
@ -766,7 +769,7 @@ def get_group_features(dp, waiters):
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
@ -807,7 +810,7 @@ def get_group_desc(dp, waiters):
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
@ -834,7 +837,7 @@ def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
@ -879,7 +882,7 @@ def mod_flow_entry(dp, flow, cmd):
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_group_entry(dp, group, cmd):
@ -911,7 +914,7 @@ def mod_group_entry(dp, group, cmd):
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
@ -924,20 +927,8 @@ def mod_port_behavior(dp, port_config):
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
ofctl_utils.send_msg(dp, port_mod, LOG)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
if data_type != 'ascii' and data_type != 'base64':
LOG.error('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
# NOTE(jkoelker) Alias common funcitons
send_experimenter = ofctl_utils.send_experimenter

View File

@ -15,13 +15,13 @@
import base64
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import hub
from ryu.lib import ofctl_nicira_ext
from ryu.lib import ofctl_utils
@ -35,56 +35,8 @@ UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_3)
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
if action_type == 'OUTPUT':
out_port = UTIL.ofp_port_from_user(dic.get('port', ofp.OFPP_ANY))
max_len = UTIL.ofp_cml_from_user(dic.get('max_len', ofp.OFPCML_MAX))
result = parser.OFPActionOutput(out_port, max_len)
elif action_type == 'COPY_TTL_OUT':
result = parser.OFPActionCopyTtlOut()
elif action_type == 'COPY_TTL_IN':
result = parser.OFPActionCopyTtlIn()
elif action_type == 'SET_MPLS_TTL':
mpls_ttl = int(dic.get('mpls_ttl'))
result = parser.OFPActionSetMplsTtl(mpls_ttl)
elif action_type == 'DEC_MPLS_TTL':
result = parser.OFPActionDecMplsTtl()
elif action_type == 'PUSH_VLAN':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushVlan(ethertype)
elif action_type == 'POP_VLAN':
result = parser.OFPActionPopVlan()
elif action_type == 'PUSH_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushMpls(ethertype)
elif action_type == 'POP_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPopMpls(ethertype)
elif action_type == 'SET_QUEUE':
queue_id = UTIL.ofp_queue_from_user(dic.get('queue_id'))
result = parser.OFPActionSetQueue(queue_id)
elif action_type == 'GROUP':
group_id = UTIL.ofp_group_from_user(dic.get('group_id'))
result = parser.OFPActionGroup(group_id)
elif action_type == 'SET_NW_TTL':
nw_ttl = int(dic.get('nw_ttl'))
result = parser.OFPActionSetNwTtl(nw_ttl)
elif action_type == 'DEC_NW_TTL':
result = parser.OFPActionDecNwTtl()
elif action_type == 'SET_FIELD':
field = dic.get('field')
value = dic.get('value')
result = parser.OFPActionSetField(**{field: value})
elif action_type == 'PUSH_PBB':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushPbb(ethertype)
elif action_type == 'POP_PBB':
result = parser.OFPActionPopPbb()
else:
result = None
return result
return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL)
def to_actions(dp, acts):
@ -110,8 +62,9 @@ def to_actions(dp, acts):
else:
LOG.error('Unknown action type: %s', action_type)
if write_actions:
inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
inst.append(
parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
elif action_type == 'CLEAR_ACTIONS':
inst.append(parser.OFPInstructionActions(
ofp.OFPIT_CLEAR_ACTIONS, []))
@ -176,6 +129,17 @@ def action_to_str(act):
buf = 'PUSH_PBB:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_PBB:
buf = 'POP_PBB'
elif action_type == ofproto_v1_3.OFPAT_EXPERIMENTER:
if act.experimenter == ofproto_common.NX_EXPERIMENTER_ID:
try:
return ofctl_nicira_ext.action_to_str(act, action_to_str)
except:
LOG.debug('Error parsing NX_ACTION(%s)',
act.__class__.__name__, exc_info=True)
data_str = base64.b64encode(act.data)
buf = 'EXPERIMENTER: {experimenter:%s, data:%s}' % \
(act.experimenter, data_str.decode('utf-8'))
else:
buf = 'UNKNOWN'
return buf
@ -229,11 +193,11 @@ def actions_to_str(instructions):
def to_match(dp, attrs):
convert = {'in_port': UTIL.ofp_port_from_user,
'in_phy_port': int,
'metadata': to_match_masked_int,
'dl_dst': to_match_eth,
'dl_src': to_match_eth,
'eth_dst': to_match_eth,
'eth_src': to_match_eth,
'metadata': ofctl_utils.to_match_masked_int,
'dl_dst': ofctl_utils.to_match_eth,
'dl_src': ofctl_utils.to_match_eth,
'eth_dst': ofctl_utils.to_match_eth,
'eth_src': ofctl_utils.to_match_eth,
'dl_type': int,
'eth_type': int,
'dl_vlan': to_match_vid,
@ -243,10 +207,10 @@ def to_match(dp, attrs):
'ip_ecn': int,
'nw_proto': int,
'ip_proto': int,
'nw_src': to_match_ip,
'nw_dst': to_match_ip,
'ipv4_src': to_match_ip,
'ipv4_dst': to_match_ip,
'nw_src': ofctl_utils.to_match_ip,
'nw_dst': ofctl_utils.to_match_ip,
'ipv4_src': ofctl_utils.to_match_ip,
'ipv4_dst': ofctl_utils.to_match_ip,
'tp_src': int,
'tp_dst': int,
'tcp_src': int,
@ -258,24 +222,24 @@ def to_match(dp, attrs):
'icmpv4_type': int,
'icmpv4_code': int,
'arp_op': int,
'arp_spa': to_match_ip,
'arp_tpa': to_match_ip,
'arp_sha': to_match_eth,
'arp_tha': to_match_eth,
'ipv6_src': to_match_ip,
'ipv6_dst': to_match_ip,
'arp_spa': ofctl_utils.to_match_ip,
'arp_tpa': ofctl_utils.to_match_ip,
'arp_sha': ofctl_utils.to_match_eth,
'arp_tha': ofctl_utils.to_match_eth,
'ipv6_src': ofctl_utils.to_match_ip,
'ipv6_dst': ofctl_utils.to_match_ip,
'ipv6_flabel': int,
'icmpv6_type': int,
'icmpv6_code': int,
'ipv6_nd_target': to_match_ip,
'ipv6_nd_sll': to_match_eth,
'ipv6_nd_tll': to_match_eth,
'ipv6_nd_target': ofctl_utils.to_match_ip,
'ipv6_nd_sll': ofctl_utils.to_match_eth,
'ipv6_nd_tll': ofctl_utils.to_match_eth,
'mpls_label': int,
'mpls_tc': int,
'mpls_bos': int,
'pbb_isid': to_match_masked_int,
'tunnel_id': to_match_masked_int,
'ipv6_exthdr': to_match_masked_int}
'pbb_isid': ofctl_utils.to_match_masked_int,
'tunnel_id': ofctl_utils.to_match_masked_int,
'ipv6_exthdr': ofctl_utils.to_match_masked_int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
@ -319,55 +283,8 @@ def to_match(dp, attrs):
return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value
def to_match_ip(value):
if '/' in value:
(ip_addr, ip_mask) = value.split('/')
if ip_mask.isdigit():
ip = netaddr.ip.IPNetwork(value)
ip_addr = str(ip.ip)
ip_mask = str(ip.netmask)
return ip_addr, ip_mask
else:
return value
def to_match_vid(value):
# NOTE: If "vlan_id/dl_vlan" field is described as decimal int value
# (and decimal string value), it is treated as values of
# VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically
# applied. OTOH, If it is described as hexadecimal string,
# treated as values of oxm_value (including OFPVID_PRESENT
# bit), and OFPVID_PRESENT bit is NOT automatically applied.
if isinstance(value, int):
# described as decimal int value
return value | ofproto_v1_3.OFPVID_PRESENT
else:
if '/' in value:
val = value.split('/')
return int(val[0], 0), int(val[1], 0)
else:
if value.isdigit():
# described as decimal string value
return int(value, 10) | ofproto_v1_3.OFPVID_PRESENT
else:
return int(value, 0)
def to_match_masked_int(value):
if isinstance(value, str) and '/' in value:
value = value.split('/')
return (ofctl_utils.str_to_int(value[0]),
ofctl_utils.str_to_int(value[1]))
else:
return ofctl_utils.str_to_int(value)
return ofctl_utils.to_match_vid(value, ofproto_v1_3.OFPVID_PRESENT)
def match_to_str(ofmatch):
@ -397,7 +314,8 @@ def match_to_str(ofmatch):
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
value = ofctl_utils.match_vid_to_str(value, mask,
ofproto_v1_3.OFPVID_PRESENT)
elif key == 'in_port':
value = UTIL.ofp_port_to_user(value)
else:
@ -408,41 +326,17 @@ def match_to_str(ofmatch):
return match
def match_vid_to_str(value, mask):
if mask is not None:
value = '0x%04x/0x%04x' % (value, mask)
else:
if value & ofproto_v1_3.OFPVID_PRESENT:
value = str(value & ~ofproto_v1_3.OFPVID_PRESENT)
else:
value = '0x%04x' % value
return value
def wrap_dpid_dict(dp, value, to_user=True):
if to_user:
return {str(dp.id): value}
return {dp.id: value}
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
previous_msg_len = len(msgs)
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
while current_msg_len > previous_msg_len:
previous_msg_len = current_msg_len
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
def get_desc_stats(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = {}
for msg in msgs:
@ -452,16 +346,27 @@ def get_desc_stats(dp, waiters):
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
return wrap_dpid_dict(dp, s, to_user)
def get_queue_stats(dp, waiters):
def get_queue_stats(dp, waiters, port=None, queue_id=None, to_user=True):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
if port is None:
port = ofp.OFPP_ANY
else:
port = int(str(port), 0)
if queue_id is None:
queue_id = ofp.OFPQ_ALL
else:
queue_id = int(str(queue_id), 0)
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port,
queue_id)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = []
for msg in msgs:
@ -474,16 +379,19 @@ def get_queue_stats(dp, waiters):
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
return wrap_dpid_dict(dp, s, to_user)
def get_queue_config(dp, port, waiters):
def get_queue_config(dp, waiters, port=None, to_user=True):
ofp = dp.ofproto
port = UTIL.ofp_port_from_user(port)
if port is None:
port = ofp.OFPP_ANY
else:
port = UTIL.ofp_port_from_user(int(str(port), 0))
stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE',
dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE',
@ -504,19 +412,33 @@ def get_queue_config(dp, port, waiters):
p['experimenter'] = prop.experimenter
p['data'] = prop.data
prop_list.append(p)
q = {'port': UTIL.ofp_port_to_user(queue.port),
'properties': prop_list,
'queue_id': UTIL.ofp_queue_to_user(queue.queue_id)}
q = {'properties': prop_list}
if to_user:
q['port'] = UTIL.ofp_port_to_user(queue.port)
q['queue_id'] = UTIL.ofp_queue_to_user(queue.queue_id)
else:
q['port'] = queue.port
q['queue_id'] = queue.queue_id
queue_list.append(q)
c = {'port': UTIL.ofp_port_to_user(config.port),
'queues': queue_list}
c = {'queues': queue_list}
if to_user:
c['port'] = UTIL.ofp_port_to_user(config.port)
else:
c['port'] = config.port
configs.append(c)
configs = {str(dp.id): configs}
return configs
return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None):
def get_flow_stats(dp, waiters, flow=None, to_user=True):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
@ -534,34 +456,39 @@ def get_flow_stats(dp, waiters, flow=None):
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': UTIL.ofp_table_to_user(stats.table_id),
'length': stats.length,
'flags': stats.flags}
if to_user:
s['actions'] = actions_to_str(stats.instructions)
s['match'] = match_to_str(stats.match)
s['table_id'] = UTIL.ofp_table_to_user(stats.table_id)
else:
s['actions'] = stats.instructions
s['instructions'] = stats.instructions
s['match'] = stats.match
s['table_id'] = stats.table_id
flows.append(s)
flows = {str(dp.id): flows}
return flows
return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None):
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
@ -579,7 +506,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None):
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
@ -588,35 +515,39 @@ def get_aggregate_flow_stats(dp, waiters, flow=None):
'byte_count': stats.byte_count,
'flow_count': stats.flow_count}
flows.append(s)
flows = {str(dp.id): flows}
return flows
return wrap_dpid_dict(dp, flows, to_user)
def get_table_stats(dp, waiters):
def get_table_stats(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
s = {'table_id': UTIL.ofp_table_to_user(stat.table_id),
'active_count': stat.active_count,
s = {'active_count': stat.active_count,
'lookup_count': stat.lookup_count,
'matched_count': stat.matched_count}
if to_user:
s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
else:
s['table_id'] = stat.table_id
tables.append(s)
desc = {str(dp.id): tables}
return desc
return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters):
def get_table_features(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
msgs = []
ofproto = dp.ofproto
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
prop_type = {ofproto.OFPTFPT_INSTRUCTIONS: 'INSTRUCTIONS',
ofproto.OFPTFPT_INSTRUCTIONS_MISS: 'INSTRUCTIONS_MISS',
@ -636,6 +567,9 @@ def get_table_features(dp, waiters):
ofproto.OFPTFPT_EXPERIMENTER_MISS: 'EXPERIMENTER_MISS'
}
if not to_user:
prop_type = dict((k, k) for k in prop_type.keys())
p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
ofproto.OFPTFPT_INSTRUCTIONS_MISS]
@ -694,31 +628,40 @@ def get_table_features(dp, waiters):
elif prop.type in p_type_experimenter:
pass
properties.append(p)
s = {'table_id': UTIL.ofp_table_to_user(stat.table_id),
'name': stat.name.decode('utf-8'),
s = {'name': stat.name.decode('utf-8'),
'metadata_match': stat.metadata_match,
'metadata_write': stat.metadata_write,
'config': stat.config,
'max_entries': stat.max_entries,
'properties': properties,
}
if to_user:
s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
else:
s['table_id'] = stat.table_id
tables.append(s)
desc = {str(dp.id): tables}
return desc
return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters):
def get_port_stats(dp, waiters, port=None, to_user=True):
if port is None:
port = dp.ofproto.OFPP_ANY
else:
port = int(str(port), 0)
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_ANY)
dp, 0, port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': UTIL.ofp_port_to_user(stats.port_no),
'rx_packets': stats.rx_packets,
s = {'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
@ -732,16 +675,28 @@ def get_port_stats(dp, waiters):
'collisions': stats.collisions,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec}
if to_user:
s['port_no'] = UTIL.ofp_port_to_user(stats.port_no)
else:
s['port_no'] = stats.port_no
ports.append(s)
ports = {str(dp.id): ports}
return ports
return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters):
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
if meter_id is None:
meter_id = dp.ofproto.OFPM_ALL
else:
meter_id = int(str(meter_id), 0)
stats = dp.ofproto_parser.OFPMeterStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
dp, 0, meter_id)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
meters = []
for msg in msgs:
@ -751,20 +706,26 @@ def get_meter_stats(dp, waiters):
b = {'packet_band_count': band.packet_band_count,
'byte_band_count': band.byte_band_count}
bands.append(b)
s = {'meter_id': UTIL.ofp_meter_to_user(stats.meter_id),
'len': stats.len,
s = {'len': stats.len,
'flow_count': stats.flow_count,
'packet_in_count': stats.packet_in_count,
'byte_in_count': stats.byte_in_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'band_stats': bands}
if to_user:
s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id)
else:
s['meter_id'] = stats.meter_id
meters.append(s)
meters = {str(dp.id): meters}
return meters
return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters):
def get_meter_features(dp, waiters, to_user=True):
ofp = dp.ofproto
type_convert = {ofp.OFPMBT_DROP: 'DROP',
@ -777,7 +738,7 @@ def get_meter_features(dp, waiters):
stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
@ -785,22 +746,34 @@ def get_meter_features(dp, waiters):
band_types = []
for k, v in type_convert.items():
if (1 << k) & feature.band_types:
band_types.append(v)
if to_user:
band_types.append(v)
else:
band_types.append(k)
capabilities = []
for k, v in capa_convert.items():
for k, v in sorted(capa_convert.items()):
if k & feature.capabilities:
capabilities.append(v)
if to_user:
capabilities.append(v)
else:
capabilities.append(k)
f = {'max_meter': feature.max_meter,
'band_types': band_types,
'capabilities': capabilities,
'max_bands': feature.max_bands,
'max_color': feature.max_color}
features.append(f)
features = {str(dp.id): features}
return features
return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters):
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
dp.ofproto.OFPMF_PKTPS: 'PKTPS',
dp.ofproto.OFPMF_BURST: 'BURST',
@ -810,41 +783,68 @@ def get_meter_config(dp, waiters):
dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK',
dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'}
if meter_id is None:
meter_id = dp.ofproto.OFPM_ALL
else:
meter_id = int(str(meter_id), 0)
stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
dp, 0, meter_id)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
configs = []
for msg in msgs:
for config in msg.body:
bands = []
for band in config.bands:
b = {'type': band_type.get(band.type, ''),
'rate': band.rate,
b = {'rate': band.rate,
'burst_size': band.burst_size}
if to_user:
b['type'] = band_type.get(band.type, '')
else:
b['type'] = band.type
if band.type == dp.ofproto.OFPMBT_DSCP_REMARK:
b['prec_level'] = band.prec_level
elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER:
b['experimenter'] = band.experimenter
bands.append(b)
c_flags = []
for k, v in flags.items():
for k, v in sorted(flags.items()):
if k & config.flags:
c_flags.append(v)
if to_user:
c_flags.append(v)
else:
c_flags.append(k)
c = {'flags': c_flags,
'meter_id': UTIL.ofp_meter_to_user(config.meter_id),
'bands': bands}
if to_user:
c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
else:
c['meter_id'] = config.meter_id
configs.append(c)
configs = {str(dp.id): configs}
return configs
return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters):
def get_group_stats(dp, waiters, group_id=None, to_user=True):
if group_id is None:
group_id = dp.ofproto.OFPG_ALL
else:
group_id = int(str(group_id), 0)
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, 0, dp.ofproto.OFPG_ALL)
dp, 0, group_id)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
groups = []
for msg in msgs:
@ -855,19 +855,25 @@ def get_group_stats(dp, waiters):
'byte_count': bucket_stat.byte_count}
bucket_stats.append(c)
g = {'length': stats.length,
'group_id': UTIL.ofp_group_to_user(stats.group_id),
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'bucket_stats': bucket_stats}
if to_user:
g['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
else:
g['group_id'] = stats.group_id
groups.append(g)
groups = {str(dp.id): groups}
return groups
return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters):
def get_group_features(dp, waiters, to_user=True):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
@ -897,7 +903,7 @@ def get_group_features(dp, waiters):
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
@ -905,31 +911,56 @@ def get_group_features(dp, waiters):
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
if to_user:
types.append(v)
else:
types.append(k)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
if to_user:
capabilities.append(v)
else:
capabilities.append(k)
if to_user:
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
else:
max_groups = feature.max_groups
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
if to_user:
acts.append(v2)
else:
acts.append(k2)
if to_user:
actions.append({v1: acts})
else:
actions.append({k1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
features = {str(dp.id): features}
return features
return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters):
def get_group_desc(dp, waiters, to_user=True):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
@ -938,7 +969,7 @@ def get_group_desc(dp, waiters):
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
@ -947,33 +978,44 @@ def get_group_desc(dp, waiters):
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
if to_user:
actions.append(action_to_str(action))
else:
actions.append(action)
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': UTIL.ofp_group_to_user(stats.group_id),
'buckets': buckets}
d = {'buckets': buckets}
if to_user:
d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
d['type'] = type_convert.get(stats.type)
else:
d['group_id'] = stats.group_id
d['type'] = stats.type
descs.append(d)
descs = {str(dp.id): descs}
return descs
return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters):
def get_port_desc(dp, waiters, to_user=True):
stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
stats = msg.body
for stat in stats:
d = {'port_no': UTIL.ofp_port_to_user(stat.port_no),
'hw_addr': stat.hw_addr,
d = {'hw_addr': stat.hw_addr,
'name': stat.name.decode('utf-8'),
'config': stat.config,
'state': stat.state,
@ -983,9 +1025,16 @@ def get_port_desc(dp, waiters):
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
if to_user:
d['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
else:
d['port_no'] = stat.port_no
descs.append(d)
descs = {str(dp.id): descs}
return descs
return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
@ -1010,7 +1059,7 @@ def mod_flow_entry(dp, flow, cmd):
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
@ -1057,7 +1106,7 @@ def mod_meter_entry(dp, meter, cmd):
meter_mod = dp.ofproto_parser.OFPMeterMod(
dp, cmd, flags, meter_id, bands)
dp.send_msg(meter_mod)
ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
@ -1089,7 +1138,7 @@ def mod_group_entry(dp, group, cmd):
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
@ -1102,20 +1151,8 @@ def mod_port_behavior(dp, port_config):
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
ofctl_utils.send_msg(dp, port_mod, LOG)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
if data_type != 'ascii' and data_type != 'base64':
LOG.error('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
# NOTE(jkoelker) Alias common funcitons
send_experimenter = ofctl_utils.send_experimenter

945
ryu/lib/ofctl_v1_4.py Normal file
View File

@ -0,0 +1,945 @@
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4)
def to_action(dp, dic):
    """Convert an action description dict into an OFPAction instance.

    Delegates to ofctl_utils.to_action; returns None when the 'type'
    key names an unknown action.
    """
    return ofctl_utils.to_action(
        dic, dp.ofproto, dp.ofproto_parser, dic.get('type'), UTIL)
def _get_actions(dp, dics):
    """Build OFPAction objects from a list of dicts, logging unknowns."""
    actions = []
    for dic in dics:
        act = to_action(dp, dic)
        if act is None:
            # to_action returns None for unrecognized action types.
            LOG.error('Unknown action type: %s', dic)
        else:
            actions.append(act)
    return actions
def to_instructions(dp, insts):
    """Convert a list of instruction description dicts into OFPInstruction
    objects.

    Unknown instruction types are logged and skipped; APPLY/WRITE_ACTIONS
    instructions with no valid actions are silently dropped.
    """
    instructions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser

    for i in insts:
        inst_type = i.get('type')
        if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']:
            dics = i.get('actions', [])
            actions = _get_actions(dp, dics)
            if actions:
                if inst_type == 'APPLY_ACTIONS':
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                     actions))
                else:
                    instructions.append(
                        parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
                                                     actions))
        elif inst_type == 'CLEAR_ACTIONS':
            instructions.append(
                parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif inst_type == 'GOTO_TABLE':
            table_id = int(i.get('table_id'))
            instructions.append(parser.OFPInstructionGotoTable(table_id))
        elif inst_type == 'WRITE_METADATA':
            metadata = ofctl_utils.str_to_int(i.get('metadata'))
            # Default mask covers all metadata bits.
            metadata_mask = (ofctl_utils.str_to_int(i['metadata_mask'])
                             if 'metadata_mask' in i
                             else parser.UINT64_MAX)
            instructions.append(
                parser.OFPInstructionWriteMetadata(
                    metadata, metadata_mask))
        elif inst_type == 'METER':
            meter_id = int(i.get('meter_id'))
            instructions.append(parser.OFPInstructionMeter(meter_id))
        else:
            LOG.error('Unknown instruction type: %s', inst_type)

    return instructions
def action_to_str(act):
    """Render an OFPAction as a JSON-friendly dict with a user-readable
    'type'; 'UNKNOWN' marks types the converter could not translate."""
    result = act.to_jsondict()[act.__class__.__name__]
    user_type = UTIL.ofp_action_type_to_user(result['type'])
    if user_type == result['type']:
        result['type'] = 'UNKNOWN'
    else:
        result['type'] = user_type

    # Flatten the OXM TLV of a set-field action into top-level keys.
    if 'field' in result:
        tlv = result.pop('field')['OXMTlv']
        result['field'] = tlv['field']
        result['mask'] = tlv['mask']
        result['value'] = tlv['value']
    return result
def instructions_to_str(instructions):
    """Render a list of OFPInstruction objects as JSON-friendly dicts."""
    rendered = []
    for inst in instructions:
        d = inst.to_jsondict()[inst.__class__.__name__]
        user_type = UTIL.ofp_instruction_type_to_user(d['type'])
        d['type'] = user_type if user_type != d['type'] else 'UNKNOWN'
        # Apply/write/clear-action instructions carry nested actions
        # which are rendered individually.
        if isinstance(inst, ofproto_v1_4_parser.OFPInstructionActions):
            d['actions'] = [action_to_str(a) for a in inst.actions]
        rendered.append(d)
    return rendered
def to_match(dp, attrs):
    """Convert a match description dict into an OFPMatch.

    Accepts both current OXM field names and legacy dl_*/nw_* aliases.
    Unknown fields are logged and ignored. The caller's dict is not
    modified.
    """
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': int,
               'metadata': ofctl_utils.to_match_masked_int,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'eth_type': int,
               'vlan_vid': to_match_vid,
               'vlan_pcp': int,
               'ip_dscp': int,
               'ip_ecn': int,
               'ip_proto': int,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tcp_src': int,
               'tcp_dst': int,
               'udp_src': int,
               'udp_dst': int,
               'sctp_src': int,
               'sctp_dst': int,
               'icmpv4_type': int,
               'icmpv4_code': int,
               'arp_op': int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': int,
               'icmpv6_type': int,
               'icmpv6_code': int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': int,
               'mpls_tc': int,
               'mpls_bos': int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int,
               'pbb_uca': int,
               }

    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}

    # Work on a shallow copy: the ARP remapping below deletes keys and
    # must not mutate the caller's dict.
    attrs = dict(attrs)
    if attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        # For ARP matches, legacy ipv4_src/dst really mean the ARP
        # sender/target protocol addresses.
        if 'ipv4_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['ipv4_src']
            del attrs['ipv4_src']
        if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['ipv4_dst']
            del attrs['ipv4_dst']

    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            # For old field name
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)

    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    # Normalize a user-supplied VLAN ID (int or "vid/mask" string) into
    # OXM form with OFPVID_PRESENT applied as required by OF1.4.
    return ofctl_utils.to_match_vid(value, ofproto_v1_4.OFPVID_PRESENT)
def match_to_str(ofmatch):
    """Convert an OFPMatch into a plain {field: value} dict for users."""
    result = {}
    for tlv in ofmatch.to_jsondict()['OFPMatch']['oxm_fields']:
        oxm = tlv['OXMTlv']
        key = oxm['field']
        mask = oxm['mask']
        value = oxm['value']
        if key == 'vlan_vid':
            value = ofctl_utils.match_vid_to_str(
                value, mask, ofproto_v1_4.OFPVID_PRESENT)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        elif mask is not None:
            # Masked fields are rendered as "value/mask".
            value = str(value) + '/' + str(mask)
        result.setdefault(key, value)
    return result
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap *value* in a dict keyed by the datapath id.

    The key is a string when to_user is True (JSON-friendly output),
    otherwise the raw integer dpid.
    """
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query switch description stats; returns {dpid: desc_dict}."""
    req = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)

    desc = {}
    for msg in msgs:
        body = msg.body
        desc = body.to_jsondict()[body.__class__.__name__]
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue statistics; returns {dpid: [queue_stat, ...]}.

    port_no/queue_id default to the OF1.4 wildcards (OFPP_ANY/OFPQ_ALL).
    With to_user=True, property types are converted to readable names.
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)

    stats = dp.ofproto_parser.OFPQueueStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    desc = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != p['type'] else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            desc.append(s)
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Query queue descriptions; returns {dpid: [queue_desc, ...]}.

    port_no/queue_id default to the OF1.4 wildcards (OFPP_ANY/OFPQ_ALL).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)

    stats = dp.ofproto_parser.OFPQueueDescStatsRequest(
        dp, 0, port_no, queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for queue in msg.body:
            q = queue.to_jsondict()[queue.__class__.__name__]
            prop_list = []
            for prop in queue.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                prop_list.append(p)
            q['properties'] = prop_list
            configs.append(q)
    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query flow statistics filtered by *flow*; returns {dpid: [flow, ...]}.

    Missing filter keys default to the OF1.4 wildcards
    (OFPTT_ALL / OFPP_ANY / OFPG_ANY, cookie mask 0).
    """
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            # Render instructions and match into user-friendly form.
            s['instructions'] = instructions_to_str(stats.instructions)
            s['match'] = match_to_str(stats.match)
            flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Query aggregate flow statistics filtered by *flow*.

    Returns {dpid: [aggregate_stats]}; filter defaults mirror
    get_flow_stats.
    """
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    stats = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    flows = []
    for msg in msgs:
        stats = msg.body
        s = stats.to_jsondict()[stats.__class__.__name__]
        flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Query per-table statistics; returns {dpid: [table_stat, ...]}."""
    req = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)

    tables = []
    for msg in msgs:
        for stat in msg.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            if to_user:
                entry['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(entry)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Query table features; returns {dpid: [table_feature, ...]}.

    Each property is decoded according to its type group (instructions,
    next-tables, actions, OXM fields, or experimenter).
    """
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    # Property-type groups sharing a decoding strategy.
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]
    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS,
                          ofproto.OFPTFPT_TABLE_SYNC_FROM]
    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]
    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]

    tables = []
    for msg in msgs:
        for stat in msg.body:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = {}
                t = UTIL.ofp_table_feature_prop_type_to_user(prop.type)
                p['type'] = t if t != prop.type else 'UNKNOWN'
                # NOTE: loop variables renamed from `id` to avoid
                # shadowing the builtin.
                if prop.type in p_type_instructions:
                    p['instruction_ids'] = [{'len': inst_id.len,
                                             'type': inst_id.type}
                                            for inst_id in
                                            prop.instruction_ids]
                elif prop.type in p_type_next_tables:
                    p['table_ids'] = list(prop.table_ids)
                elif prop.type in p_type_actions:
                    p['action_ids'] = [
                        act_id.to_jsondict()[act_id.__class__.__name__]
                        for act_id in prop.action_ids]
                elif prop.type in p_type_oxms:
                    p['oxm_ids'] = [
                        oxm_id.to_jsondict()[oxm_id.__class__.__name__]
                        for oxm_id in prop.oxm_ids]
                elif prop.type in p_type_experimenter:
                    # Experimenter properties carry no payload decoded here.
                    pass
                properties.append(p)
            s['name'] = stat.name.decode('utf-8')
            s['properties'] = properties
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port_no=None, to_user=True):
    """Query per-port statistics; returns {dpid: [port_stat, ...]}.

    With to_user=True, port numbers and property types are converted to
    user-readable representations; otherwise raw values are returned.
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)

    stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    ports = []
    for msg in msgs:
        for stat in msg.body:
            s = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                # Guarded by to_user for consistency with the other
                # get_* helpers, which leave raw values untouched when
                # to_user is False.
                if to_user:
                    t = UTIL.ofp_port_stats_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            s['properties'] = properties
            if to_user:
                s['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
            ports.append(s)
    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Query meter statistics; returns {dpid: [meter_stat, ...]}.

    meter_id defaults to OFPM_ALL (all meters).
    """
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)

    stats = dp.ofproto_parser.OFPMeterStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    meters = []
    for msg in msgs:
        for stats in msg.body:
            s = stats.to_jsondict()[stats.__class__.__name__]
            bands = []
            for band in stats.band_stats:
                b = band.to_jsondict()[band.__class__.__name__]
                bands.append(b)
            s['band_stats'] = bands
            if to_user:
                s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id)
            meters.append(s)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query meter features; returns {dpid: [feature_dict, ...]}.

    Decodes the band_types bitmap (bit position = band type value) and
    the capabilities bitmask into names when to_user is True.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}

    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}

    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            for k, v in type_convert.items():
                # band_types is a bitmap indexed by band type value.
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)
                    else:
                        band_types.append(k)
            capabilities = []
            for k, v in sorted(capa_convert.items()):
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)
                    else:
                        capabilities.append(k)
            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query meter configurations; returns {dpid: [meter_config, ...]}.

    meter_id defaults to OFPM_ALL; flags and band types are converted
    to names when to_user is True.
    """
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}

    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = UTIL.ofp_meter_from_user(meter_id)

    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    configs = []
    for msg in msgs:
        for config in msg.body:
            c = config.to_jsondict()[config.__class__.__name__]
            bands = []
            for band in config.bands:
                b = band.to_jsondict()[band.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_meter_band_type_to_user(band.type)
                    b['type'] = t if t != band.type else 'UNKNOWN'
                bands.append(b)
            c_flags = []
            for k, v in sorted(flags.items()):
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)
                    else:
                        c_flags.append(k)
            c['flags'] = c_flags
            c['bands'] = bands
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
            configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Query group statistics; returns {dpid: [group_stat, ...]}.

    group_id defaults to OFPG_ALL (all groups).
    """
    if group_id is None:
        group_id = dp.ofproto.OFPG_ALL
    else:
        group_id = UTIL.ofp_group_from_user(group_id)

    stats = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, 0, group_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    groups = []
    for msg in msgs:
        for stats in msg.body:
            g = stats.to_jsondict()[stats.__class__.__name__]
            bucket_stats = []
            for bucket_stat in stats.bucket_stats:
                c = bucket_stat.to_jsondict()[bucket_stat.__class__.__name__]
                bucket_stats.append(c)
            g['bucket_stats'] = bucket_stats
            if to_user:
                g['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
            groups.append(g)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query group features; returns {dpid: [feature_dict, ...]}.

    Decodes the supported group types, capabilities, per-type max group
    counts, and per-type supported-action bitmaps.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB',
                   ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER',
                   }

    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        for k, v in type_convert.items():
            # feature.types is a bitmap indexed by group type value.
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)
                else:
                    types.append(k)
        capabilities = []
        for k, v in cap_convert.items():
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)
                else:
                    capabilities.append(k)
        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                # max_groups is indexed by group type value.
                max_groups.append({v: feature.max_groups[k]})
        else:
            max_groups = feature.max_groups
        actions = []
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                # feature.actions[group_type] is a bitmap of action types.
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)
                    else:
                        acts.append(k2)
            if to_user:
                actions.append({v1: acts})
            else:
                actions.append({k1: acts})
        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Query group descriptions; returns {dpid: [group_desc, ...]}."""
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []
    for msg in msgs:
        for stats in msg.body:
            d = stats.to_jsondict()[stats.__class__.__name__]
            buckets = []
            for bucket in stats.buckets:
                b = bucket.to_jsondict()[bucket.__class__.__name__]
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))
                    else:
                        actions.append(action)
                b['actions'] = actions
                buckets.append(b)
            d['buckets'] = buckets
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                t = UTIL.ofp_group_type_to_user(stats.type)
                d['type'] = t if t != stats.type else 'UNKNOWN'
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, port_no=None, to_user=True):
    """Query port descriptions; returns {dpid: [port_desc, ...]}.

    port_no defaults to OFPP_ANY (all ports).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)

    stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)

    descs = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            d = stat.to_jsondict()[stat.__class__.__name__]
            properties = []
            for prop in stat.properties:
                p = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    t = UTIL.ofp_port_desc_prop_type_to_user(prop.type)
                    p['type'] = t if t != prop.type else 'UNKNOWN'
                properties.append(p)
            d['name'] = stat.name.decode('utf-8')
            d['properties'] = properties
            if to_user:
                d['port_no'] = UTIL.ofp_port_to_user(stat.port_no)
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Build an OFPFlowMod from the *flow* description dict and send it.

    cmd is an OFPFC_* command constant; missing keys default to the
    OF1.4 wildcards/zeros.
    """
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = int(flow.get('idle_timeout', 0))
    hard_timeout = int(flow.get('hard_timeout', 0))
    priority = int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    importance = int(flow.get('importance', 0))
    flags = int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_instructions(dp, flow.get('instructions', []))

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        importance, flags, match, inst)

    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Build an OFPMeterMod from the *meter* description dict and send it.

    'flags' may be a single name or a list; unknown flags and band
    types are logged and skipped.
    """
    flags = 0
    if 'flags' in meter:
        meter_flags = meter['flags']
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            t = UTIL.ofp_meter_flags_from_user(flag)
            # Conversion returning the input unchanged signals failure.
            f = t if t != flag else None
            if f is None:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= f

    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))

    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = int(band.get('rate', 0))
        burst_size = int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)

    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)

    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Build an OFPGroupMod from the *group* description dict and send it.

    'type' defaults to 'ALL'; bucket defaults follow OF1.4 wildcards.
    """
    group_type = str(group.get('type', 'ALL'))
    t = UTIL.ofp_group_type_from_user(group_type)
    # Conversion returning the input unchanged signals failure.
    group_type = t if t != group_type else None
    if group_type is None:
        # NOTE(review): on an unknown type the error is logged but the
        # mod is still sent with group_type=None — confirm intended.
        LOG.error('Unknown group type: %s', group.get('type'))

    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = int(bucket.get('weight', 0))
        watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, group_type, group_id, buckets)

    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Build an OFPPortMod from the *port_config* dict and send it.

    Each entry in port_config['properties'] is converted to the
    matching OFPPortModProp* class; unknown property types are logged
    and skipped.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    hw_addr = str(port_config.get('hw_addr'))
    config = int(port_config.get('config', 0))
    mask = int(port_config.get('mask', 0))
    properties = port_config.get('properties')

    prop = []
    for p in properties:
        type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type'])
        # length is computed by the parser classes on serialization.
        length = None
        if type_ == ofp.OFPPDPT_ETHERNET:
            advertise = UTIL.ofp_port_features_from_user(p['advertise'])
            prop.append(
                parser.OFPPortModPropEthernet(type_, length, advertise))
        elif type_ == ofp.OFPPDPT_OPTICAL:
            prop.append(
                parser.OFPPortModPropOptical(
                    type_, length, p['configure'], p['freq_lmda'],
                    p['fl_offset'], p['grid_span'], p['tx_pwr']))
        elif type_ == ofp.OFPPDPT_EXPERIMENTER:
            prop.append(
                parser.OFPPortModPropExperimenter(
                    type_, length, p['experimenter'], p['exp_type'],
                    p['data']))
        else:
            LOG.error('Unknown port desc prop type: %s', type_)

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, prop)

    ofctl_utils.send_msg(dp, port_mod, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter

1090
ryu/lib/ofctl_v1_5.py Normal file

File diff suppressed because it is too large Load Diff

View File

@ -325,7 +325,8 @@ class VSCtlContext(object):
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
LOG.warn('%s: database contains duplicate bridge name', name)
LOG.warning('%s: database contains duplicate bridge name',
name)
bridges.add(name)
vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name,
None, 0)
@ -356,10 +357,10 @@ class VSCtlContext(object):
vsctl_port = self.ports.get(port_name)
if vsctl_port:
if ovsrec_port == vsctl_port.port_cfg:
LOG.warn('%s: vsctl_port is in multiple bridges '
'(%s and %s)',
port_name, vsctl_bridge.name,
vsctl_port.br.name)
LOG.warning('%s: vsctl_port is in multiple bridges '
'(%s and %s)',
port_name, vsctl_bridge.name,
vsctl_port.br.name)
else:
LOG.error('%s: database contains duplicate '
'vsctl_port name',
@ -378,7 +379,7 @@ class VSCtlContext(object):
iface = self.ifaces.get(ovsrec_iface.name)
if iface:
if ovsrec_iface == iface.iface_cfg:
LOG.warn(
LOG.warning(
'%s: interface is in multiple ports '
'(%s and %s)',
ovsrec_iface.name,

View File

@ -23,18 +23,13 @@ RFC 4271 BGP-4
# - RFC 4364 BGP/MPLS IP Virtual Private Networks (VPNs)
import abc
import six
import struct
import copy
import netaddr
import functools
import numbers
import socket
import struct
try:
# Python 3
from functools import reduce
except ImportError:
# Python 2
pass
import six
from ryu.lib.stringify import StringifyMixin
from ryu.lib.packet import afi as addr_family
@ -44,6 +39,8 @@ from ryu.lib.packet import stream_parser
from ryu.lib import addrconv
from ryu.lib.pack_utils import msg_pack_into
reduce = six.moves.reduce
BGP_MSG_OPEN = 1
BGP_MSG_UPDATE = 2
BGP_MSG_NOTIFICATION = 3
@ -162,14 +159,14 @@ class _Value(object):
_VALUE_FIELDS = ['value']
@staticmethod
def do_init(cls, self, kwargs, **extra_kwargs):
def do_init(cls_type, self, kwargs, **extra_kwargs):
ourfields = {}
for f in cls._VALUE_FIELDS:
for f in cls_type._VALUE_FIELDS:
v = kwargs[f]
del kwargs[f]
ourfields[f] = v
kwargs.update(extra_kwargs)
super(cls, self).__init__(**kwargs)
super(cls_type, self).__init__(**kwargs)
self.__dict__.update(ourfields)
@classmethod
@ -236,6 +233,7 @@ class BgpExc(Exception):
"""Flag if set indicates Notification message should be sent to peer."""
def __init__(self, data=''):
super(BgpExc, self).__init__()
self.data = data
def __str__(self):
@ -260,6 +258,7 @@ class BadLen(BgpExc):
SUB_CODE = BGP_ERROR_SUB_BAD_MESSAGE_LENGTH
def __init__(self, msg_type_code, message_length):
super(BadLen, self).__init__()
self.msg_type_code = msg_type_code
self.length = message_length
self.data = struct.pack('!H', self.length)
@ -279,6 +278,7 @@ class BadMsg(BgpExc):
SUB_CODE = BGP_ERROR_SUB_BAD_MESSAGE_TYPE
def __init__(self, msg_type):
super(BadMsg, self).__init__()
self.msg_type = msg_type
self.data = struct.pack('B', msg_type)
@ -317,6 +317,7 @@ class UnsupportedVersion(BgpExc):
SUB_CODE = BGP_ERROR_SUB_UNSUPPORTED_VERSION_NUMBER
def __init__(self, locally_support_version):
super(UnsupportedVersion, self).__init__()
self.data = struct.pack('H', locally_support_version)
@ -403,6 +404,7 @@ class MissingWellKnown(BgpExc):
SUB_CODE = BGP_ERROR_SUB_MISSING_WELL_KNOWN_ATTRIBUTE
def __init__(self, pattr_type_code):
super(MissingWellKnown, self).__init__()
self.pattr_type_code = pattr_type_code
self.data = struct.pack('B', pattr_type_code)
@ -571,13 +573,20 @@ class OutOfResource(BgpExc):
SUB_CODE = BGP_ERROR_SUB_OUT_OF_RESOURCES
@functools.total_ordering
class RouteFamily(StringifyMixin):
def __init__(self, afi, safi):
self.afi = afi
self.safi = safi
def __cmp__(self, other):
return cmp((other.afi, other.safi), (self.afi, self.safi))
def __lt__(self, other):
return (self.afi, self.safi) < (other.afi, other.safi)
def __eq__(self, other):
return (self.afi, self.safi) == (other.afi, other.safi)
def __hash__(self):
return hash((self.afi, self.safi))
# Route Family Singleton
RF_IPv4_UC = RouteFamily(addr_family.IP, subaddr_family.UNICAST)
@ -587,7 +596,7 @@ RF_IPv6_VPN = RouteFamily(addr_family.IP6, subaddr_family.MPLS_VPN)
RF_IPv4_MPLS = RouteFamily(addr_family.IP, subaddr_family.MPLS_LABEL)
RF_IPv6_MPLS = RouteFamily(addr_family.IP6, subaddr_family.MPLS_LABEL)
RF_RTC_UC = RouteFamily(addr_family.IP,
subaddr_family.ROUTE_TARGET_CONSTRTAINS)
subaddr_family.ROUTE_TARGET_CONSTRAINTS)
_rf_map = {
(addr_family.IP, subaddr_family.UNICAST): RF_IPv4_UC,
@ -596,7 +605,7 @@ _rf_map = {
(addr_family.IP6, subaddr_family.MPLS_VPN): RF_IPv6_VPN,
(addr_family.IP, subaddr_family.MPLS_LABEL): RF_IPv4_MPLS,
(addr_family.IP6, subaddr_family.MPLS_LABEL): RF_IPv6_MPLS,
(addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRTAINS): RF_RTC_UC
(addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRAINTS): RF_RTC_UC
}
@ -604,9 +613,9 @@ def get_rf(afi, safi):
return _rf_map[(afi, safi)]
def pad(bin, len_):
assert len(bin) <= len_
return bin + b'\0' * (len_ - len(bin))
def pad(binary, len_):
assert len(binary) <= len_
return binary + b'\0' * (len_ - len(binary))
class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value):
@ -615,7 +624,9 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value):
IPV4_ADDRESS = 1
FOUR_OCTET_AS = 2
def __init__(self, type_, admin=0, assigned=0):
def __init__(self, admin=0, assigned=0, type_=None):
if type_ is None:
type_ = self._rev_lookup_type(self.__class__)
self.type = type_
self.admin = admin
self.assigned = assigned
@ -626,7 +637,7 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value):
(type_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
rest = buf[struct.calcsize(cls._PACK_STR):]
subcls = cls._lookup_type(type_)
return subcls(type_=type_, **subcls.parse_value(rest))
return subcls(**subcls.parse_value(rest))
@classmethod
def from_str(cls, str_):
@ -642,7 +653,7 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value):
type_ = cls.TWO_OCTET_AS
first = int(first)
subcls = cls._lookup_type(type_)
return subcls(type_=type_, admin=first, assigned=int(second))
return subcls(admin=first, assigned=int(second))
def serialize(self):
value = self.serialize_value()
@ -660,8 +671,9 @@ class BGPTwoOctetAsRD(_RouteDistinguisher):
_VALUE_PACK_STR = '!HI'
_VALUE_FIELDS = ['admin', 'assigned']
def __init__(self, type_=_RouteDistinguisher.TWO_OCTET_AS, **kwargs):
self.do_init(BGPTwoOctetAsRD, self, kwargs, type_=type_)
def __init__(self, **kwargs):
super(BGPTwoOctetAsRD, self).__init__()
self.do_init(BGPTwoOctetAsRD, self, kwargs)
@_RouteDistinguisher.register_type(_RouteDistinguisher.IPV4_ADDRESS)
@ -674,8 +686,9 @@ class BGPIPv4AddressRD(_RouteDistinguisher):
]
}
def __init__(self, type_=_RouteDistinguisher.IPV4_ADDRESS, **kwargs):
self.do_init(BGPIPv4AddressRD, self, kwargs, type_=type_)
def __init__(self, **kwargs):
super(BGPIPv4AddressRD, self).__init__()
self.do_init(BGPIPv4AddressRD, self, kwargs)
@classmethod
def parse_value(cls, buf):
@ -700,16 +713,16 @@ class BGPFourOctetAsRD(_RouteDistinguisher):
_VALUE_PACK_STR = '!IH'
_VALUE_FIELDS = ['admin', 'assigned']
def __init__(self, type_=_RouteDistinguisher.FOUR_OCTET_AS,
**kwargs):
self.do_init(BGPFourOctetAsRD, self, kwargs, type_=type_)
def __init__(self, **kwargs):
super(BGPFourOctetAsRD, self).__init__()
self.do_init(BGPFourOctetAsRD, self, kwargs)
@six.add_metaclass(abc.ABCMeta)
class _AddrPrefix(StringifyMixin):
_PACK_STR = '!B' # length
def __init__(self, length, addr, prefixes=None):
def __init__(self, length, addr, prefixes=None, **kwargs):
# length is on-wire bit length of prefixes+addr.
assert prefixes != ()
if isinstance(addr, tuple):
@ -721,14 +734,14 @@ class _AddrPrefix(StringifyMixin):
addr = prefixes + (addr,)
self.addr = addr
@staticmethod
@classmethod
@abc.abstractmethod
def _to_bin(addr):
def _to_bin(cls, addr):
pass
@staticmethod
@classmethod
@abc.abstractmethod
def _from_bin(addr):
def _from_bin(cls, addr):
pass
@classmethod
@ -761,12 +774,12 @@ class _AddrPrefix(StringifyMixin):
class _BinAddrPrefix(_AddrPrefix):
@staticmethod
def _to_bin(addr):
@classmethod
def _to_bin(cls, addr):
return addr
@staticmethod
def _from_bin(addr):
@classmethod
def _from_bin(cls, addr):
return addr
@ -808,9 +821,10 @@ class _LabelledAddrPrefix(_AddrPrefix):
return buf
@classmethod
def _label_from_bin(cls, bin):
(b1, b2, b3) = struct.unpack_from(cls._LABEL_PACK_STR, six.binary_type(bin))
rest = bin[struct.calcsize(cls._LABEL_PACK_STR):]
def _label_from_bin(cls, label):
(b1, b2, b3) = struct.unpack_from(cls._LABEL_PACK_STR,
six.binary_type(label))
rest = label[struct.calcsize(cls._LABEL_PACK_STR):]
return (b1 << 16) | (b2 << 8) | b3, rest
@classmethod
@ -820,7 +834,7 @@ class _LabelledAddrPrefix(_AddrPrefix):
labels = [x << 4 for x in labels]
if labels and labels[-1] != cls._WITHDRAW_LABEL:
labels[-1] |= 1 # bottom of stack
bin_labels = list(map(cls._label_to_bin, labels))
bin_labels = list(cls._label_to_bin(l) for l in labels)
return bytes(reduce(lambda x, y: x + y, bin_labels,
bytearray()) + cls._prefix_to_bin(rest))
@ -876,7 +890,7 @@ class _IPAddrPrefix(_AddrPrefix):
@staticmethod
def _prefix_from_bin(addr):
return (addrconv.ipv4.bin_to_text(pad(addr, 4)),)
return addrconv.ipv4.bin_to_text(pad(addr, 4)),
class _IP6AddrPrefix(_AddrPrefix):
@ -887,7 +901,7 @@ class _IP6AddrPrefix(_AddrPrefix):
@staticmethod
def _prefix_from_bin(addr):
return (addrconv.ipv6.bin_to_text(pad(addr, 16)),)
return addrconv.ipv6.bin_to_text(pad(addr, 16)),
class _VPNAddrPrefix(_AddrPrefix):
@ -1014,6 +1028,7 @@ class LabelledVPNIP6AddrPrefix(_LabelledAddrPrefix, _VPNAddrPrefix,
return "%s:%s" % (self.route_dist, self.prefix)
@functools.total_ordering
class RouteTargetMembershipNLRI(StringifyMixin):
"""Route Target Membership NLRI.
@ -1083,11 +1098,16 @@ class RouteTargetMembershipNLRI(StringifyMixin):
return True
return False
def __cmp__(self, other):
return cmp(
(self._origin_as, self._route_target),
(other.origin_as, other.route_target),
)
def __lt__(self, other):
return ((self.origin_as, self.route_target) <
(other.origin_as, other.route_target))
def __eq__(self, other):
return ((self.origin_as, self.route_target) ==
(other.origin_as, other.route_target))
def __hash__(self):
return hash((self.origin_as, self.route_target))
@classmethod
def parser(cls, buf):
@ -1102,7 +1122,7 @@ class RouteTargetMembershipNLRI(StringifyMixin):
return cls(origin_as, route_target)
def serialize(self):
rt_nlri = ''
rt_nlri = b''
if not self.is_default_rtnlri():
rt_nlri += struct.pack('!I', self.origin_as)
# Encode route target
@ -1111,7 +1131,10 @@ class RouteTargetMembershipNLRI(StringifyMixin):
# RT Nlri is 12 octets
return struct.pack('B', (8 * 12)) + rt_nlri
_addr_class_key = lambda x: (x.afi, x.safi)
def _addr_class_key(route_family):
return route_family.afi, route_family.safi
_ADDR_CLASSES = {
_addr_class_key(RF_IPv4_UC): IPAddrPrefix,
@ -1144,13 +1167,14 @@ class _OptParam(StringifyMixin, _TypeDisp, _Value):
@classmethod
def parser(cls, buf):
(type_, length) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
(type_, length) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
rest = buf[struct.calcsize(cls._PACK_STR):]
value = bytes(rest[:length])
rest = rest[length:]
subcls = cls._lookup_type(type_)
caps = subcls.parse_value(value)
if type(caps) != list:
if not isinstance(caps, list):
caps = [subcls(type_=type_, length=length, **caps[0])]
return caps, rest
@ -1267,7 +1291,8 @@ class BGPOptParamCapabilityGracefulRestart(_OptParamCapability):
@classmethod
def parse_cap_value(cls, buf):
(restart, ) = struct.unpack_from(cls._CAP_PACK_STR, six.binary_type(buf))
(restart, ) = struct.unpack_from(cls._CAP_PACK_STR,
six.binary_type(buf))
buf = buf[2:]
l = []
while len(buf) >= 4:
@ -1278,8 +1303,6 @@ class BGPOptParamCapabilityGracefulRestart(_OptParamCapability):
def serialize_cap_value(self):
buf = bytearray()
msg_pack_into(self._CAP_PACK_STR, buf, 0, self.flags << 12 | self.time)
tuples = self.tuples
i = 0
offset = 2
for i in self.tuples:
afi, safi, flags = i
@ -1298,7 +1321,8 @@ class BGPOptParamCapabilityFourOctetAsNumber(_OptParamCapability):
@classmethod
def parse_cap_value(cls, buf):
(as_number, ) = struct.unpack_from(cls._CAP_PACK_STR, six.binary_type(buf))
(as_number, ) = struct.unpack_from(cls._CAP_PACK_STR,
six.binary_type(buf))
return {'as_number': as_number}
def serialize_cap_value(self):
@ -1363,7 +1387,8 @@ class _PathAttribute(StringifyMixin, _TypeDisp, _Value):
@classmethod
def parser(cls, buf):
(flags, type_) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
(flags, type_) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
rest = buf[struct.calcsize(cls._PACK_STR):]
if (flags & BGP_ATTR_FLAG_EXTENDED_LENGTH) != 0:
len_pack_str = cls._PACK_STR_EXT_LEN
@ -1506,7 +1531,7 @@ class _BGPPathAttributeAsPathCommon(_PathAttribute):
six.binary_type(buf))
buf = buf[struct.calcsize(cls._SEG_HDR_PACK_STR):]
l = []
for i in range(0, num_as):
for _ in range(0, num_as):
(as_number,) = struct.unpack_from(as_pack_str,
six.binary_type(buf))
buf = buf[struct.calcsize(as_pack_str):]
@ -1516,7 +1541,8 @@ class _BGPPathAttributeAsPathCommon(_PathAttribute):
elif type_ == cls._AS_SEQUENCE:
result.append(l)
else:
assert(0) # protocol error
# protocol error
raise struct.error('Unsupported segment type: %s' % type_)
return {
'value': result,
'as_pack_str': as_pack_str,
@ -1530,6 +1556,10 @@ class _BGPPathAttributeAsPathCommon(_PathAttribute):
type_ = self._AS_SET
elif isinstance(e, list):
type_ = self._AS_SEQUENCE
else:
raise struct.error(
'Element of %s.value must be of type set or list' %
self.__class__.__name__)
l = list(e)
num_as = len(l)
if num_as == 0:
@ -1578,7 +1608,8 @@ class BGPPathAttributeNextHop(_PathAttribute):
@classmethod
def parse_value(cls, buf):
(ip_addr,) = struct.unpack_from(cls._VALUE_PACK_STR, six.binary_type(buf))
(ip_addr,) = struct.unpack_from(cls._VALUE_PACK_STR,
six.binary_type(buf))
return {
'value': addrconv.ipv4.bin_to_text(ip_addr),
}
@ -1887,7 +1918,9 @@ class _ExtendedCommunity(StringifyMixin, _TypeDisp, _Value):
FOUR_OCTET_AS_SPECIFIC = 0x02
OPAQUE = 0x03
def __init__(self, type_):
def __init__(self, type_=None):
if type_ is None:
type_ = self._rev_lookup_type(self.__class__)
self.type = type_
@classmethod
@ -1912,10 +1945,9 @@ class BGPTwoOctetAsSpecificExtendedCommunity(_ExtendedCommunity):
_VALUE_PACK_STR = '!BHI' # sub type, as number, local adm
_VALUE_FIELDS = ['subtype', 'as_number', 'local_administrator']
def __init__(self, type_=_ExtendedCommunity.TWO_OCTET_AS_SPECIFIC,
**kwargs):
self.do_init(BGPTwoOctetAsSpecificExtendedCommunity, self, kwargs,
type_=type_)
def __init__(self, **kwargs):
super(BGPTwoOctetAsSpecificExtendedCommunity, self).__init__()
self.do_init(BGPTwoOctetAsSpecificExtendedCommunity, self, kwargs)
@_ExtendedCommunity.register_type(_ExtendedCommunity.IPV4_ADDRESS_SPECIFIC)
@ -1928,10 +1960,9 @@ class BGPIPv4AddressSpecificExtendedCommunity(_ExtendedCommunity):
]
}
def __init__(self, type_=_ExtendedCommunity.IPV4_ADDRESS_SPECIFIC,
**kwargs):
self.do_init(BGPIPv4AddressSpecificExtendedCommunity, self, kwargs,
type_=type_)
def __init__(self, **kwargs):
super(BGPIPv4AddressSpecificExtendedCommunity, self).__init__()
self.do_init(BGPIPv4AddressSpecificExtendedCommunity, self, kwargs)
@classmethod
def parse_value(cls, buf):
@ -1957,10 +1988,9 @@ class BGPFourOctetAsSpecificExtendedCommunity(_ExtendedCommunity):
_VALUE_PACK_STR = '!BIH' # sub type, as number, local adm
_VALUE_FIELDS = ['subtype', 'as_number', 'local_administrator']
def __init__(self, type_=_ExtendedCommunity.FOUR_OCTET_AS_SPECIFIC,
**kwargs):
self.do_init(BGPFourOctetAsSpecificExtendedCommunity, self, kwargs,
type_=type_)
def __init__(self, **kwargs):
super(BGPFourOctetAsSpecificExtendedCommunity, self).__init__()
self.do_init(BGPFourOctetAsSpecificExtendedCommunity, self, kwargs)
@_ExtendedCommunity.register_type(_ExtendedCommunity.OPAQUE)
@ -1968,18 +1998,18 @@ class BGPOpaqueExtendedCommunity(_ExtendedCommunity):
_VALUE_PACK_STR = '!7s' # opaque value
_VALUE_FIELDS = ['opaque']
def __init__(self, type_=_ExtendedCommunity.OPAQUE,
**kwargs):
self.do_init(BGPOpaqueExtendedCommunity, self, kwargs,
type_=type_)
def __init__(self, **kwargs):
super(BGPOpaqueExtendedCommunity, self).__init__()
self.do_init(BGPOpaqueExtendedCommunity, self, kwargs)
@_ExtendedCommunity.register_unknown_type()
class BGPUnknownExtendedCommunity(_ExtendedCommunity):
_VALUE_PACK_STR = '!7s' # opaque value
def __init__(self, **kwargs):
self.do_init(BGPUnknownExtendedCommunity, self, kwargs)
def __init__(self, type_, **kwargs):
super(BGPUnknownExtendedCommunity, self).__init__(type_=type_)
self.do_init(BGPUnknownExtendedCommunity, self, kwargs, type_=type_)
@_PathAttribute.register_type(BGP_ATTR_TYPE_MP_REACH_NLRI)
@ -2116,7 +2146,8 @@ class BGPPathAttributeMpUnreachNLRI(_PathAttribute):
@classmethod
def parse_value(cls, buf):
(afi, safi,) = struct.unpack_from(cls._VALUE_PACK_STR, six.binary_type(buf))
(afi, safi,) = struct.unpack_from(cls._VALUE_PACK_STR,
six.binary_type(buf))
binnlri = buf[struct.calcsize(cls._VALUE_PACK_STR):]
addr_cls = _get_addr_class(afi, safi)
nlri = []
@ -2160,7 +2191,7 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp):
========================== ===============================================
marker Marker field. Ignored when encoding.
len Length field. Ignored when encoding.
type Type field. one of BGP\_MSG\_ constants.
type Type field. one of ``BGP_MSG_*`` constants.
========================== ===============================================
"""
@ -2168,12 +2199,15 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp):
_HDR_LEN = struct.calcsize(_HDR_PACK_STR)
_class_prefixes = ['BGP']
def __init__(self, type_, len_=None, marker=None):
def __init__(self, marker=None, len_=None, type_=None):
super(BGPMessage, self).__init__()
if marker is None:
self._marker = _MARKER
else:
self._marker = marker
self.len = len_
if type_ is None:
type_ = self._rev_lookup_type(self.__class__)
self.type = type_
@classmethod
@ -2193,7 +2227,7 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp):
kwargs = subcls.parser(binmsg)
return subcls(marker=marker, len_=len_, type_=type_, **kwargs), rest
def serialize(self):
def serialize(self, payload=None, prev=None):
# fixup
self._marker = _MARKER
tail = self.serialize_tail()
@ -2260,9 +2294,12 @@ class BGPOpen(BGPMessage):
@classmethod
def parser(cls, buf):
(version, my_as, hold_time,
bgp_identifier, opt_param_len) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
(version,
my_as,
hold_time,
bgp_identifier,
opt_param_len) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
rest = buf[struct.calcsize(cls._PACK_STR):]
binopts = rest[:opt_param_len]
opt_param = []

View File

@ -682,7 +682,7 @@ class OSPFMessage(packet_base.PacketBase, _TypeDisp):
rest = buf[length:]
subcls = cls._lookup_type(type_)
kwargs = subcls.parser(binmsg)
return subcls(length, router_id, area_id, au_type, authentication,
return subcls(length, router_id, area_id, au_type, int(authentication),
checksum, version, **kwargs), None, rest
@classmethod

View File

@ -23,4 +23,4 @@ UNICAST = 1
MULTICAST = 2
MPLS_LABEL = 4 # RFC 3107
MPLS_VPN = 128 # RFC 4364
ROUTE_TARGET_CONSTRTAINS = 132 # RFC 4684
ROUTE_TARGET_CONSTRAINTS = 132 # RFC 4684

View File

@ -35,6 +35,16 @@ TCP_OPTION_KIND_TIMESTAMPS = 8 # Timestamps
TCP_OPTION_KIND_USER_TIMEOUT = 28 # User Timeout Option
TCP_OPTION_KIND_AUTHENTICATION = 29 # TCP Authentication Option (TCP-AO)
TCP_FIN = 0x001
TCP_SYN = 0x002
TCP_RST = 0x004
TCP_PSH = 0x008
TCP_ACK = 0x010
TCP_URG = 0x020
TCP_ECE = 0x040
TCP_CWR = 0x080
TCP_NS = 0x100
class tcp(packet_base.PacketBase):
"""TCP (RFC 793) header encoder/decoder class.
@ -83,6 +93,21 @@ class tcp(packet_base.PacketBase):
def __len__(self):
return self.offset * 4
def has_flags(self, *flags):
"""Check if flags are set on this packet.
returns boolean if all passed flags is set
Example::
>>> pkt = tcp.tcp(bits=(tcp.TCP_SYN | tcp.TCP_ACK))
>>> pkt.has_flags(tcp.TCP_SYN, tcp.TCP_ACK)
True
"""
mask = sum(flags)
return (self.bits & mask) == mask
@classmethod
def parser(cls, buf):
(src_port, dst_port, seq, ack, offset, bits, window_size,

View File

@ -18,6 +18,7 @@ import struct
from . import packet_base
from . import packet_utils
from . import dhcp
from . import vxlan
class udp(packet_base.PacketBase):
@ -49,10 +50,14 @@ class udp(packet_base.PacketBase):
self.total_length = total_length
self.csum = csum
@classmethod
def get_packet_type(cls, src_port, dst_port):
if (src_port == 68 and dst_port == 67) or (src_port == 67 and dst_port == 68):
@staticmethod
def get_packet_type(src_port, dst_port):
if ((src_port == 68 and dst_port == 67) or
(src_port == 67 and dst_port == 68)):
return dhcp.dhcp
if (dst_port == vxlan.UDP_DST_PORT or
dst_port == vxlan.UDP_DST_PORT_OLD):
return vxlan.vxlan
return None
@classmethod

90
ryu/lib/packet/vxlan.py Normal file
View File

@ -0,0 +1,90 @@
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VXLAN packet parser/serializer
RFC 7348
VXLAN Header:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|R|R|R|R|I|R|R|R| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| VXLAN Network Identifier (VNI) | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- Flags (8 bits): where the I flag MUST be set to 1 for a valid
VXLAN Network ID (VNI). The other 7 bits (designated "R") are
reserved fields and MUST be set to zero on transmission and
ignored on receipt.
- VXLAN Segment ID/VXLAN Network Identifier (VNI): this is a
24-bit value used to designate the individual VXLAN overlay
network on which the communicating VMs are situated. VMs in
different VXLAN overlay networks cannot communicate with each
other.
- Reserved fields (24 bits and 8 bits): MUST be set to zero on
transmission and ignored on receipt.
"""
import struct
import logging
from . import packet_base
LOG = logging.getLogger(__name__)
UDP_DST_PORT = 4789
UDP_DST_PORT_OLD = 8472 # for backward compatibility like Linux
class vxlan(packet_base.PacketBase):
"""VXLAN (RFC 7348) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== ====================
Attribute Description
============== ====================
vni VXLAN Network Identifier
============== ====================
"""
# Note: Python has no format character for 24 bits field.
# we use uint32 format character instead and bit-shift at serializing.
_PACK_STR = '!II'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, vni):
super(vxlan, self).__init__()
self.vni = vni
@classmethod
def parser(cls, buf):
(flags_reserved, vni_rserved) = struct.unpack_from(cls._PACK_STR, buf)
# Check VXLAN flags is valid
assert (1 << 3) == (flags_reserved >> 24)
# Note: To avoid cyclic import, import ethernet module here
from ryu.lib.packet import ethernet
return cls(vni_rserved >> 8), ethernet.ethernet, buf[cls._MIN_LEN:]
def serialize(self, payload, prev):
return struct.pack(self._PACK_STR,
1 << (3 + 24), self.vni << 8)

View File

@ -1,3 +1,18 @@
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parsing libpcap and reading/writing PCAP file.
Reference source: http://wiki.wireshark.org/Development/LibpcapFileFormat
@ -18,55 +33,10 @@ Reference source: http://wiki.wireshark.org/Development/LibpcapFileFormat
+---------------------+
| Packet Data |
+---------------------+
| ...
+---------------- ...
Sample usage of dump packets:
from ryu.lib import pcaplib
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
# Creating an instance with a PCAP filename
self.pcap_pen = Writer(open('mypcap.pcap', 'wb'))
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
# Dump the data packet into PCAP file
self.pcap_pen.write_pkt(msg.data)
pkt = packet.Packet(msg.data)
Sample usage of reading PCAP files:
from ryu.lib import pcaplib
from ryu.lib.packet import packet
frame_count = 0
# Using the Reader iterator that yields packets in PCAP file
for ts, buf in pcaplib.Reader(open('test.pcap', 'rb')):
frame_count += 1
pkt = packet.Packet(buf)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
# print frames count, timestamp, ethernet src, ethernet dst
# and raw packet.
print frame_count, ts, dst, src, pkt
| ... |
+---------------------+
"""
import six
import struct
import sys
import time
@ -103,43 +73,56 @@ class PcapFileHdr(object):
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
File Format
"""
_FILE_HDR_FMT = None
_FILE_HDR_FMT = '4sHHIIII'
_FILE_HDR_FMT_BIG_ENDIAN = '>' + _FILE_HDR_FMT
_FILE_HDR_FMT_LITTLE_ENDIAN = '<' + _FILE_HDR_FMT
FILE_HDR_SIZE = struct.calcsize(_FILE_HDR_FMT)
def __init__(self, magic=b'\xd4\xc3\xb2\xa1', version_major=2,
# Magic Number field is used to detect the file format itself and
# the byte ordering.
MAGIC_NUMBER_IDENTICAL = b'\xa1\xb2\xc3\xd4' # Big Endian
MAGIC_NUMBER_SWAPPED = b'\xd4\xc3\xb2\xa1' # Little Endian
def __init__(self, magic=MAGIC_NUMBER_SWAPPED, version_major=2,
version_minor=4, thiszone=0, sigfigs=0, snaplen=0,
linktype=0):
network=0):
self.magic = magic
self.version_major = version_major
self.version_minor = version_minor
self.thiszone = thiszone
self.sigfigs = sigfigs
self.snaplen = snaplen
self.linktype = linktype
self.network = network
@classmethod
def parser(cls, buf):
if buf[:4] == b'\xa1\xb2\xc3\xd4':
magic_buf = buf[:4]
if magic_buf == cls.MAGIC_NUMBER_IDENTICAL:
# Big Endian
cls._FILE_HDR_FMT = '>IHHIIII'
byteorder = '>'
elif buf[:4] == b'\xd4\xc3\xb2\xa1':
fmt = cls._FILE_HDR_FMT_BIG_ENDIAN
byteorder = 'big'
elif magic_buf == cls.MAGIC_NUMBER_SWAPPED:
# Little Endian
cls._FILE_HDR_FMT = '<IHHIIII'
byteorder = '<'
fmt = cls._FILE_HDR_FMT_LITTLE_ENDIAN
byteorder = 'little'
else:
raise Exception('Invalid pcap file.')
raise struct.error('Invalid byte ordered pcap file.')
(magic, version_major, version_minor, thiszone, sigfigs,
snaplen, linktype) = struct.unpack_from(cls._FILE_HDR_FMT, buf)
return cls(*struct.unpack_from(fmt, buf)), byteorder
hdr = cls(magic, version_major, version_minor, thiszone, sigfigs,
snaplen, linktype)
return hdr, byteorder
def serialize(self):
if sys.byteorder == 'big':
# Big Endian
fmt = self._FILE_HDR_FMT_BIG_ENDIAN
self.magic = self.MAGIC_NUMBER_IDENTICAL
else:
# Little Endian
fmt = self._FILE_HDR_FMT_LITTLE_ENDIAN
self.magic = self.MAGIC_NUMBER_SWAPPED
def serialize(self, fmt):
return struct.pack(fmt, self.magic, self.version_major,
self.version_minor, self.thiszone,
self.sigfigs, self.snaplen, self.linktype)
self.sigfigs, self.snaplen, self.network)
class PcapPktHdr(object):
@ -167,7 +150,10 @@ class PcapPktHdr(object):
Record (Packet) Header Format
"""
_PKT_HDR_FMT = None
_PKT_HDR_FMT = 'IIII'
_PKT_HDR_FMT_BIG_ENDIAN = '>' + _PKT_HDR_FMT
_PKT_HDR_FMT_LITTLE_ENDIAN = '<' + _PKT_HDR_FMT
PKT_HDR_SIZE = struct.calcsize(_PKT_HDR_FMT)
def __init__(self, ts_sec=0, ts_usec=0, incl_len=0, orig_len=0):
self.ts_sec = ts_sec
@ -176,114 +162,161 @@ class PcapPktHdr(object):
self.orig_len = orig_len
@classmethod
def parser(cls, byteorder, buf):
def parser(cls, buf, byteorder='little'):
if not buf:
raise IndexError('No data')
cls._PKT_HDR_FMT = byteorder + 'IIII'
PKT_HDR_LEN = struct.calcsize(cls._PKT_HDR_FMT)
(ts_sec, ts_usec, incl_len,
orig_len) = struct.unpack_from(cls._PKT_HDR_FMT, buf)
if byteorder == 'big':
# Big Endian
fmt = cls._PKT_HDR_FMT_BIG_ENDIAN
else:
# Little Endian
fmt = cls._PKT_HDR_FMT_LITTLE_ENDIAN
(ts_sec, ts_usec, incl_len, orig_len) = struct.unpack_from(fmt, buf)
hdr = cls(ts_sec, ts_usec, incl_len, orig_len)
# print repr(buf[0:16])
return hdr, buf[PKT_HDR_LEN:PKT_HDR_LEN + incl_len]
def serialize(self, fmt):
return hdr, buf[cls.PKT_HDR_SIZE:cls.PKT_HDR_SIZE + incl_len]
def serialize(self):
if sys.byteorder == 'big':
# Big Endian
fmt = self._PKT_HDR_FMT_BIG_ENDIAN
else:
# Little Endian
fmt = self._PKT_HDR_FMT_LITTLE_ENDIAN
return struct.pack(fmt, self.ts_sec, self.ts_usec,
self.incl_len, self.orig_len)
class Reader(object):
_FILE_HDR_FMT = '>IHHIIII'
_PKT_HDR_FMT = '>IIII'
"""
PCAP file reader
_PKT_HDR_LEN = struct.calcsize(_PKT_HDR_FMT)
_FILE_HDR_FMT_LEN = struct.calcsize(_FILE_HDR_FMT)
================ ===================================
Argument Description
================ ===================================
file_obj File object which reading PCAP file
in binary mode
================ ===================================
Example of usage::
from ryu.lib import pcaplib
from ryu.lib.packet import packet
frame_count = 0
# iterate pcaplib.Reader that yields (timestamp, packet_data)
# in the PCAP file
for ts, buf in pcaplib.Reader(open('test.pcap', 'rb')):
frame_count += 1
pkt = packet.Packet(buf)
print("%d, %f, %s" % (frame_count, ts, pkt))
"""
def __init__(self, file_obj):
self._fp = file_obj
# self.__filename = filename
self._file_byteorder = None
self._hdr_data = None
self.incl_len_pos = 0
buf = self._fp.read(PcapFileHdr.FILE_HDR_SIZE)
# Read only pcap file header
self.pcap_header, self._file_byteorder = PcapFileHdr.parser(buf)
# Read pcap data with out header
self._pcap_body = self._fp.read()
self._fp.close()
self._next_pos = 0
def __iter__(self):
buf = self._fp.read(Reader._FILE_HDR_FMT_LEN)
# Only Read PCAP file from 0 to 24th byte
(filehdr, self._file_byteorder) = PcapFileHdr.parser(buf)
# self._fp.seek(Reader._FILE_HDR_FMT_LEN)
# Read PCAP file from 24th byte to EOF
self._hdr_data = self._fp.read()
self._fp.close()
return self
def next(self):
try:
pkt_hdr, pkt_data = PcapPktHdr.parser(self._file_byteorder,
self._hdr_data
[self.incl_len_pos:])
pkt_hdr, pkt_data = PcapPktHdr.parser(
self._pcap_body[self._next_pos:], self._file_byteorder)
self._next_pos += pkt_hdr.incl_len + PcapPktHdr.PKT_HDR_SIZE
next_pos = pkt_hdr.incl_len + Reader._PKT_HDR_LEN
self.incl_len_pos += next_pos
except IndexError:
raise StopIteration
raise StopIteration()
return float(pkt_hdr.ts_sec + (pkt_hdr.ts_usec / 1e6)), pkt_data
return pkt_hdr.ts_sec + (pkt_hdr.ts_usec / 1e6), pkt_data
# for Python 3 compatible
__next__ = next
class Writer(object):
def __init__(self, file_obj, snaplen=65535, linktype=1):
"""
PCAP file writer
========== ==================================================
Argument Description
========== ==================================================
file_obj File object which writing PCAP file in binary mode
snaplen Max length of captured packets (in octets)
network Data link type. (e.g. 1 for Ethernet,
see `tcpdump.org`_ for details)
========== ==================================================
.. _tcpdump.org: http://www.tcpdump.org/linktypes.html
Example of usage::
...
from ryu.lib import pcaplib
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
# Create pcaplib.Writer instance with a file object
# for the PCAP file
self.pcap_writer = pcaplib.Writer(open('mypcap.pcap', 'wb'))
...
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# Dump the packet data into PCAP file
self.pcap_writer.write_pkt(ev.msg.data)
...
"""
def __init__(self, file_obj, snaplen=65535, network=1):
self._f = file_obj
self._write_pcap_file_hdr(snaplen, linktype)
self.snaplen = snaplen
self.network = network
self._write_pcap_file_hdr()
def _write_pcap_file_hdr(self, snaplen, linktype):
if sys.byteorder == 'little':
pcap_file_hdr = PcapFileHdr(magic=0xa1b2c3d4,
snaplen=snaplen,
linktype=linktype)
p = pcap_file_hdr.serialize(fmt='<IHHIIII')
else:
pcap_file_hdr, byteorder = PcapFileHdr(magic=0xd4c3b2a1,
naplen=snaplen,
linktype=linktype)
p = pcap_file_hdr.serialize(fmt='>IHHIIII')
self._f.write(str(p))
def _write_pcap_file_hdr(self):
pcap_file_hdr = PcapFileHdr(snaplen=self.snaplen,
network=self.network)
self._f.write(pcap_file_hdr.serialize())
def _write_pkt_hdr(self, ts, buf_str_len):
def _write_pkt_hdr(self, ts, buf_len):
sec = int(ts)
if sec == 0:
usec = 0
else:
usec = int(ts * 1e6) % int(ts)
usec = int(round(ts % 1, 6) * 1e6) if sec != 0 else 0
if sys.byteorder == 'little':
# usec = int(ts * 1e6) % int(ts)
# old_usec = int((float(ts) - int(ts)) * 1e6)
pc_pkt_hdr = PcapPktHdr(ts_sec=sec,
ts_usec=usec,
incl_len=buf_str_len,
orig_len=buf_str_len)
p = pc_pkt_hdr.serialize(fmt='<IIII')
else:
pc_pkt_hdr = PcapPktHdr(ts_sec=sec,
ts_usec=usec,
incl_len=buf_str_len,
orig_len=buf_str_len)
p = pc_pkt_hdr.serialize(fmt='>IIII')
self._f.write(str(p))
pc_pkt_hdr = PcapPktHdr(ts_sec=sec, ts_usec=usec,
incl_len=buf_len, orig_len=buf_len)
self._f.write(pc_pkt_hdr.serialize())
def write_pkt(self, buf, ts=None):
if ts is None:
ts = time.time()
ts = time.time() if ts is None else ts
buf_str = six.binary_type(buf)
buf_str_len = len(buf_str)
self._write_pkt_hdr(ts, buf_str_len)
self._f.write(buf_str)
# Check the max length of captured packets
buf_len = len(buf)
if buf_len > self.snaplen:
buf_len = self.snaplen
buf = buf[:self.snaplen]
self._write_pkt_hdr(ts, buf_len)
self._f.write(buf)
def __del__(self):
self._f.close()

View File

@ -17,9 +17,11 @@
# Nicira extensions
# Many of these definitions are common among OpenFlow versions.
import sys
from struct import calcsize
from ryu.lib import type_desc
from ryu.ofproto.ofproto_common import OFP_HEADER_SIZE
from ryu.ofproto import oxm_fields
# Action subtypes
NXAST_RESUBMIT = 1
@ -84,6 +86,7 @@ assert calcsize(NX_ACTION_NOTE_PACK_STR) == NX_ACTION_NOTE_SIZE
NX_ACTION_BUNDLE_PACK_STR = '!HHIHHHHIHHI4x'
NX_ACTION_BUNDLE_SIZE = 32
NX_ACTION_BUNDLE_0_SIZE = 24
assert calcsize(NX_ACTION_BUNDLE_PACK_STR) == NX_ACTION_BUNDLE_SIZE
NX_ACTION_AUTOPATH_PACK_STR = '!HHIHHII4x'
@ -108,6 +111,7 @@ assert calcsize(NX_ACTION_FIN_TIMEOUT_PACK_STR) == NX_ACTION_FIN_TIMEOUT_SIZE
NX_ACTION_HEADER_PACK_STR = '!HHIH6x'
NX_ACTION_HEADER_SIZE = 16
NX_ACTION_HEADER_0_SIZE = 2
assert calcsize(NX_ACTION_HEADER_PACK_STR) == NX_ACTION_HEADER_SIZE
# Messages
@ -250,3 +254,206 @@ NX_NAT_RANGE_IPV6_MIN = 1 << 2
NX_NAT_RANGE_IPV6_MAX = 1 << 3
NX_NAT_RANGE_PROTO_MIN = 1 << 4
NX_NAT_RANGE_PROTO_MAX = 1 << 5
def nxm_header__(vendor, field, hasmask, length):
return (vendor << 16) | (field << 9) | (hasmask << 8) | length
def nxm_header(vendor, field, length):
return nxm_header__(vendor, field, 0, length)
def nxm_header_w(vendor, field, length):
return nxm_header__(vendor, field, 1, (length) * 2)
NXM_OF_IN_PORT = nxm_header(0x0000, 0, 2)
NXM_OF_ETH_DST = nxm_header(0x0000, 1, 6)
NXM_OF_ETH_DST_W = nxm_header_w(0x0000, 1, 6)
NXM_OF_ETH_SRC = nxm_header(0x0000, 2, 6)
NXM_OF_ETH_SRC_W = nxm_header_w(0x0000, 2, 6)
NXM_OF_ETH_TYPE = nxm_header(0x0000, 3, 2)
NXM_OF_VLAN_TCI = nxm_header(0x0000, 4, 2)
NXM_OF_VLAN_TCI_W = nxm_header_w(0x0000, 4, 2)
NXM_OF_IP_TOS = nxm_header(0x0000, 5, 1)
NXM_OF_IP_PROTO = nxm_header(0x0000, 6, 1)
NXM_OF_IP_SRC = nxm_header(0x0000, 7, 4)
NXM_OF_IP_SRC_W = nxm_header_w(0x0000, 7, 4)
NXM_OF_IP_DST = nxm_header(0x0000, 8, 4)
NXM_OF_IP_DST_W = nxm_header_w(0x0000, 8, 4)
NXM_OF_TCP_SRC = nxm_header(0x0000, 9, 2)
NXM_OF_TCP_SRC_W = nxm_header_w(0x0000, 9, 2)
NXM_OF_TCP_DST = nxm_header(0x0000, 10, 2)
NXM_OF_TCP_DST_W = nxm_header_w(0x0000, 10, 2)
NXM_OF_UDP_SRC = nxm_header(0x0000, 11, 2)
NXM_OF_UDP_SRC_W = nxm_header_w(0x0000, 11, 2)
NXM_OF_UDP_DST = nxm_header(0x0000, 12, 2)
NXM_OF_UDP_DST_W = nxm_header_w(0x0000, 12, 2)
NXM_OF_ICMP_TYPE = nxm_header(0x0000, 13, 1)
NXM_OF_ICMP_CODE = nxm_header(0x0000, 14, 1)
NXM_OF_ARP_OP = nxm_header(0x0000, 15, 2)
NXM_OF_ARP_SPA = nxm_header(0x0000, 16, 4)
NXM_OF_ARP_SPA_W = nxm_header_w(0x0000, 16, 4)
NXM_OF_ARP_TPA = nxm_header(0x0000, 17, 4)
NXM_OF_ARP_TPA_W = nxm_header_w(0x0000, 17, 4)
NXM_NX_TUN_ID = nxm_header(0x0001, 16, 8)
NXM_NX_TUN_ID_W = nxm_header_w(0x0001, 16, 8)
NXM_NX_TUN_IPV4_SRC = nxm_header(0x0001, 31, 4)
NXM_NX_TUN_IPV4_SRC_W = nxm_header_w(0x0001, 31, 4)
NXM_NX_TUN_IPV4_DST = nxm_header(0x0001, 32, 4)
NXM_NX_TUN_IPV4_DST_W = nxm_header_w(0x0001, 32, 4)
NXM_NX_ARP_SHA = nxm_header(0x0001, 17, 6)
NXM_NX_ARP_THA = nxm_header(0x0001, 18, 6)
NXM_NX_IPV6_SRC = nxm_header(0x0001, 19, 16)
NXM_NX_IPV6_SRC_W = nxm_header_w(0x0001, 19, 16)
NXM_NX_IPV6_DST = nxm_header(0x0001, 20, 16)
NXM_NX_IPV6_DST_W = nxm_header_w(0x0001, 20, 16)
NXM_NX_ICMPV6_TYPE = nxm_header(0x0001, 21, 1)
NXM_NX_ICMPV6_CODE = nxm_header(0x0001, 22, 1)
NXM_NX_ND_TARGET = nxm_header(0x0001, 23, 16)
NXM_NX_ND_TARGET_W = nxm_header_w(0x0001, 23, 16)
NXM_NX_ND_SLL = nxm_header(0x0001, 24, 6)
NXM_NX_ND_TLL = nxm_header(0x0001, 25, 6)
NXM_NX_IP_FRAG = nxm_header(0x0001, 26, 1)
NXM_NX_IP_FRAG_W = nxm_header_w(0x0001, 26, 1)
NXM_NX_IPV6_LABEL = nxm_header(0x0001, 27, 4)
NXM_NX_IP_ECN = nxm_header(0x0001, 28, 1)
NXM_NX_IP_TTL = nxm_header(0x0001, 29, 1)
NXM_NX_PKT_MARK = nxm_header(0x0001, 33, 4)
NXM_NX_PKT_MARK_W = nxm_header_w(0x0001, 33, 4)
NXM_NX_TCP_FLAGS = nxm_header(0x0001, 34, 2)
NXM_NX_TCP_FLAGS_W = nxm_header_w(0x0001, 34, 2)
def nxm_nx_reg(idx):
return nxm_header(0x0001, idx, 4)
def nxm_nx_reg_w(idx):
return nxm_header_w(0x0001, idx, 4)
NXM_HEADER_PACK_STRING = '!I'
#
# The followings are implementations for OpenFlow 1.2+
#
sys.modules[__name__].__doc__ = """
The API of this class is the same as ``OFPMatch``.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==============================================
Argument Value Description
================ =============== ==============================================
eth_dst_nxm MAC address Ethernet destination address.
eth_src_nxm MAC address Ethernet source address.
eth_type_nxm Integer 16bit Ethernet type. Needed to support Nicira
extensions that require the eth_type to
be set. (i.e. tcp_flags_nxm)
ip_proto_nxm Integer 8bit IP protocol. Needed to support Nicira
extensions that require the ip_proto to
be set. (i.e. tcp_flags_nxm)
tunnel_id_nxm Integer 64bit Tunnel identifier.
tun_ipv4_src IPv4 address Tunnel IPv4 source address.
tun_ipv4_dst IPv4 address Tunnel IPv4 destination address.
pkt_mark Integer 32bit Packet metadata mark.
tcp_flags_nxm Integer 16bit TCP Flags. Requires setting fields:
eth_type_nxm = [0x0800 (IP)|0x86dd (IPv6)] and
ip_proto_nxm = 6 (TCP)
conj_id Integer 32bit Conjunction ID used only with
the conjunction action
ct_state Integer 32bit Conntrack state.
ct_zone Integer 16bit Conntrack zone.
ct_mark Integer 32bit Conntrack mark.
ct_label Integer 128bit Conntrack label.
tun_ipv6_src IPv6 address Tunnel IPv6 source address.
tun_ipv6_dst IPv6 address Tunnel IPv6 destination address.
_dp_hash Integer 32bit Flow hash computed in Datapath.
reg<idx> Integer 32bit Packet register.
<idx> is register number 0-7.
================ =============== ==============================================
.. Note::
Setting the TCP flags via the nicira extensions.
This is required when using OVS version < 2.4.
When using the nxm fields, you need to use any nxm prereq
fields as well or you will receive a OFPBMC_BAD_PREREQ error
Example::
# WILL NOT work
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto=inet.IPPROTO_TCP,
eth_type=eth_type)
# Works
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto_nxm=inet.IPPROTO_TCP,
eth_type_nxm=eth_type)
"""
oxm_types = [
oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_type_nxm', 3, type_desc.Int2),
oxm_fields.NiciraExtended0('ip_proto_nxm', 6, type_desc.Int1),
oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8),
oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4),
oxm_fields.NiciraExtended1('tcp_flags_nxm', 34, type_desc.Int2),
oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2),
oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16),
oxm_fields.NiciraExtended1('tun_ipv6_src', 109, type_desc.IPv6Addr),
oxm_fields.NiciraExtended1('tun_ipv6_dst', 110, type_desc.IPv6Addr),
# The following definition is merely for testing 64-bit experimenter OXMs.
# Following Open vSwitch, we use dp_hash for this purpose.
# Prefix the name with '_' to indicate this is not intended to be used
# in wild.
oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4),
# Support for matching/setting NX registers 0-7
oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4),
oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4),
oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4),
oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4),
oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4),
oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4),
oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4),
oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4),
]

View File

@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import struct
from ryu import utils
@ -26,8 +28,6 @@ from ryu.ofproto.ofproto_parser import StringifyMixin
def generate(ofp_name, ofpp_name):
import sys
import string
import functools
ofp = sys.modules[ofp_name]
ofpp = sys.modules[ofpp_name]
@ -154,7 +154,7 @@ def generate(ofp_name, ofpp_name):
_experimenter = ofproto_common.NX_EXPERIMENTER_ID
def __init__(self):
super(NXAction, self).__init__(experimenter=self._experimenter)
super(NXAction, self).__init__(self._experimenter)
self.subtype = self._subtype
@classmethod
@ -165,14 +165,21 @@ def generate(ofp_name, ofpp_name):
rest = buf[struct.calcsize(fmt_str):]
if subtype_cls is None:
return NXActionUnknown(subtype, rest)
return subtype_cls.parse(rest)
return subtype_cls.parser(rest)
def serialize(self, buf, offset):
data = self.serialize_body()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXAction, self).serialize(buf, offset)
msg_pack_into(NXAction._fmt_str,
buf,
offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE,
self.subtype)
buf += data
@classmethod
def register(cls, subtype_cls):
@ -187,21 +194,135 @@ def generate(ofp_name, ofpp_name):
self.data = data
@classmethod
def parse(cls, subtype, buf):
def parser(cls, buf):
return cls(data=buf)
def serialize(self, buf, offset):
def serialize_body(self):
# fixup
data = self.data
if data is None:
data = bytearray()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionUnknown, self).serialize(buf, offset)
buf += data
return bytearray() if self.data is None else self.data
class NXActionPopQueue(NXAction):
_subtype = nicira_ext.NXAST_POP_QUEUE
_fmt_str = '!6x'
def __init__(self,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionPopQueue, self).__init__()
@classmethod
def parser(cls, buf):
return cls()
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0)
return data
class NXActionRegLoad(NXAction):
_subtype = nicira_ext.NXAST_REG_LOAD
_fmt_str = '!HIQ' # ofs_nbits, dst, value
_TYPE = {
'ascii': [
'dst',
]
}
def __init__(self, start, end, dst, value,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionRegLoad, self).__init__()
self.start = start
self.end = end
self.dst = dst
self.value = value
@classmethod
def parser(cls, buf):
(ofs_nbits, dst, value,) = struct.unpack_from(
cls._fmt_str, buf, 0)
start = ofs_nbits >> 6
end = (ofs_nbits & 0x3f) + start
# Right-shift instead of using oxm_parse_header for simplicity...
dst_name = ofp.oxm_to_user_header(dst >> 9)
return cls(start, end, dst_name, value)
def serialize_body(self):
hdr_data = bytearray()
n = ofp.oxm_from_user_header(self.dst)
ofp.oxm_serialize_header(n, hdr_data, 0)
(dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0)
ofs_nbits = (self.start << 6) + (self.end - self.start)
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
ofs_nbits, dst_num, self.value)
return data
class NXActionNote(NXAction):
_subtype = nicira_ext.NXAST_NOTE
# note
_fmt_str = '!%dB'
# set the integer array in a note
def __init__(self,
note,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionNote, self).__init__()
self.note = note
@classmethod
def parser(cls, buf):
note = struct.unpack_from(
cls._fmt_str % len(buf), buf, 0)
return cls(list(note))
def serialize_body(self):
assert isinstance(self.note, (tuple, list))
for n in self.note:
assert isinstance(n, six.integer_types)
pad = (len(self.note) + nicira_ext.NX_ACTION_HEADER_0_SIZE) % 8
if pad:
self.note += [0x0 for i in range(8 - pad)]
note_len = len(self.note)
data = bytearray()
msg_pack_into(self._fmt_str % note_len, data, 0,
*self.note)
return data
class _NXActionSetTunnelBase(NXAction):
# _subtype, _fmt_str must be attributes of subclass.
def __init__(self,
tun_id,
type_=None, len_=None, experimenter=None, subtype=None):
super(_NXActionSetTunnelBase, self).__init__()
self.tun_id = tun_id
@classmethod
def parser(cls, buf):
(tun_id,) = struct.unpack_from(
cls._fmt_str, buf, 0)
return cls(tun_id)
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
self.tun_id)
return data
class NXActionSetTunnel(_NXActionSetTunnelBase):
_subtype = nicira_ext.NXAST_SET_TUNNEL
# tun_id
_fmt_str = '!2xI'
class NXActionSetTunnel64(_NXActionSetTunnelBase):
_subtype = nicira_ext.NXAST_SET_TUNNEL64
# tun_id
_fmt_str = '!6xQ'
class NXActionRegMove(NXAction):
_subtype = nicira_ext.NXAST_REG_MOVE
@ -224,9 +345,9 @@ def generate(ofp_name, ofpp_name):
self.dst_field = dst_field
@classmethod
def parse(cls, buf):
def parser(cls, buf):
(n_bits, src_ofs, dst_ofs,) = struct.unpack_from(
NXActionRegMove._fmt_str, buf, 0)
cls._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionRegMove._fmt_str):]
# src field
(n, len) = ofp.oxm_parse_header(rest, 0)
@ -240,10 +361,10 @@ def generate(ofp_name, ofpp_name):
return cls(src_field, dst_field=dst_field, n_bits=n_bits,
src_ofs=src_ofs, dst_ofs=dst_ofs)
def serialize(self, buf, offset):
def serialize_body(self):
# fixup
data = bytearray()
msg_pack_into(NXActionRegMove._fmt_str, data, 0,
msg_pack_into(self._fmt_str, data, 0,
self.n_bits, self.src_ofs, self.dst_ofs)
# src field
n = ofp.oxm_from_user_header(self.src_field)
@ -251,14 +372,98 @@ def generate(ofp_name, ofpp_name):
# dst field
n = ofp.oxm_from_user_header(self.dst_field)
ofp.oxm_serialize_header(n, data, len(data))
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionRegMove, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
return data
class NXActionResubmit(NXAction):
_subtype = nicira_ext.NXAST_RESUBMIT
# in_port
_fmt_str = '!H4x'
def __init__(self,
in_port=0xfff8,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionResubmit, self).__init__()
self.in_port = in_port
@classmethod
def parser(cls, buf):
(in_port,) = struct.unpack_from(
cls._fmt_str, buf, 0)
return cls(in_port)
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
self.in_port)
return data
class NXActionResubmitTable(NXAction):
_subtype = nicira_ext.NXAST_RESUBMIT_TABLE
# in_port, table_id
_fmt_str = '!HB3x'
def __init__(self,
in_port=0xfff8,
table_id=0xff,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionResubmitTable, self).__init__()
self.in_port = in_port
self.table_id = table_id
@classmethod
def parser(cls, buf):
(in_port,
table_id) = struct.unpack_from(
cls._fmt_str, buf, 0)
return cls(in_port, table_id)
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
self.in_port, self.table_id)
return data
class NXActionOutputReg(NXAction):
_subtype = nicira_ext.NXAST_OUTPUT_REG
# ofs_nbits, src, max_len
_fmt_str = '!HIH6x'
def __init__(self,
start,
end,
src,
max_len,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionOutputReg, self).__init__()
self.start = start
self.end = end
self.src = src
self.max_len = max_len
@classmethod
def parser(cls, buf):
(ofs_nbits,
src,
max_len) = struct.unpack_from(
cls._fmt_str, buf, 0)
start = ofs_nbits >> 6
end = (ofs_nbits & 0x3f) + start
return cls(start,
end,
src,
max_len)
def serialize_body(self):
data = bytearray()
ofs_nbits = (self.start << 6) + (self.end - self.start)
msg_pack_into(self._fmt_str, data, 0,
ofs_nbits,
self.src,
self.max_len)
return data
class NXActionLearn(NXAction):
_subtype = nicira_ext.NXAST_LEARN
@ -291,7 +496,7 @@ def generate(ofp_name, ofpp_name):
self.specs = specs
@classmethod
def parse(cls, buf):
def parser(cls, buf):
(idle_timeout,
hard_timeout,
priority,
@ -300,8 +505,8 @@ def generate(ofp_name, ofpp_name):
table_id,
fin_idle_timeout,
fin_hard_timeout,) = struct.unpack_from(
NXActionLearn._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionLearn._fmt_str):]
cls._fmt_str, buf, 0)
rest = buf[struct.calcsize(cls._fmt_str):]
# specs
specs = []
while len(rest) > 0:
@ -319,10 +524,10 @@ def generate(ofp_name, ofpp_name):
fin_hard_timeout=fin_hard_timeout,
specs=specs)
def serialize(self, buf, offset):
def serialize_body(self):
# fixup
data = bytearray()
msg_pack_into(NXActionLearn._fmt_str, data, 0,
msg_pack_into(self._fmt_str, data, 0,
self.idle_timeout,
self.hard_timeout,
self.priority,
@ -333,14 +538,88 @@ def generate(ofp_name, ofpp_name):
self.fin_hard_timeout)
for spec in self.specs:
data += spec.serialize()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionLearn, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
return data
class NXActionExit(NXAction):
_subtype = nicira_ext.NXAST_EXIT
_fmt_str = '!6x'
def __init__(self,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionExit, self).__init__()
@classmethod
def parser(cls, buf):
return cls()
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0)
return data
class NXActionController(NXAction):
_subtype = nicira_ext.NXAST_CONTROLLER
# max_len, controller_id, reason
_fmt_str = '!HHBx'
def __init__(self,
max_len,
controller_id,
reason,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionController, self).__init__()
self.max_len = max_len
self.controller_id = controller_id
self.reason = reason
@classmethod
def parser(cls, buf):
(max_len,
controller_id,
reason) = struct.unpack_from(
cls._fmt_str, buf)
return cls(max_len,
controller_id,
reason)
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
self.max_len,
self.controller_id,
self.reason)
return data
class NXActionFinTimeout(NXAction):
_subtype = nicira_ext.NXAST_FIN_TIMEOUT
# fin_idle_timeout, fin_hard_timeout
_fmt_str = '!HH2x'
def __init__(self,
fin_idle_timeout,
fin_hard_timeout,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionFinTimeout, self).__init__()
self.fin_idle_timeout = fin_idle_timeout
self.fin_hard_timeout = fin_hard_timeout
@classmethod
def parser(cls, buf):
(fin_idle_timeout,
fin_hard_timeout) = struct.unpack_from(
cls._fmt_str, buf, 0)
return cls(fin_idle_timeout,
fin_hard_timeout)
def serialize_body(self):
data = bytearray()
msg_pack_into(self._fmt_str, data, 0,
self.fin_idle_timeout,
self.fin_hard_timeout)
return data
class NXActionConjunction(NXAction):
_subtype = nicira_ext.NXAST_CONJUNCTION
@ -359,67 +638,172 @@ def generate(ofp_name, ofpp_name):
self.id = id_
@classmethod
def parse(cls, buf):
def parser(cls, buf):
(clause,
n_clauses,
id_,) = struct.unpack_from(
NXActionConjunction._fmt_str, buf, 0)
cls._fmt_str, buf, 0)
return cls(clause, n_clauses, id_)
def serialize(self, buf, offset):
def serialize_body(self):
data = bytearray()
msg_pack_into(NXActionConjunction._fmt_str, data, 0,
msg_pack_into(self._fmt_str, data, 0,
self.clause,
self.n_clauses,
self.id)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionConjunction, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
return data
class NXActionResubmitTable(NXAction):
_subtype = nicira_ext.NXAST_RESUBMIT_TABLE
class NXActionMultipath(NXAction):
_subtype = nicira_ext.NXAST_MULTIPATH
# in_port, table_id
_fmt_str = '!HB3x'
# fields, basis, algorithm, max_link,
# arg, ofs_nbits, dst
_fmt_str = '!HH2xHHI2xHI'
def __init__(self,
in_port,
table_id,
fields,
basis,
algorithm,
max_link,
arg,
start,
end,
dst,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionResubmitTable, self).__init__()
self.in_port = in_port
self.table_id = table_id
super(NXActionMultipath, self).__init__()
self.fields = fields
self.basis = basis
self.algorithm = algorithm
self.max_link = max_link
self.arg = arg
self.start = start
self.end = end
self.dst = dst
@classmethod
def parse(cls, buf):
(in_port,
table_id) = struct.unpack_from(
NXActionResubmitTable._fmt_str, buf, 0)
return cls(in_port, table_id)
def parser(cls, buf):
(fields,
basis,
algorithm,
max_link,
arg,
ofs_nbits,
dst) = struct.unpack_from(
cls._fmt_str, buf, 0)
start = ofs_nbits >> 6
end = (ofs_nbits & 0x3f) + start
return cls(fields,
basis,
algorithm,
max_link,
arg,
start,
end,
dst)
def serialize(self, buf, offset):
def serialize_body(self):
ofs_nbits = (self.start << 6) + (self.end - self.start)
data = bytearray()
msg_pack_into(NXActionResubmitTable._fmt_str, data, 0,
self.in_port,
self.table_id)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionResubmitTable, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
msg_pack_into(self._fmt_str, data, 0,
self.fields,
self.basis,
self.algorithm,
self.max_link,
self.arg,
ofs_nbits,
self.dst)
return data
class _NXActionBundleBase(NXAction):
# algorithm, fields, basis, slave_type, n_slaves
# ofs_nbits, dst, slaves
_fmt_str = '!HHHIHHI4x'
def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
start, end, dst, slaves):
super(_NXActionBundleBase, self).__init__()
self.len = utils.round_up(
nicira_ext.NX_ACTION_BUNDLE_0_SIZE + len(slaves) * 2, 8)
self.algorithm = algorithm
self.fields = fields
self.basis = basis
self.slave_type = slave_type
self.n_slaves = n_slaves
self.start = start
self.end = end
self.dst = dst
assert isinstance(slaves, (list, tuple))
for s in slaves:
assert isinstance(s, six.integer_types)
self.slaves = slaves
@classmethod
def parser(cls, buf):
(algorithm, fields, basis,
slave_type, n_slaves, ofs_nbits, dst) = struct.unpack_from(
cls._fmt_str, buf, 0)
start = ofs_nbits >> 6
end = (ofs_nbits & 0x3f) + start
slave_offset = (nicira_ext.NX_ACTION_BUNDLE_0_SIZE -
nicira_ext.NX_ACTION_HEADER_0_SIZE)
slaves = []
for i in range(0, n_slaves):
s = struct.unpack_from('!H', buf, slave_offset)
slaves.append(s[0])
slave_offset += 2
return cls(algorithm, fields, basis, slave_type,
n_slaves, start, end, dst, slaves)
def serialize_body(self):
ofs_nbits = (self.start << 6) + (self.end - self.start)
data = bytearray()
slave_offset = (nicira_ext.NX_ACTION_BUNDLE_0_SIZE -
nicira_ext.NX_ACTION_HEADER_0_SIZE)
self.n_slaves = len(self.slaves)
for s in self.slaves:
msg_pack_into('!H', data, slave_offset, s)
slave_offset += 2
pad_len = (utils.round_up(self.n_slaves, 4) -
self.n_slaves)
if pad_len != 0:
msg_pack_into('%dx' % pad_len * 2, data, slave_offset)
msg_pack_into(self._fmt_str, data, 0,
self.algorithm, self.fields, self.basis,
self.slave_type, self.n_slaves,
ofs_nbits, self.dst)
return data
class NXActionBundle(_NXActionBundleBase):
_subtype = nicira_ext.NXAST_BUNDLE
def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
start, end, dst, slaves):
# NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed.
super(NXActionBundle, self).__init__(
algorithm, fields, basis, slave_type, n_slaves,
start=0, end=0, dst=0, slaves=slaves)
class NXActionBundleLoad(_NXActionBundleBase):
_subtype = nicira_ext.NXAST_BUNDLE_LOAD
def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
start, end, dst, slaves):
super(NXActionBundleLoad, self).__init__(
algorithm, fields, basis, slave_type, n_slaves,
start, end, dst, slaves)
class NXActionCT(NXAction):
_subtype = nicira_ext.NXAST_CT
# flags, zone_src, zone_ofs_nbits (zone_imm), recirc_table,
# flags, zone_src, zone_ofs_nbits, recirc_table,
# pad, alg
_fmt_str = '!HIHB3xH'
# Followed by actions
@ -427,7 +811,8 @@ def generate(ofp_name, ofpp_name):
def __init__(self,
flags,
zone_src,
zone_ofs_nbits, # is zone_imm if zone_src == 0
zone_start,
zone_end,
recirc_table,
alg,
actions,
@ -435,20 +820,23 @@ def generate(ofp_name, ofpp_name):
super(NXActionCT, self).__init__()
self.flags = flags
self.zone_src = zone_src
self.zone_ofs_nbits = zone_ofs_nbits
self.zone_start = zone_start
self.zone_end = zone_end
self.recirc_table = recirc_table
self.alg = alg
self.actions = actions
@classmethod
def parse(cls, buf):
def parser(cls, buf):
(flags,
zone_src,
zone_ofs_nbits,
recirc_table,
alg,) = struct.unpack_from(
NXActionCT._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionCT._fmt_str):]
cls._fmt_str, buf, 0)
zone_start = zone_ofs_nbits >> 6
zone_end = (zone_ofs_nbits & 0x3f) + zone_start
rest = buf[struct.calcsize(cls._fmt_str):]
# actions
actions = []
while len(rest) > 0:
@ -456,27 +844,22 @@ def generate(ofp_name, ofpp_name):
actions.append(action)
rest = rest[action.len:]
return cls(flags, zone_src, zone_ofs_nbits, recirc_table,
return cls(flags, zone_src, zone_start, zone_end, recirc_table,
alg, actions)
def serialize(self, buf, offset):
def serialize_body(self):
zone_ofs_nbits = ((self.zone_start << 6) +
(self.zone_end - self.zone_start))
data = bytearray()
msg_pack_into(NXActionCT._fmt_str, data, 0,
msg_pack_into(self._fmt_str, data, 0,
self.flags,
self.zone_src,
self.zone_ofs_nbits,
zone_ofs_nbits,
self.recirc_table,
self.alg)
for a in self.actions:
a.serialize(data, len(data))
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionCT, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
return data
class NXActionNAT(NXAction):
_subtype = nicira_ext.NXAST_NAT
@ -513,11 +896,11 @@ def generate(ofp_name, ofpp_name):
self.range_proto_max = range_proto_max
@classmethod
def parse(cls, buf):
def parser(cls, buf):
(flags,
range_present) = struct.unpack_from(
NXActionNAT._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionNAT._fmt_str):]
cls._fmt_str, buf, 0)
rest = buf[struct.calcsize(cls._fmt_str):]
# optional parameters
kwargs = dict()
if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MIN:
@ -534,15 +917,15 @@ def generate(ofp_name, ofpp_name):
kwargs['range_ipv6_max'] = (
type_desc.IPv6Addr.to_user(rest[:16]))
rest = rest[16:]
if range_present & NX_NAT_RANGE_PROTO_MIN:
if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MIN:
kwargs['range_proto_min'] = type_desc.Int2.to_user(rest[:2])
rest = rest[2:]
if range_present & NX_NAT_RANGE_PROTO_MAX:
if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MAX:
kwargs['range_proto_max'] = type_desc.Int2.to_user(rest[:2])
return cls(flags, **kwargs)
def serialize(self, buf, offset):
def serialize_body(self):
# Pack optional parameters first, as range_present needs
# to be calculated.
optional_data = b''
@ -573,20 +956,13 @@ def generate(ofp_name, ofpp_name):
self.range_proto_max)
data = bytearray()
msg_pack_into(NXActionNAT._fmt_str, data, 0,
msg_pack_into(self._fmt_str, data, 0,
self.flags,
range_present)
msg_pack_into('!%ds' % len(optional_data), data, len(data),
optional_data)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionNAT, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
return data
def add_attr(k, v):
v.__module__ = ofpp.__name__ # Necessary for stringify stuff
@ -596,10 +972,23 @@ def generate(ofp_name, ofpp_name):
add_attr('NXActionUnknown', NXActionUnknown)
classes = [
'NXActionPopQueue',
'NXActionRegLoad',
'NXActionNote',
'NXActionSetTunnel',
'NXActionSetTunnel64',
'NXActionRegMove',
'NXActionLearn',
'NXActionConjunction',
'NXActionResubmit',
'NXActionResubmitTable',
'NXActionOutputReg',
'NXActionLearn',
'NXActionExit',
'NXActionController',
'NXActionFinTimeout',
'NXActionConjunction',
'NXActionMultipath',
'NXActionBundle',
'NXActionBundleLoad',
'NXActionCT',
'NXActionNAT',
'_NXFlowSpec', # exported for testing

View File

@ -16,16 +16,15 @@
# limitations under the License.
import struct
import sys
from ryu import exception
from ryu.lib import mac
from ryu.lib import type_desc
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto import ether
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import inet
from ryu.ofproto import oxm_fields
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
@ -94,6 +93,7 @@ class Flow(ofproto_parser.StringifyMixin):
self.regs = [0] * FLOW_N_REGS
self.ipv6_label = 0
self.pkt_mark = 0
self.tcp_flags = 0
class FlowWildcards(ofproto_parser.StringifyMixin):
@ -116,6 +116,7 @@ class FlowWildcards(ofproto_parser.StringifyMixin):
self.regs_mask = [0] * FLOW_N_REGS
self.wildcards = ofproto_v1_0.OFPFW_ALL
self.pkt_mark_mask = 0
self.tcp_flags_mask = 0
class ClsRule(ofproto_parser.StringifyMixin):
@ -312,6 +313,10 @@ class ClsRule(ofproto_parser.StringifyMixin):
self.flow.pkt_mark = pkt_mark
self.wc.pkt_mark_mask = mask
def set_tcp_flags(self, tcp_flags, mask):
self.flow.tcp_flags = tcp_flags
self.wc.tcp_flags_mask = mask
def flow_format(self):
# Tunnel ID is only supported by NXM
if self.wc.tun_id_mask != 0:
@ -332,6 +337,9 @@ class ClsRule(ofproto_parser.StringifyMixin):
if self.wc.regs_bits > 0:
return ofproto_v1_0.NXFF_NXM
if self.flow.tcp_flags > 0:
return ofproto_v1_0.NXFF_NXM
return ofproto_v1_0.NXFF_OPENFLOW10
def match_tuple(self):
@ -948,6 +956,19 @@ class MFPktMark(MFField):
rule.wc.pkt_mark_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TCP_FLAGS,
ofproto_v1_0.NXM_NX_TCP_FLAGS_W])
class MFTcpFlags(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tcp_flags,
rule.wc.tcp_flags_mask)
def serialize_nxm_match(rule, buf, offset):
old_offset = offset
@ -1029,6 +1050,22 @@ def serialize_nxm_match(rule, buf, offset):
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tcp_flags != 0:
# TCP Flags can only be used if the ethernet type is IPv4 or IPv6
if rule.flow.dl_type in (ether.ETH_TYPE_IP, ether.ETH_TYPE_IPV6):
# TCP Flags can only be used if the ip protocol is TCP
if rule.flow.nw_proto == inet.IPPROTO_TCP:
if rule.wc.tcp_flags_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_NX_TCP_FLAGS
else:
header = ofproto_v1_0.NXM_NX_TCP_FLAGS_W
else:
header = 0
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
# IP Source and Destination
if rule.flow.nw_src != 0:
if rule.wc.nw_src_mask == UINT32_MAX:
@ -1189,65 +1226,3 @@ class NXMatch(object):
msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset, self.header)
return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
#
# The followings are implementations for OpenFlow 1.2+
#
sys.modules[__name__].__doc__ = """
The API of this class is the same as ``OFPMatch``.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==================================
Argument Value Description
================ =============== ==================================
eth_dst_nxm MAC address Ethernet destination address.
eth_src_nxm MAC address Ethernet source address.
tunnel_id_nxm Integer 64bit Tunnel identifier.
tun_ipv4_src IPv4 address Tunnel IPv4 source address.
tun_ipv4_dst IPv4 address Tunnel IPv4 destination address.
pkt_mark Integer 32bit Packet metadata mark.
conj_id Integer 32bit Conjunction ID used only with
the conjunction action
ct_state Integer 32bit Conntrack state.
ct_zone Integer 16bit Conntrack zone.
ct_mark Integer 32bit Conntrack mark.
ct_label Integer 128bit Conntrack label.
_dp_hash Integer 32bit Flow hash computed in Datapath.
reg<idx> Integer 32bit Packet register.
<idx> is register number 0-7.
================ =============== ==================================
"""
oxm_types = [
oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr),
oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8),
oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4),
oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2),
oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16),
# The following definition is merely for testing 64-bit experimenter OXMs.
# Following Open vSwitch, we use dp_hash for this purpose.
# Prefix the name with '_' to indicate this is not intended to be used
# in wild.
oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4),
# Support for matching/setting NX registers 0-7
oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4),
oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4),
oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4),
oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4),
oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4),
oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4),
oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4),
oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4),
]

View File

@ -21,10 +21,13 @@ OFP_HEADER_PACK_STR = '!BBHI'
OFP_HEADER_SIZE = 8
assert calcsize(OFP_HEADER_PACK_STR) == OFP_HEADER_SIZE
# note: while IANA assigned port number for OpenFlow is 6653,
# 6633 is (still) the defacto standard.
OFP_TCP_PORT = 6633
OFP_SSL_PORT = 6633
# Note: IANA assigned port number for OpenFlow is 6653
# from OpenFlow 1.3.3 (EXT-133).
# Some applications may still use 6633 as the de facto standard though.
OFP_TCP_PORT = 6653
OFP_SSL_PORT = 6653
OFP_TCP_PORT_OLD = 6633
OFP_SSL_PORT_OLD = 6633
# Vendor/Experimenter IDs
# https://rs.opennetworking.org/wiki/display/PUBLIC/ONF+Registry

View File

@ -170,7 +170,7 @@ class MsgBase(StringifyMixin):
def __str__(self):
def hexify(x):
return hex(x) if isinstance(x, int) else x
return hex(x) if isinstance(x, six.integer_types) else x
buf = 'version=%s,msg_type=%s,msg_len=%s,xid=%s,' %\
(hexify(self.version), hexify(self.msg_type),
hexify(self.msg_len), hexify(self.xid))

View File

@ -18,10 +18,8 @@
OpenFlow 1.0 definitions.
"""
from struct import calcsize
from ryu.ofproto import ofproto_utils
from ryu.ofproto.nicira_ext import * # For API compat
MAX_XID = 0xffffffff
@ -227,6 +225,8 @@ OFP_ACTION_VENDOR_HEADER_PACK_STR = '!HHI'
OFP_ACTION_VENDOR_HEADER_SIZE = 8
assert (calcsize(OFP_ACTION_VENDOR_HEADER_PACK_STR) ==
OFP_ACTION_VENDOR_HEADER_SIZE)
# OpenFlow1.2 or later compatible
OFP_ACTION_EXPERIMENTER_HEADER_SIZE = OFP_ACTION_VENDOR_HEADER_SIZE
OFP_ACTION_HEADER_PACK_STR = '!HH4x'
OFP_ACTION_HEADER_SIZE = 8
@ -496,107 +496,37 @@ OFP_QUEUE_PROP_MIN_RATE_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_MIN_RATE_PACK_STR) +
OFP_QUEUE_PROP_HEADER_SIZE == OFP_QUEUE_PROP_MIN_RATE_SIZE)
# OXM
# enum ofp_oxm_class
OFPXMC_OPENFLOW_BASIC = 0x8000 # Basic class for OpenFlow
def _oxm_tlv_header(class_, field, hasmask, length):
return (class_ << 16) | (field << 9) | (hasmask << 8) | length
def oxm_tlv_header(field, length):
return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 0, length)
def oxm_tlv_header_w(field, length):
return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 1, length * 2)
def oxm_tlv_header_extract_hasmask(header):
return (header >> 8) & 1
def oxm_tlv_header_extract_length(header):
if oxm_tlv_header_extract_hasmask(header):
length = (header & 0xff) // 2
else:
length = header & 0xff
return length
oxm_fields.generate(__name__)
# generate utility methods
ofproto_utils.generate(__name__)
def nxm_header__(vendor, field, hasmask, length):
return (vendor << 16) | (field << 9) | (hasmask << 8) | length
def nxm_header(vendor, field, length):
return nxm_header__(vendor, field, 0, length)
def nxm_header_w(vendor, field, length):
return nxm_header__(vendor, field, 1, (length) * 2)
NXM_OF_IN_PORT = nxm_header(0x0000, 0, 2)
NXM_OF_ETH_DST = nxm_header(0x0000, 1, 6)
NXM_OF_ETH_DST_W = nxm_header_w(0x0000, 1, 6)
NXM_OF_ETH_SRC = nxm_header(0x0000, 2, 6)
NXM_OF_ETH_SRC_W = nxm_header_w(0x0000, 2, 6)
NXM_OF_ETH_TYPE = nxm_header(0x0000, 3, 2)
NXM_OF_VLAN_TCI = nxm_header(0x0000, 4, 2)
NXM_OF_VLAN_TCI_W = nxm_header_w(0x0000, 4, 2)
NXM_OF_IP_TOS = nxm_header(0x0000, 5, 1)
NXM_OF_IP_PROTO = nxm_header(0x0000, 6, 1)
NXM_OF_IP_SRC = nxm_header(0x0000, 7, 4)
NXM_OF_IP_SRC_W = nxm_header_w(0x0000, 7, 4)
NXM_OF_IP_DST = nxm_header(0x0000, 8, 4)
NXM_OF_IP_DST_W = nxm_header_w(0x0000, 8, 4)
NXM_OF_TCP_SRC = nxm_header(0x0000, 9, 2)
NXM_OF_TCP_SRC_W = nxm_header_w(0x0000, 9, 2)
NXM_OF_TCP_DST = nxm_header(0x0000, 10, 2)
NXM_OF_TCP_DST_W = nxm_header_w(0x0000, 10, 2)
NXM_OF_UDP_SRC = nxm_header(0x0000, 11, 2)
NXM_OF_UDP_SRC_W = nxm_header_w(0x0000, 11, 2)
NXM_OF_UDP_DST = nxm_header(0x0000, 12, 2)
NXM_OF_UDP_DST_W = nxm_header_w(0x0000, 12, 2)
NXM_OF_ICMP_TYPE = nxm_header(0x0000, 13, 1)
NXM_OF_ICMP_CODE = nxm_header(0x0000, 14, 1)
NXM_OF_ARP_OP = nxm_header(0x0000, 15, 2)
NXM_OF_ARP_SPA = nxm_header(0x0000, 16, 4)
NXM_OF_ARP_SPA_W = nxm_header_w(0x0000, 16, 4)
NXM_OF_ARP_TPA = nxm_header(0x0000, 17, 4)
NXM_OF_ARP_TPA_W = nxm_header_w(0x0000, 17, 4)
NXM_NX_TUN_ID = nxm_header(0x0001, 16, 8)
NXM_NX_TUN_ID_W = nxm_header_w(0x0001, 16, 8)
NXM_NX_TUN_IPV4_SRC = nxm_header(0x0001, 31, 4)
NXM_NX_TUN_IPV4_SRC_W = nxm_header_w(0x0001, 31, 4)
NXM_NX_TUN_IPV4_DST = nxm_header(0x0001, 32, 4)
NXM_NX_TUN_IPV4_DST_W = nxm_header_w(0x0001, 32, 4)
NXM_NX_ARP_SHA = nxm_header(0x0001, 17, 6)
NXM_NX_ARP_THA = nxm_header(0x0001, 18, 6)
NXM_NX_IPV6_SRC = nxm_header(0x0001, 19, 16)
NXM_NX_IPV6_SRC_W = nxm_header_w(0x0001, 19, 16)
NXM_NX_IPV6_DST = nxm_header(0x0001, 20, 16)
NXM_NX_IPV6_DST_W = nxm_header_w(0x0001, 20, 16)
NXM_NX_ICMPV6_TYPE = nxm_header(0x0001, 21, 1)
NXM_NX_ICMPV6_CODE = nxm_header(0x0001, 22, 1)
NXM_NX_ND_TARGET = nxm_header(0x0001, 23, 16)
NXM_NX_ND_TARGET_W = nxm_header_w(0x0001, 23, 16)
NXM_NX_ND_SLL = nxm_header(0x0001, 24, 6)
NXM_NX_ND_TLL = nxm_header(0x0001, 25, 6)
NXM_NX_IP_FRAG = nxm_header(0x0001, 26, 1)
NXM_NX_IP_FRAG_W = nxm_header_w(0x0001, 26, 1)
NXM_NX_IPV6_LABEL = nxm_header(0x0001, 27, 4)
NXM_NX_IP_ECN = nxm_header(0x0001, 28, 1)
NXM_NX_IP_TTL = nxm_header(0x0001, 29, 1)
NXM_NX_PKT_MARK = nxm_header(0x0001, 33, 4)
NXM_NX_PKT_MARK_W = nxm_header_w(0x0001, 33, 4)
def nxm_nx_reg(idx):
return nxm_header(0x0001, idx, 4)
def nxm_nx_reg_w(idx):
return nxm_header_w(0x0001, idx, 4)
NXM_HEADER_PACK_STRING = '!I'
from ryu.ofproto.nicira_ext import * # For API compat

View File

@ -28,10 +28,11 @@ from ryu.lib import addrconv
from ryu.lib import ip
from ryu.lib import mac
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto import nx_match
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_v1_0 as ofproto
from ryu.ofproto import nx_match
from ryu.ofproto import nx_actions
from ryu import utils
import logging
@ -215,7 +216,8 @@ class OFPMatch(StringifyMixin):
self.dl_src = mac.DONTCARE
else:
wc &= ~ofproto.OFPFW_DL_SRC
if isinstance(dl_src, (six.text_type, str)) and netaddr.valid_mac(dl_src):
if (isinstance(dl_src, (six.text_type, str)) and
netaddr.valid_mac(dl_src)):
dl_src = addrconv.mac.text_to_bin(dl_src)
if dl_src == 0:
self.dl_src = mac.DONTCARE
@ -226,7 +228,8 @@ class OFPMatch(StringifyMixin):
self.dl_dst = mac.DONTCARE
else:
wc &= ~ofproto.OFPFW_DL_DST
if isinstance(dl_dst, (six.text_type, str)) and netaddr.valid_mac(dl_dst):
if (isinstance(dl_dst, (six.text_type, str)) and
netaddr.valid_mac(dl_dst)):
dl_dst = addrconv.mac.text_to_bin(dl_dst)
if dl_dst == 0:
self.dl_dst = mac.DONTCARE
@ -518,7 +521,8 @@ class OFPActionStripVlan(OFPAction):
class OFPActionDlAddr(OFPAction):
def __init__(self, dl_addr):
super(OFPActionDlAddr, self).__init__()
if isinstance(dl_addr, (six.text_type, str)) and netaddr.valid_mac(dl_addr):
if (isinstance(dl_addr, (six.text_type, str)) and
netaddr.valid_mac(dl_addr)):
dl_addr = addrconv.mac.text_to_bin(dl_addr)
self.dl_addr = dl_addr
@ -781,16 +785,63 @@ class OFPActionVendor(OFPAction):
return cls
return _register_action_vendor
def __init__(self):
def __init__(self, vendor=None):
super(OFPActionVendor, self).__init__()
self.vendor = self.cls_vendor
self.type = ofproto.OFPAT_VENDOR
self.len = None
if vendor is None:
self.vendor = self.cls_vendor
else:
self.vendor = vendor
@classmethod
def parser(cls, buf, offset):
type_, len_, vendor = struct.unpack_from(
ofproto.OFP_ACTION_VENDOR_HEADER_PACK_STR, buf, offset)
cls_ = cls._ACTION_VENDORS.get(vendor)
return cls_.parser(buf, offset)
data = buf[(offset + ofproto.OFP_ACTION_VENDOR_HEADER_SIZE
): offset + len_]
if vendor == ofproto_common.NX_EXPERIMENTER_ID:
obj = NXAction.parse(data) # noqa
else:
cls_ = cls._ACTION_VENDORS.get(vendor, None)
if cls_ is None:
obj = OFPActionVendorUnknown(vendor, data)
else:
obj = cls_.parser(buf, offset)
obj.len = len_
return obj
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_VENDOR_HEADER_PACK_STR,
buf, offset, self.type, self.len, self.vendor)
# OpenFlow1.2 or later compatible
OFPActionExperimenter = OFPActionVendor
class OFPActionVendorUnknown(OFPActionVendor):
    """Catch-all for vendor actions whose vendor id has no registered
    parser class.

    Keeps the raw payload following the vendor action header in
    ``data`` so the action can be re-serialized unchanged.
    """

    def __init__(self, vendor, data=None, type_=None, len_=None):
        super(OFPActionVendorUnknown,
              self).__init__(vendor=vendor)
        # Raw bytes after the vendor action header; may be None.
        self.data = data

    def serialize(self, buf, offset):
        # fixup: derive the total action length from the (possibly
        # empty) payload, rounded up to OpenFlow's 8-byte action
        # alignment.
        data = self.data
        if data is None:
            data = bytearray()
        self.len = (utils.round_up(len(data), 8) +
                    ofproto.OFP_ACTION_VENDOR_HEADER_SIZE)
        super(OFPActionVendorUnknown, self).serialize(buf, offset)
        # Bug fix: pack the local ``data`` (never None) instead of
        # ``self.data``, which raised TypeError on len(None) whenever
        # the action was constructed without a payload.
        msg_pack_into('!%ds' % len(data),
                      buf,
                      offset + ofproto.OFP_ACTION_VENDOR_HEADER_SIZE,
                      data)
@OFPActionVendor.register_action_vendor(ofproto_common.NX_EXPERIMENTER_ID)
@ -822,476 +873,6 @@ class NXActionHeader(OFPActionVendor):
return cls_.parser(buf, offset)
class NXActionResubmitBase(NXActionHeader):
    """Shared layout for the two Nicira resubmit action subtypes.

    Serializes the common (in_port, table) wire format; concrete
    subclasses register themselves with one of the two subtypes.
    """

    def __init__(self, in_port, table):
        super(NXActionResubmitBase, self).__init__()
        # Only the two resubmit subtypes may use this base class.
        assert self.subtype in (ofproto.NXAST_RESUBMIT,
                                ofproto.NXAST_RESUBMIT_TABLE)
        self.in_port = in_port
        self.table = table

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.in_port, self.table)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_RESUBMIT, ofproto.NXAST_RESUBMIT and
    ofproto.NX_ACTION_RESUBMIT_SIZE)
class NXActionResubmit(NXActionResubmitBase):
    """NXAST_RESUBMIT action; the table byte is fixed to 0."""

    def __init__(self, in_port=ofproto.OFPP_IN_PORT):
        super(NXActionResubmit, self).__init__(in_port, 0)

    @classmethod
    def parser(cls, buf, offset):
        # ``table`` is unpacked but deliberately ignored for this subtype.
        type_, len_, vendor, subtype, in_port, table = struct.unpack_from(
            ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset)
        return cls(in_port)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_RESUBMIT_TABLE, ofproto.NX_ACTION_RESUBMIT_SIZE)
class NXActionResubmitTable(NXActionResubmitBase):
    """NXAST_RESUBMIT_TABLE action; carries an explicit table id
    (0xff by default)."""

    def __init__(self, in_port=ofproto.OFPP_IN_PORT, table=0xff):
        super(NXActionResubmitTable, self).__init__(in_port, table)

    @classmethod
    def parser(cls, buf, offset):
        type_, len_, vendor, subtype, in_port, table = struct.unpack_from(
            ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset)
        return cls(in_port, table)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_SET_TUNNEL, ofproto.NX_ACTION_SET_TUNNEL_SIZE)
class NXActionSetTunnel(NXActionHeader):
    """NXAST_SET_TUNNEL action: set the tunnel id (``tun_id``)."""

    def __init__(self, tun_id):
        super(NXActionSetTunnel, self).__init__()
        self.tun_id = tun_id

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_SET_TUNNEL_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor, self.subtype,
                      self.tun_id)

    @classmethod
    def parser(cls, buf, offset):
        type_, len_, vendor, subtype, tun_id = struct.unpack_from(
            ofproto.NX_ACTION_SET_TUNNEL_PACK_STR, buf, offset)
        return cls(tun_id)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_SET_QUEUE, ofproto.NX_ACTION_SET_QUEUE_SIZE)
class NXActionSetQueue(NXActionHeader):
    """NXAST_SET_QUEUE action: select output queue ``queue_id``."""

    def __init__(self, queue_id):
        super(NXActionSetQueue, self).__init__()
        self.queue_id = queue_id

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_SET_QUEUE_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor,
                      self.subtype, self.queue_id)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, queue_id) = struct.unpack_from(
            ofproto.NX_ACTION_SET_QUEUE_PACK_STR, buf, offset)
        return cls(queue_id)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_POP_QUEUE, ofproto.NX_ACTION_POP_QUEUE_SIZE)
class NXActionPopQueue(NXActionHeader):
    """NXAST_POP_QUEUE action: header-only, no payload fields."""

    def __init__(self):
        super(NXActionPopQueue, self).__init__()

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_POP_QUEUE_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor,
                      self.subtype)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype) = struct.unpack_from(
            ofproto.NX_ACTION_POP_QUEUE_PACK_STR, buf, offset)
        return cls()
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_REG_MOVE, ofproto.NX_ACTION_REG_MOVE_SIZE)
class NXActionRegMove(NXActionHeader):
    """NXAST_REG_MOVE action.

    Field names describe a move of ``n_bits`` bits from offset
    ``src_ofs`` of field ``src`` to offset ``dst_ofs`` of field ``dst``.
    """

    def __init__(self, n_bits, src_ofs, dst_ofs, src, dst):
        super(NXActionRegMove, self).__init__()
        self.n_bits = n_bits
        self.src_ofs = src_ofs
        self.dst_ofs = dst_ofs
        self.src = src
        self.dst = dst

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_REG_MOVE_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor,
                      self.subtype, self.n_bits, self.src_ofs, self.dst_ofs,
                      self.src, self.dst)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, n_bits, src_ofs, dst_ofs,
         src, dst) = struct.unpack_from(
            ofproto.NX_ACTION_REG_MOVE_PACK_STR, buf, offset)
        return cls(n_bits, src_ofs, dst_ofs, src, dst)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_REG_LOAD, ofproto.NX_ACTION_REG_LOAD_SIZE)
class NXActionRegLoad(NXActionHeader):
    """NXAST_REG_LOAD action: load ``value`` into the bit range of
    ``dst`` encoded by ``ofs_nbits``."""

    def __init__(self, ofs_nbits, dst, value):
        super(NXActionRegLoad, self).__init__()
        self.ofs_nbits = ofs_nbits
        self.dst = dst
        self.value = value

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_REG_LOAD_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor,
                      self.subtype, self.ofs_nbits, self.dst, self.value)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, ofs_nbits, dst,
         value) = struct.unpack_from(
            ofproto.NX_ACTION_REG_LOAD_PACK_STR, buf, offset)
        return cls(ofs_nbits, dst, value)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_SET_TUNNEL64, ofproto.NX_ACTION_SET_TUNNEL64_SIZE)
class NXActionSetTunnel64(NXActionHeader):
    """NXAST_SET_TUNNEL64 variant of NXActionSetTunnel (wider tun_id
    wire format per its pack string)."""

    def __init__(self, tun_id):
        super(NXActionSetTunnel64, self).__init__()
        self.tun_id = tun_id

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor, self.subtype,
                      self.tun_id)

    @classmethod
    def parser(cls, buf, offset):
        type_, len_, vendor, subtype, tun_id = struct.unpack_from(
            ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR, buf, offset)
        return cls(tun_id)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_MULTIPATH, ofproto.NX_ACTION_MULTIPATH_SIZE)
class NXActionMultipath(NXActionHeader):
    """NXAST_MULTIPATH action: straight pass-through of the multipath
    wire fields (hash fields/basis, link-choice algorithm and arg,
    and the ofs_nbits/dst destination encoding)."""

    def __init__(self, fields, basis, algorithm, max_link, arg,
                 ofs_nbits, dst):
        super(NXActionMultipath, self).__init__()
        self.fields = fields
        self.basis = basis
        self.algorithm = algorithm
        self.max_link = max_link
        self.arg = arg
        self.ofs_nbits = ofs_nbits
        self.dst = dst

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_MULTIPATH_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor, self.subtype,
                      self.fields, self.basis, self.algorithm, self.max_link,
                      self.arg, self.ofs_nbits, self.dst)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, fields, basis, algorithm,
         max_link, arg, ofs_nbits, dst) = struct.unpack_from(
            ofproto.NX_ACTION_MULTIPATH_PACK_STR, buf, offset)
        return cls(fields, basis, algorithm, max_link, arg, ofs_nbits,
                   dst)
@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_NOTE, 0)
class NXActionNote(NXActionHeader):
    """NXAST_NOTE action: carries an arbitrary annotation as a list of
    byte values; variable length (registered with size 0)."""

    def __init__(self, note):
        super(NXActionNote, self).__init__()
        # should check here if the note is valid (only hex values)
        # 10 = bytes preceding the note on the wire; zero-pad the note
        # so the total action length is a multiple of 8.
        pad = (len(note) + 10) % 8
        if pad:
            note += [0x0 for i in range(8 - pad)]
        self.note = note
        self.len = len(note) + 10

    def serialize(self, buf, offset):
        note = self.note
        extra = None
        # The fixed pack string carries the first 6 note bytes; any
        # remainder is appended after the fixed-size part.
        extra_len = len(self.note) - 6
        if extra_len > 0:
            extra = note[6:]
            note = note[0:6]
        msg_pack_into(ofproto.NX_ACTION_NOTE_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor, self.subtype,
                      *note)
        if extra_len > 0:
            msg_pack_into('B' * extra_len, buf,
                          offset + ofproto.NX_ACTION_NOTE_SIZE,
                          *extra)

    @classmethod
    def parser(cls, buf, offset):
        note = struct.unpack_from(
            ofproto.NX_ACTION_NOTE_PACK_STR, buf, offset)
        (type_, len_, vendor, subtype) = note[0:4]
        note = [i for i in note[4:]]
        if len_ > ofproto.NX_ACTION_NOTE_SIZE:
            # Bytes beyond the fixed-size part belong to the note too.
            note_start = offset + ofproto.NX_ACTION_NOTE_SIZE
            note_end = note_start + len_ - ofproto.NX_ACTION_NOTE_SIZE
            note += [int(binascii.b2a_hex(i), 16) for i
                     in buf[note_start:note_end]]
        return cls(note)
class NXActionBundleBase(NXActionHeader):
    """Shared layout for NXAST_BUNDLE / NXAST_BUNDLE_LOAD actions:
    a fixed header followed by ``n_slaves`` 16-bit slave entries."""

    def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
                 ofs_nbits, dst, slaves):
        super(NXActionBundleBase, self).__init__()
        # Fixed part plus 2 bytes per slave.  NOTE(review):
        # ``_len += (_len % 8)`` only yields 8-byte alignment when the
        # remainder happens to be 0 or 4; for other remainders the
        # result is not a multiple of 8 -- confirm intended.
        _len = ofproto.NX_ACTION_BUNDLE_SIZE + len(slaves) * 2
        _len += (_len % 8)
        self.len = _len
        self.algorithm = algorithm
        self.fields = fields
        self.basis = basis
        self.slave_type = slave_type
        self.n_slaves = n_slaves
        self.ofs_nbits = ofs_nbits
        self.dst = dst
        self.slaves = slaves

    def serialize(self, buf, offset):
        # Slaves (and trailing pad) are written first; the fixed header
        # is packed last at ``offset``.
        slave_offset = offset + ofproto.NX_ACTION_BUNDLE_SIZE
        for s in self.slaves:
            msg_pack_into('!H', buf, slave_offset, s)
            slave_offset += 2
        pad_len = (len(self.slaves) * 2 +
                   ofproto.NX_ACTION_BUNDLE_SIZE) % 8
        if pad_len != 0:
            msg_pack_into('%dx' % pad_len, buf, slave_offset)
        msg_pack_into(ofproto.NX_ACTION_BUNDLE_PACK_STR, buf,
                      offset, self.type, self.len, self.vendor, self.subtype,
                      self.algorithm, self.fields, self.basis,
                      self.slave_type, self.n_slaves,
                      self.ofs_nbits, self.dst)

    @classmethod
    def parser(cls, action_cls, buf, offset):
        # ``action_cls`` selects which concrete subclass to construct.
        (type_, len_, vendor, subtype, algorithm, fields, basis,
         slave_type, n_slaves, ofs_nbits, dst) = struct.unpack_from(
            ofproto.NX_ACTION_BUNDLE_PACK_STR, buf, offset)
        slave_offset = offset + ofproto.NX_ACTION_BUNDLE_SIZE
        slaves = []
        for i in range(0, n_slaves):
            s = struct.unpack_from('!H', buf, slave_offset)
            slaves.append(s[0])
            slave_offset += 2
        return action_cls(algorithm, fields, basis, slave_type,
                          n_slaves, ofs_nbits, dst, slaves)
@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_BUNDLE, 0)
class NXActionBundle(NXActionBundleBase):
    """NXAST_BUNDLE action (variable length, registered with size 0)."""

    def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
                 ofs_nbits, dst, slaves):
        super(NXActionBundle, self).__init__(
            algorithm, fields, basis, slave_type, n_slaves,
            ofs_nbits, dst, slaves)

    @classmethod
    def parser(cls, buf, offset):
        return NXActionBundleBase.parser(NXActionBundle, buf, offset)
@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_BUNDLE_LOAD, 0)
class NXActionBundleLoad(NXActionBundleBase):
    """NXAST_BUNDLE_LOAD action (variable length, registered size 0)."""

    def __init__(self, algorithm, fields, basis, slave_type, n_slaves,
                 ofs_nbits, dst, slaves):
        super(NXActionBundleLoad, self).__init__(
            algorithm, fields, basis, slave_type, n_slaves,
            ofs_nbits, dst, slaves)

    @classmethod
    def parser(cls, buf, offset):
        return NXActionBundleBase.parser(NXActionBundleLoad, buf, offset)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_AUTOPATH, ofproto.NX_ACTION_AUTOPATH_SIZE)
class NXActionAutopath(NXActionHeader):
    """NXAST_AUTOPATH action: (ofs_nbits, dst, id) pass-through."""

    def __init__(self, ofs_nbits, dst, id_):
        super(NXActionAutopath, self).__init__()
        self.ofs_nbits = ofs_nbits
        self.dst = dst
        # ``id_`` to avoid shadowing the builtin in the signature.
        self.id = id_

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_AUTOPATH_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.ofs_nbits, self.dst, self.id)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, ofs_nbits, dst,
         id_) = struct.unpack_from(
            ofproto.NX_ACTION_AUTOPATH_PACK_STR, buf, offset)
        return cls(ofs_nbits, dst, id_)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_OUTPUT_REG, ofproto.NX_ACTION_OUTPUT_REG_SIZE)
class NXActionOutputReg(NXActionHeader):
    """NXAST_OUTPUT_REG action: output port taken from the bit range of
    ``src`` encoded by ``ofs_nbits``; ``max_len`` caps packet-in size."""

    def __init__(self, ofs_nbits, src, max_len):
        super(NXActionOutputReg, self).__init__()
        self.ofs_nbits = ofs_nbits
        self.src = src
        self.max_len = max_len

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_OUTPUT_REG_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.ofs_nbits, self.src, self.max_len)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, ofs_nbits, src,
         max_len) = struct.unpack_from(
            ofproto.NX_ACTION_OUTPUT_REG_PACK_STR, buf, offset)
        return cls(ofs_nbits, src, max_len)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_EXIT, ofproto.NX_ACTION_HEADER_SIZE)
class NXActionExit(NXActionHeader):
    """NXAST_EXIT action: header-only, no payload."""

    def __init__(self):
        super(NXActionExit, self).__init__()

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype) = struct.unpack_from(
            ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset)
        return cls()
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_DEC_TTL, ofproto.NX_ACTION_HEADER_SIZE)
class NXActionDecTtl(NXActionHeader):
    """NXAST_DEC_TTL action: header-only, no payload."""

    def __init__(self):
        super(NXActionDecTtl, self).__init__()

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype) = struct.unpack_from(
            ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset)
        return cls()
@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_LEARN, 0)
class NXActionLearn(NXActionHeader):
    """NXAST_LEARN action: flow-mod parameters plus a raw flow-spec
    blob ``spec``; variable length (registered with size 0)."""

    def __init__(self, idle_timeout, hard_timeout, priority, cookie, flags,
                 table_id, fin_idle_timeout, fin_hard_timeout, spec):
        super(NXActionLearn, self).__init__()
        # Zero-pad spec for 8-byte alignment.  NOTE(review): when len_
        # is already a multiple of 8 this still adds a full 8 pad
        # bytes -- confirm that is the intended wire format.
        len_ = len(spec) + ofproto.NX_ACTION_LEARN_SIZE
        pad_len = 8 - (len_ % 8)
        self.len = len_ + pad_len
        self.idle_timeout = idle_timeout
        self.hard_timeout = hard_timeout
        self.priority = priority
        self.cookie = cookie
        self.flags = flags
        self.table_id = table_id
        self.fin_idle_timeout = fin_idle_timeout
        self.fin_hard_timeout = fin_hard_timeout
        self.spec = spec + bytearray(b'\x00' * pad_len)

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_LEARN_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.idle_timeout, self.hard_timeout, self.priority,
                      self.cookie, self.flags, self.table_id,
                      self.fin_idle_timeout, self.fin_hard_timeout)
        # The spec blob is appended verbatim after the fixed part.
        buf += self.spec

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, idle_timeout, hard_timeout, priority,
         cookie, flags, table_id, fin_idle_timeout,
         fin_hard_timeout) = struct.unpack_from(
            ofproto.NX_ACTION_LEARN_PACK_STR, buf, offset)
        # NOTE(review): spec is sliced to the end of buf rather than
        # bounded by len_ -- correct only when this action is the last
        # thing in the buffer; confirm callers guarantee that.
        spec = buf[offset + ofproto.NX_ACTION_LEARN_SIZE:]
        return cls(idle_timeout, hard_timeout, priority,
                   cookie, flags, table_id, fin_idle_timeout,
                   fin_hard_timeout, spec)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_CONTROLLER, ofproto.NX_ACTION_CONTROLLER_SIZE)
class NXActionController(NXActionHeader):
    """NXAST_CONTROLLER action: send to controller ``controller_id``
    with the given packet-in ``max_len`` and ``reason``."""

    def __init__(self, max_len, controller_id, reason):
        super(NXActionController, self).__init__()
        self.max_len = max_len
        self.controller_id = controller_id
        self.reason = reason

    def serialize(self, buf, offset):
        # Trailing 0 fills the pad byte in the pack string.
        msg_pack_into(ofproto.NX_ACTION_CONTROLLER_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.max_len, self.controller_id, self.reason, 0)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, max_len, controller_id, reason,
         _zero) = struct.unpack_from(
            ofproto.NX_ACTION_CONTROLLER_PACK_STR, buf, offset)
        return cls(max_len, controller_id, reason)
@NXActionHeader.register_nx_action_subtype(
    ofproto.NXAST_FIN_TIMEOUT, ofproto.NX_ACTION_FIN_TIMEOUT_SIZE)
class NXActionFinTimeout(NXActionHeader):
    """NXAST_FIN_TIMEOUT action: carries the idle/hard timeouts to
    apply when a TCP FIN is seen."""

    def __init__(self, fin_idle_timeout, fin_hard_timeout):
        super(NXActionFinTimeout, self).__init__()
        self.fin_idle_timeout = fin_idle_timeout
        self.fin_hard_timeout = fin_hard_timeout

    def serialize(self, buf, offset):
        msg_pack_into(ofproto.NX_ACTION_FIN_TIMEOUT_PACK_STR, buf, offset,
                      self.type, self.len, self.vendor, self.subtype,
                      self.fin_idle_timeout, self.fin_hard_timeout)

    @classmethod
    def parser(cls, buf, offset):
        (type_, len_, vendor, subtype, fin_idle_timeout,
         fin_hard_timeout) = struct.unpack_from(
            ofproto.NX_ACTION_FIN_TIMEOUT_PACK_STR, buf, offset)
        return cls(fin_idle_timeout, fin_hard_timeout)
class OFPDescStats(ofproto_parser.namedtuple('OFPDescStats', (
'mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc'))):
@ -1958,8 +1539,8 @@ class NXTFlowRemoved(NiciraHeader):
idle_timeout, match_len,
packet_count, byte_count) = struct.unpack_from(
ofproto.NX_FLOW_REMOVED_PACK_STR, buf, offset)
offset += (ofproto.NX_FLOW_REMOVED_SIZE
- ofproto.NICIRA_HEADER_SIZE)
offset += (ofproto.NX_FLOW_REMOVED_SIZE -
ofproto.NICIRA_HEADER_SIZE)
match = nx_match.NXMatch.parser(buf, offset, match_len)
return cls(datapath, cookie, priority, reason, duration_sec,
duration_nsec, idle_timeout, match_len, packet_count,
@ -2000,8 +1581,8 @@ class NXTPacketIn(NiciraHeader):
cookie, match_len) = struct.unpack_from(
ofproto.NX_PACKET_IN_PACK_STR, buf, offset)
offset += (ofproto.NX_PACKET_IN_SIZE
- ofproto.NICIRA_HEADER_SIZE)
offset += (ofproto.NX_PACKET_IN_SIZE -
ofproto.NICIRA_HEADER_SIZE)
match = nx_match.NXMatch.parser(buf, offset, match_len)
offset += (match_len + 7) // 8 * 8
@ -3070,6 +2651,7 @@ class OFPPacketOut(MsgBase):
self.buffer_id, self.in_port, self._actions_len)
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
@ -3126,15 +2708,14 @@ class OFPFlowMod(MsgBase):
priority, buffer_id, out_port, flags, actions)
datapath.send_msg(req)
"""
def __init__(self, datapath, match, cookie, command,
def __init__(self, datapath, match=None, cookie=0,
command=ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
buffer_id=0xffffffff, out_port=ofproto.OFPP_NONE,
flags=0, actions=None):
if actions is None:
actions = []
super(OFPFlowMod, self).__init__(datapath)
self.match = match
self.match = OFPMatch() if match is None else match
self.cookie = cookie
self.command = command
self.idle_timeout = idle_timeout
@ -3143,7 +2724,7 @@ class OFPFlowMod(MsgBase):
self.buffer_id = buffer_id
self.out_port = out_port
self.flags = flags
self.actions = actions
self.actions = [] if actions is None else actions
def _serialize_body(self):
offset = ofproto.OFP_HEADER_SIZE
@ -3162,6 +2743,30 @@ class OFPFlowMod(MsgBase):
a.serialize(self.buf, offset)
offset += a.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
offset = ofproto.OFP_HEADER_SIZE
msg.match = OFPMatch.parse(msg.buf, offset)
offset += ofproto.OFP_MATCH_SIZE
(msg.cookie, msg.command, msg.idle_timeout, msg.hard_timeout,
msg.priority, msg.buffer_id, msg.out_port,
msg.flags) = struct.unpack_from(
ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, offset)
offset = ofproto.OFP_FLOW_MOD_SIZE
actions = []
while offset < msg_len:
a = OFPAction.parser(buf, offset)
actions.append(a)
offset += a.len
msg.actions = actions
return msg
@_set_msg_type(ofproto.OFPT_PORT_MOD)
class OFPPortMod(MsgBase):
@ -3619,3 +3224,9 @@ class NXAggregateStatsRequest(NXStatsRequest):
ofproto.NX_AGGREGATE_STATS_REQUEST_PACK_STR,
self.buf, ofproto.NX_STATS_MSG_SIZE, self.out_port,
self.match_len, self.table_id)
nx_actions.generate(
'ryu.ofproto.ofproto_v1_0',
'ryu.ofproto.ofproto_v1_0_parser'
)

View File

@ -19,7 +19,7 @@ OpenFlow 1.2 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import nx_match
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_utils
from ryu.ofproto import oxm_fields
@ -836,7 +836,7 @@ oxm_types = [
# EXT-233 Output match Extension
# NOTE(yamamoto): The spec says uint64_t but I assume it's an error.
oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4),
] + nx_match.oxm_types
] + nicira_ext.oxm_types
oxm_fields.generate(__name__)

View File

@ -862,6 +862,7 @@ class OFPPacketOut(MsgBase):
self.buffer_id, self.in_port, self.actions_len)
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
@ -971,6 +972,31 @@ class OFPFlowMod(MsgBase):
inst.serialize(self.buf, offset)
offset += inst.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.cookie, msg.cookie_mask, msg.table_id,
msg.command, msg.idle_timeout, msg.hard_timeout,
msg.priority, msg.buffer_id, msg.out_port,
msg.out_group, msg.flags) = struct.unpack_from(
ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf,
ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE
msg.match = OFPMatch.parser(buf, offset)
offset += utils.round_up(msg.match.length, 8)
instructions = []
while offset < msg_len:
i = OFPInstruction.parser(buf, offset)
instructions.append(i)
offset += i.len
msg.instructions = instructions
return msg
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@ -1557,19 +1583,11 @@ class OFPActionSetField(OFPAction):
return not hasattr(self, 'value')
def to_jsondict(self):
# XXX old api compat
if self._composed_with_old_api():
# copy object first because serialize_old is destructive
o2 = OFPActionSetField(self.field)
# serialize and parse to fill new fields
buf = bytearray()
o2.serialize(buf, 0)
o = OFPActionSetField.parser(six.binary_type(buf), 0)
else:
o = self
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value)
'field': ofproto.oxm_to_jsondict(self.key, self.value),
"len": self.len,
"type": self.type
}
}

View File

@ -19,7 +19,7 @@ OpenFlow 1.3 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import nx_match
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_utils
from ryu.ofproto import oxm_fields
@ -1195,7 +1195,7 @@ oxm_types = [
# EXT-233 Output match Extension
# NOTE(yamamoto): The spec says uint64_t but I assume it's an error.
oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4),
] + nx_match.oxm_types
] + nicira_ext.oxm_types
oxm_fields.generate(__name__)

View File

@ -1686,7 +1686,6 @@ class OFPMatchField(StringifyMixin):
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if ofproto.oxm_tlv_header_extract_hasmask(header):
pack_str = '!' + cls.pack_str[1:] * 2
@ -2155,7 +2154,6 @@ class MTPbbIsid(OFPMatchField):
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if ofproto.oxm_tlv_header_extract_hasmask(header):
pack_str = '!' + cls.pack_str[1:] * 2
@ -2547,6 +2545,7 @@ class OFPPacketOut(MsgBase):
self.buffer_id, self.in_port, self.actions_len)
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
@ -2659,6 +2658,31 @@ class OFPFlowMod(MsgBase):
inst.serialize(self.buf, offset)
offset += inst.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.cookie, msg.cookie_mask, msg.table_id,
msg.command, msg.idle_timeout, msg.hard_timeout,
msg.priority, msg.buffer_id, msg.out_port,
msg.out_group, msg.flags) = struct.unpack_from(
ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf,
ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE
msg.match = OFPMatch.parser(buf, offset)
offset += utils.round_up(msg.match.length, 8)
instructions = []
while offset < msg_len:
i = OFPInstruction.parser(buf, offset)
instructions.append(i)
offset += i.len
msg.instructions = instructions
return msg
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@ -3273,19 +3297,11 @@ class OFPActionSetField(OFPAction):
return not hasattr(self, 'value')
def to_jsondict(self):
# XXX old api compat
if self._composed_with_old_api():
# copy object first because serialize_old is destructive
o2 = OFPActionSetField(self.field)
# serialize and parse to fill new fields
buf = bytearray()
o2.serialize(buf, 0)
o = OFPActionSetField.parser(six.binary_type(buf), 0)
else:
o = self
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value)
'field': ofproto.oxm_to_jsondict(self.key, self.value),
"len": self.len,
"type": self.type
}
}
@ -3402,7 +3418,7 @@ class OFPActionExperimenter(OFPAction):
data = buf[(offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE
): offset + len_]
if experimenter == ofproto_common.NX_EXPERIMENTER_ID:
obj = NXAction.parse(data)
obj = NXAction.parse(data) # noqa
else:
obj = OFPActionExperimenterUnknown(experimenter, data)
obj.len = len_
@ -5137,7 +5153,8 @@ class OFPInstructionId(StringifyMixin):
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
(type_, len_,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
@ -5190,7 +5207,8 @@ class OFPTableFeaturePropNextTables(OFPTableFeatureProp):
rest = cls.get_rest(buf)
ids = []
while rest:
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, six.binary_type(rest), 0)
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR,
six.binary_type(rest), 0)
rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):]
ids.append(i)
return cls(table_ids=ids)
@ -5223,7 +5241,8 @@ class OFPActionId(StringifyMixin):
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
(type_, len_,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
@ -6119,12 +6138,13 @@ class OFPSetAsync(MsgBase):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
packet_in_mask = ofp.OFPR_ACTION | ofp.OFPR_INVALID_TTL
port_status_mask = (ofp.OFPPR_ADD | ofp.OFPPR_DELETE |
ofp.OFPPR_MODIFY)
flow_removed_mask = (ofp.OFPRR_IDLE_TIMEOUT |
ofp.OFPRR_HARD_TIMEOUT |
ofp.OFPRR_DELETE)
packet_in_mask = 1 << ofp.OFPR_ACTION | 1 << ofp.OFPR_INVALID_TTL
port_status_mask = (1 << ofp.OFPPR_ADD
| 1 << ofp.OFPPR_DELETE
| 1 << ofp.OFPPR_MODIFY)
flow_removed_mask = (1 << ofp.OFPRR_IDLE_TIMEOUT
| 1 << ofp.OFPRR_HARD_TIMEOUT
| 1 << ofp.OFPRR_DELETE)
req = ofp_parser.OFPSetAsync(datapath,
[packet_in_mask, 0],
[port_status_mask, 0],

View File

@ -19,7 +19,7 @@ OpenFlow 1.4 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import nx_match
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_utils
from ryu.ofproto import oxm_fields
@ -396,7 +396,7 @@ oxm_types = [
# EXT-233 Output match Extension
# NOTE(yamamoto): The spec says uint64_t but I assume it's an error.
oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4),
] + nx_match.oxm_types
] + nicira_ext.oxm_types
oxm_fields.generate(__name__)
@ -1406,8 +1406,15 @@ OFPACPT_TABLE_STATUS_SLAVE = 8 # Table status mask for slave.
OFPACPT_TABLE_STATUS_MASTER = 9 # Table status mask for master.
OFPACPT_REQUESTFORWARD_SLAVE = 10 # RequestForward mask for slave.
OFPACPT_REQUESTFORWARD_MASTER = 11 # RequestForward mask for master.
OFPTFPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave.
OFPTFPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master.
OFPTFPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave (depracated).
OFPTFPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master (depracated).
# New or updated Ryu applications shall use
# OFPACPT_EXPERIMENTER_SLAVE and OFPACPT_EXPERIMENTER_MASTER.
# The variable name is a typo of in specifications before v1.5.0.
OFPACPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave.
OFPACPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master.
# Backporting from ofproto_v1_5 for consistency with
# later OF specs.
# struct ofp_async_config_prop_reasons
OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR = '!HHI'

View File

@ -24,7 +24,7 @@ import struct
from ryu.lib import addrconv
from ryu.lib.pack_utils import msg_pack_into
from ryu import utils
from ryu.ofproto.ofproto_parser import StringifyMixin, MsgBase, MsgInMsgBase, msg_str_attr
from ryu.ofproto.ofproto_parser import StringifyMixin, MsgBase, MsgInMsgBase
from ryu.ofproto import ether
from ryu.ofproto import nx_actions
from ryu.ofproto import ofproto_parser
@ -1783,7 +1783,8 @@ class OFPInstructionId(StringifyMixin):
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
(type_, len_,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
@ -1834,7 +1835,8 @@ class OFPActionId(StringifyMixin):
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0)
(type_, len_,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
@ -1889,7 +1891,8 @@ class OFPTableFeaturePropNextTables(OFPTableFeatureProp):
rest = cls.get_rest(buf)
ids = []
while rest:
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, six.binary_type(rest), 0)
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR,
six.binary_type(rest), 0)
rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):]
ids.append(i)
return cls(table_ids=ids)
@ -4210,6 +4213,7 @@ class OFPPacketOut(MsgBase):
self.buffer_id, self.in_port, self.actions_len)
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
@ -4326,6 +4330,31 @@ class OFPFlowMod(MsgBase):
inst.serialize(self.buf, offset)
offset += inst.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowMod, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
(msg.cookie, msg.cookie_mask, msg.table_id,
msg.command, msg.idle_timeout, msg.hard_timeout,
msg.priority, msg.buffer_id, msg.out_port,
msg.out_group, msg.flags, msg.importance) = struct.unpack_from(
ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf,
ofproto.OFP_HEADER_SIZE)
offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE
msg.match = OFPMatch.parser(buf, offset)
offset += utils.round_up(msg.match.length, 8)
instructions = []
while offset < msg_len:
i = OFPInstruction.parser(buf, offset)
instructions.append(i)
offset += i.len
msg.instructions = instructions
return msg
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@ -4908,7 +4937,9 @@ class OFPActionSetField(OFPAction):
def to_jsondict(self):
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value)
'field': ofproto.oxm_to_jsondict(self.key, self.value),
"len": self.len,
"type": self.type
}
}
@ -5002,7 +5033,7 @@ class OFPActionExperimenter(OFPAction):
data = buf[(offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE
): offset + len_]
if experimenter == ofproto_common.NX_EXPERIMENTER_ID:
obj = NXAction.parse(data)
obj = NXAction.parse(data) # noqa
else:
obj = OFPActionExperimenterUnknown(experimenter, data)
obj.len = len_
@ -5417,8 +5448,8 @@ class OFPAsyncConfigPropReasons(OFPAsyncConfigProp):
return buf
@OFPAsyncConfigProp.register_type(ofproto.OFPTFPT_EXPERIMENTER_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPTFPT_EXPERIMENTER_MASTER)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_SLAVE)
@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_MASTER)
class OFPAsyncConfigPropExperimenter(OFPPropCommonExperimenter4ByteData):
pass
@ -5505,13 +5536,11 @@ class OFPSetAsync(MsgBase):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
properties = [ofp_parser.OFPAsyncConfigPropReasons(
8, ofp_parser.OFPACPT_PACKET_IN_SLAVE,
(ofp_parser.OFPR_APPLY_ACTION |
ofp_parser.OFPR_INVALID_TTL)),
ofp_parser.OFPAsyncConfigPropExperimenter(
ofp.OFPTFPT_EXPERIMENTER_MASTER,
16, 100, 2, bytearray())]
properties = [
ofp_parser.OFPAsyncConfigPropReasons(
ofp.OFPACPT_PACKET_IN_SLAVE, 8,
(1 << ofp.OFPR_APPLY_ACTION
| 1 << ofp.OFPR_INVALID_TTL))]
req = ofp_parser.OFPSetAsync(datapath, properties)
datapath.send_msg(req)
"""

View File

@ -19,7 +19,7 @@ OpenFlow 1.5 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import nx_match
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_utils
from ryu.ofproto import oxm_fields
from ryu.ofproto import oxs_fields
@ -431,7 +431,7 @@ oxm_types = [
oxm_fields.OpenFlowBasic('tcp_flags', 42, type_desc.Int2),
oxm_fields.OpenFlowBasic('actset_output', 43, type_desc.Int4),
oxm_fields.OpenFlowBasic('packet_type', 44, type_desc.Int4),
] + nx_match.oxm_types
] + nicira_ext.oxm_types
oxm_fields.generate(__name__)

View File

@ -2814,7 +2814,6 @@ class OFPGroupDescStats(StringifyMixin):
self.length = length
self.type = type_
self.group_id = group_id
self.bucket_array_len = bucket_array_len
self.buckets = buckets
self.properties = properties
@ -3223,11 +3222,11 @@ class OFPMeterDescStats(StringifyMixin):
(meter_config.length, meter_config.flags,
meter_config.meter_id) = struct.unpack_from(
ofproto.OFP_METER_CONFIG_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_CONFIG_SIZE
ofproto.OFP_METER_DESC_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_DESC_SIZE
meter_config.bands = []
length = ofproto.OFP_METER_CONFIG_SIZE
length = ofproto.OFP_METER_DESC_SIZE
while length < meter_config.length:
band = OFPMeterBandHeader.parser(buf, offset)
meter_config.bands.append(band)
@ -3309,7 +3308,7 @@ class OFPMeterDescStatsReply(OFPMultipartReply):
class OFPMeterFeaturesStats(ofproto_parser.namedtuple('OFPMeterFeaturesStats',
('max_meter', 'band_types', 'capabilities',
'max_bands', 'max_color'))):
'max_bands', 'max_color', 'features'))):
@classmethod
def parser(cls, buf, offset):
meter_features = struct.unpack_from(
@ -5067,6 +5066,7 @@ class OFPPacketOut(MsgBase):
self.buffer_id, self.actions_len)
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
@ -5183,6 +5183,31 @@ class OFPFlowMod(MsgBase):
inst.serialize(self.buf, offset)
offset += inst.len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
    """Parse an OFPT_FLOW_MOD message from its wire representation.

    Fills in the fixed flow_mod header fields, the flow match
    structure, and the trailing list of instructions, then returns
    the populated message instance.
    """
    # MsgBase.parser populates the common OpenFlow header attributes
    # (datapath, version, msg_type, msg_len, xid) and stores the buffer.
    msg = super(OFPFlowMod, cls).parser(
        datapath, version, msg_type, msg_len, xid, buf)
    # Unpack the fixed body fields that follow the OpenFlow header;
    # the tuple order must match OFP_FLOW_MOD_PACK_STR0 exactly.
    (msg.cookie, msg.cookie_mask, msg.table_id,
     msg.command, msg.idle_timeout, msg.hard_timeout,
     msg.priority, msg.buffer_id, msg.out_port,
     msg.out_group, msg.flags, msg.importance) = struct.unpack_from(
        ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf,
        ofproto.OFP_HEADER_SIZE)
    # Offset of the match structure, measured from the start of *buf*.
    # NOTE(review): the header size is subtracted here while the loop
    # below compares against msg_len (which includes the header) —
    # confirm against MsgBase.parser which base *buf* uses.
    offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE
    msg.match = OFPMatch.parser(buf, offset)
    # Match structures are padded to a multiple of 8 bytes on the wire.
    offset += utils.round_up(msg.match.length, 8)
    # Everything remaining up to msg_len is a sequence of instructions,
    # each self-describing its own length.
    instructions = []
    while offset < msg_len:
        i = OFPInstruction.parser(buf, offset)
        instructions.append(i)
        offset += i.len
    msg.instructions = instructions
    return msg
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@ -5781,7 +5806,9 @@ class OFPActionSetField(OFPAction):
def to_jsondict(self):
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value)
'field': ofproto.oxm_to_jsondict(self.key, self.value),
"len": self.len,
"type": self.type
}
}
@ -5887,12 +5914,14 @@ class OFPActionCopyField(OFPAction):
return cls(n_bits, src_offset, dst_offset, oxm_ids, type_, len_)
def serialize(self, buf, offset):
oxm_ids_buf = bytearray()
for i in self.oxm_ids:
oxm_ids_buf += i.serialize()
self.len += len(oxm_ids_buf)
msg_pack_into(ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf,
offset, self.type, self.len,
self.n_bits, self.src_offset, self.dst_offset)
for i in self.oxm_ids:
buf += i.serialize()
buf += oxm_ids_buf
@OFPAction.register_action_type(ofproto.OFPAT_METER,
@ -6580,13 +6609,11 @@ class OFPSetAsync(MsgBase):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
properties = [ofp_parser.OFPAsyncConfigPropReasons(
8, ofp_parser.OFPACPT_PACKET_IN_SLAVE,
(ofp_parser.OFPR_APPLY_ACTION |
ofp_parser.OFPR_INVALID_TTL)),
ofp_parser.OFPAsyncConfigPropExperimenter(
ofp.OFPTFPT_EXPERIMENTER_MASTER,
16, 100, 2, bytearray())]
properties = [
ofp_parser.OFPAsyncConfigPropReasons(
ofp.OFPACPT_PACKET_IN_SLAVE, 8,
(1 << ofp.OFPR_APPLY_ACTION
| 1 << ofp.OFPR_INVALID_TTL))]
req = ofp_parser.OFPSetAsync(datapath, properties)
datapath.send_msg(req)
"""

View File

@ -82,7 +82,7 @@ def _get_field_info_by_number(oxx, num_to_field, n):
name = f.name
except KeyError:
t = type_desc.UnknownType
if isinstance(n, int):
if isinstance(n, six.integer_types):
name = 'field_%d' % (n,)
else:
raise KeyError('unknown %s field number: %s' % (oxx.upper(), n))

View File

@ -18,6 +18,8 @@
This API can be used by various services like RPC, CLI, IoC, etc.
"""
from __future__ import absolute_import
import inspect
import logging
import traceback
@ -208,7 +210,7 @@ def call(symbol, **kwargs):
LOG.info("API method %s called with args: %s", symbol, str(kwargs))
# TODO(PH, JK) improve the way api function modules are loaded
import all # noqa
from . import all # noqa
if not is_call_registered(symbol):
message = 'Did not find any method registered by symbol %s' % symbol
raise MethodNotFound(message)

View File

@ -14,10 +14,9 @@
# limitations under the License.
import json
from ryu.base import app_manager
from ryu.lib import hub
from ryu.app.wsgi import route, websocket, ControllerBase, WSGIApplication
from ryu.app.wsgi import websocket, ControllerBase, WSGIApplication
from ryu.app.wsgi import rpc_public, WebSocketRPCServer
from ryu.services.protocols.bgp.api.base import call
from ryu.services.protocols.bgp.api.base import PREFIX

View File

@ -18,7 +18,6 @@
import imp
import logging
import traceback
from os import path
from oslo_config import cfg
from ryu.lib import hub

View File

@ -15,12 +15,17 @@
"""
Defines some base class related to managing green threads.
"""
from __future__ import absolute_import
import abc
from collections import OrderedDict
import logging
import six
import socket
import time
import traceback
import weakref
import netaddr
from ryu.lib import hub
@ -38,12 +43,6 @@ from ryu.services.protocols.bgp.utils.evtlet import LoopingCall
# Logger instance for this module.
LOG = logging.getLogger('bgpspeaker.base')
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# Pointer to active/available OrderedDict.
OrderedDict = OrderedDict
@ -127,6 +126,7 @@ class ActivityException(BGPSException):
pass
@six.add_metaclass(abc.ABCMeta)
class Activity(object):
"""Base class for a thread of execution that provides some custom settings.
@ -135,7 +135,6 @@ class Activity(object):
to start another activity or greenthread. Activity also holds pointers
to sockets that it or its child activities or threads have created.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None):
self._name = name
@ -367,7 +366,9 @@ class Activity(object):
sock.bind(sa)
sock.listen(50)
listen_sockets[sa] = sock
except socket.error:
except socket.error as e:
LOG.error('Error creating socket: %s', e)
if sock:
sock.close()
@ -454,7 +455,7 @@ class Sink(object):
self.index = Sink.next_index()
# Event used to signal enqueing.
from utils.evtlet import EventletIOFactory
from .utils.evtlet import EventletIOFactory
self.outgoing_msg_event = EventletIOFactory.create_custom_event()
self.messages_queued = 0

View File

@ -34,8 +34,6 @@ from ryu.services.protocols.bgp.rtconf.common \
import DEFAULT_REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common \
import DEFAULT_REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common \
import DEFAULT_BGP_CONN_RETRY_TIME
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
@ -57,8 +55,6 @@ from ryu.services.protocols.bgp.rtconf.neighbors \
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP
from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD
from ryu.services.protocols.bgp.rtconf.neighbors import IN_FILTER
from ryu.services.protocols.bgp.rtconf.neighbors import OUT_FILTER
from ryu.services.protocols.bgp.rtconf.neighbors import IS_ROUTE_SERVER_CLIENT
from ryu.services.protocols.bgp.rtconf.neighbors import IS_NEXT_HOP_SELF
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE
@ -244,7 +240,8 @@ class BGPSpeaker(object):
next_hop=None, password=None, multi_exit_disc=None,
site_of_origins=None, is_route_server_client=False,
is_next_hop_self=False, local_address=None,
local_port=None, connect_mode=DEFAULT_CONNECT_MODE):
local_port=None, local_as=None,
connect_mode=DEFAULT_CONNECT_MODE):
""" This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
@ -265,11 +262,14 @@ class BGPSpeaker(object):
``enable_vpnv6`` enables VPNv6 address family for this
neighbor. The default is False.
``enable_enhanced_refresh`` enable Enhanced Route Refresh for this
neighbor. The default is False.
``next_hop`` specifies the next hop IP address. If not
specified, the host's IP address used to reach the peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authenticaiton is disabled.
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value.
The default is None and if not specified, MED value is
@ -284,17 +284,19 @@ class BGPSpeaker(object):
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``connect_mode`` specifies how to connect to this neighbor.
CONNECT_MODE_ACTIVE tries to connect from us.
CONNECT_MODE_PASSIVE just listens and wait for the connection.
CONNECT_MODE_BOTH use both methods.
The default is CONNECT_MODE_BOTH
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
The default is the AS number of BGPSpeaker instance.
``connect_mode`` specifies how to connect to this neighbor.
CONNECT_MODE_ACTIVE tries to connect from us.
CONNECT_MODE_PASSIVE just listens and wait for the connection.
CONNECT_MODE_BOTH use both methods.
The default is CONNECT_MODE_BOTH.
"""
bgp_neighbor = {}
bgp_neighbor[neighbors.IP_ADDRESS] = address
@ -332,6 +334,9 @@ class BGPSpeaker(object):
if local_port:
bgp_neighbor[LOCAL_PORT] = local_port
if local_as:
bgp_neighbor[LOCAL_AS] = local_as
call('neighbor.create', **bgp_neighbor)
def neighbor_del(self, address):

View File

@ -17,15 +17,12 @@ from ryu.services.protocols.bgp.base import Activity
from ryu.lib import hub
from ryu.lib.packet import bmp
from ryu.lib.packet import bgp
from ryu.services.protocols.bgp import constants as const
import socket
import logging
from calendar import timegm
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.lib.packet.bgp import BGPUpdate
from ryu.lib.packet.bgp import BGPPathAttributeNextHop
from ryu.lib.packet.bgp import BGPPathAttributeMpReachNLRI
from ryu.lib.packet.bgp import BGPPathAttributeMpUnreachNLRI
LOG = logging.getLogger('bgpspeaker.bmp')
@ -82,9 +79,7 @@ class BMPClient(Activity):
if not self._socket:
return
assert isinstance(msg, bmp.BMPMessage)
serialized_msg = msg.serialize()
ret = self._socket.send(msg.serialize())
self._socket.send(msg.serialize())
def on_adj_rib_in_changed(self, data):
peer = data['peer']

View File

@ -472,7 +472,8 @@ class CoreService(Factory, Activity):
if (host, port) in self.bmpclients:
bmpclient = self.bmpclients[(host, port)]
if bmpclient.started:
LOG.warn("bmpclient is already running for %s:%s", host, port)
LOG.warning("bmpclient is already running for %s:%s",
host, port)
return False
bmpclient = BMPClient(self, host, port)
self.bmpclients[(host, port)] = bmpclient
@ -481,7 +482,7 @@ class CoreService(Factory, Activity):
def stop_bmp(self, host, port):
if (host, port) not in self.bmpclients:
LOG.warn("no bmpclient is running for %s:%s", host, port)
LOG.warning("no bmpclient is running for %s:%s", host, port)
return False
bmpclient = self.bmpclients[(host, port)]

View File

@ -13,10 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from configuration_manager import ConfigurationManager
from import_map_manager import ImportMapManager
from peer_manager import PeerManager
from table_manager import TableCoreManager
from .configuration_manager import ConfigurationManager
from .import_map_manager import ImportMapManager
from .peer_manager import PeerManager
from .table_manager import TableCoreManager
__all__ = ['ImportMapManager', 'TableCoreManager', 'PeerManager',
'ConfigurationManager']

View File

@ -7,10 +7,6 @@ from ryu.services.protocols.bgp.peer import Peer
from ryu.lib.packet.bgp import BGPPathAttributeCommunities
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_COMMUNITIES
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI
from ryu.services.protocols.bgp.utils.bgp \

View File

@ -445,8 +445,8 @@ class TableCoreManager(object):
# of the given path and import this path into them.
route_dist = vpn_path.nlri.route_dist
for vrf_table in interested_tables:
if not (vpn_path.source is None
and route_dist == vrf_table.vrf_conf.route_dist):
if (vpn_path.source is not None and
route_dist != vrf_table.vrf_conf.route_dist):
update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
# Queue the destination for further processing.
if update_vrf_dest is not None:

View File

@ -23,7 +23,9 @@ from abc import ABCMeta
from abc import abstractmethod
from copy import copy
import logging
import functools
import netaddr
import six
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI
@ -42,6 +44,7 @@ from ryu.services.protocols.bgp.processor import BPR_UNKNOWN
LOG = logging.getLogger('bgpspeaker.info_base.base')
@six.add_metaclass(ABCMeta)
class Table(object):
"""A container for holding information about destination/prefixes.
@ -49,7 +52,6 @@ class Table(object):
This is a base class which should be sub-classed for different route
family. A table can be uniquely identified by (Route Family, Scope Id).
"""
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = RF_IPv4_UC
def __init__(self, scope_id, core_service, signal_bus):
@ -81,9 +83,6 @@ class Table(object):
raise NotImplementedError()
def values(self):
return self._destinations.values()
def itervalues(self):
return iter(self._destinations.values())
def insert(self, path):
@ -225,6 +224,10 @@ class NonVrfPathProcessingMixin(object):
because they are processed at VRF level, so different logic applies.
"""
def __init__(self):
self._core_service = None # not assigned yet
self._known_path_list = []
def _best_path_lost(self):
self._best_path = None
@ -249,8 +252,9 @@ class NonVrfPathProcessingMixin(object):
LOG.debug('New best path selected for destination %s', self)
# If old best path was withdrawn
if (old_best_path and old_best_path not in self._known_path_list
and self._sent_routes):
if (old_best_path and
old_best_path not in self._known_path_list and
self._sent_routes):
# Have to clear sent_route list for this destination as
# best path is removed.
self._sent_routes = {}
@ -272,6 +276,7 @@ class NonVrfPathProcessingMixin(object):
self._sent_routes = {}
@six.add_metaclass(ABCMeta)
class Destination(object):
"""State about a particular destination.
@ -279,7 +284,6 @@ class Destination(object):
a routing information base table *Table*.
"""
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = RF_IPv4_UC
def __init__(self, table, nlri):
@ -662,13 +666,13 @@ class Destination(object):
return result
@six.add_metaclass(ABCMeta)
class Path(object):
"""Represents a way of reaching an IP destination.
Also contains other meta-data given to us by a specific source (such as a
peer).
"""
__metaclass__ = ABCMeta
__slots__ = ('_source', '_path_attr_map', '_nlri', '_source_version_num',
'_exported_from', '_nexthop', 'next_path', 'prev_path',
'_is_withdraw', 'med_set_by_target_neighbor')
@ -810,7 +814,7 @@ class Path(object):
return not interested_rts.isdisjoint(curr_rts)
def is_local(self):
return self._source == None
return self._source is None
def has_nexthop(self):
return not (not self._nexthop or self._nexthop == '0.0.0.0' or
@ -832,6 +836,7 @@ class Path(object):
self._path_attr_map, self._nexthop, self._is_withdraw))
@six.add_metaclass(ABCMeta)
class Filter(object):
"""Represents a general filter for in-bound and out-bound filter
@ -842,7 +847,6 @@ class Filter(object):
================ ==================================================
"""
__metaclass__ = ABCMeta
ROUTE_FAMILY = RF_IPv4_UC
@ -880,6 +884,7 @@ class Filter(object):
raise NotImplementedError()
@functools.total_ordering
class PrefixFilter(Filter):
"""
used to specify a prefix for filter.
@ -934,8 +939,11 @@ class PrefixFilter(Filter):
self._ge = ge
self._le = le
def __cmp__(self, other):
return cmp(self.prefix, other.prefix)
def __lt__(self, other):
    # Order prefix filters by their network value; together with
    # __eq__ this backs the @functools.total_ordering decorator
    # applied to the class.
    return self._network < other._network

def __eq__(self, other):
    # Two prefix filters compare equal when they cover the same
    # network.
    # NOTE(review): policy/ge/le are not part of equality — confirm
    # that is intentional before relying on this for deduplication.
    return self._network == other._network
def __repr__(self):
policy = 'PERMIT' \
@ -1009,6 +1017,7 @@ class PrefixFilter(Filter):
le=self._le)
@functools.total_ordering
class ASPathFilter(Filter):
"""
used to specify a prefix for AS_PATH attribute.
@ -1055,8 +1064,11 @@ class ASPathFilter(Filter):
super(ASPathFilter, self).__init__(policy)
self._as_number = as_number
def __cmp__(self, other):
return cmp(self.as_number, other.as_number)
def __lt__(self, other):
    # Order AS-path filters by AS number; together with __eq__ this
    # backs the @functools.total_ordering decorator on the class.
    return self.as_number < other.as_number

def __eq__(self, other):
    # Filters matching the same AS number compare equal.
    # NOTE(review): the policy attribute is not compared — confirm
    # that is intentional.
    return self.as_number == other.as_number
def __repr__(self):
policy = 'TOP'
@ -1223,5 +1235,8 @@ class AttributeMap(object):
if self.attr_type == self.ATTR_LOCAL_PREF else None
filter_string = ','.join(repr(f) for f in self.filters)
return 'AttributeMap(filters=[%s],attribute_type=%s,attribute_value=%s)'\
% (filter_string, attr_type, self.attr_value)
return ('AttributeMap(filters=[%s],'
'attribute_type=%s,'
'attribute_value=%s)' % (filter_string,
attr_type,
self.attr_value))

View File

@ -19,6 +19,7 @@
import abc
import logging
import six
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
@ -55,8 +56,8 @@ class VpnTable(Table):
)
@six.add_metaclass(abc.ABCMeta)
class VpnPath(Path):
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = None
VRF_PATH_CLASS = None
NLRI_CLASS = None
@ -82,11 +83,10 @@ class VpnPath(Path):
return vrf_path
@six.add_metaclass(abc.ABCMeta)
class VpnDest(Destination, NonVrfPathProcessingMixin):
"""Base class for VPN destinations."""
__metaclass__ = abc.ABCMeta
def _best_path_lost(self):
old_best_path = self._best_path
NonVrfPathProcessingMixin._best_path_lost(self)

View File

@ -19,6 +19,7 @@
import abc
import logging
import six
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
@ -44,12 +45,12 @@ from ryu.services.protocols.bgp.utils.stats import RESOURCE_NAME
LOG = logging.getLogger('bgpspeaker.info_base.vrf')
@six.add_metaclass(abc.ABCMeta)
class VrfTable(Table):
"""Virtual Routing and Forwarding information base.
Keeps the destinations imported into the VRF this table represents.
"""
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = None
VPN_ROUTE_FAMILY = None
NLRI_CLASS = None
@ -104,8 +105,8 @@ class VrfTable(Table):
local_route_count = 0
for dest in self.values():
for path in dest.known_path_list:
if (hasattr(path.source, 'version_num')
or path.source == VPN_TABLE):
if (hasattr(path.source, 'version_num') or
path.source == VPN_TABLE):
remote_route_count += 1
else:
local_route_count += 1
@ -273,9 +274,9 @@ class VrfTable(Table):
return super(VrfTable, self).clean_uninteresting_paths(interested_rts)
@six.add_metaclass(abc.ABCMeta)
class VrfDest(Destination):
"""Base class for VRF destination."""
__metaclass__ = abc.ABCMeta
def __init__(self, table, nlri):
super(VrfDest, self).__init__(table, nlri)
@ -424,11 +425,11 @@ class VrfDest(Destination):
'with attribute label_list got %s' % path)
@six.add_metaclass(abc.ABCMeta)
class VrfPath(Path):
"""Represents a way of reaching an IP destination with a VPN.
"""
__slots__ = ('_label_list', '_puid')
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = None
VPN_PATH_CLASS = None

View File

@ -1,4 +1,6 @@
from route_formatter_mixin import RouteFormatterMixin
from __future__ import absolute_import
from .route_formatter_mixin import RouteFormatterMixin
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse

View File

@ -17,12 +17,7 @@ class RouteFormatterMixin(object):
@classmethod
def _format_family(cls, dest_list):
if six.PY3:
import io
msg = io.StringIO()
else:
import StringIO
msg = StringIO.StringIO()
msg = six.StringIO()
def _append_path_info(buff, path, is_best, show_prefix):
aspath = path.get('aspath')

Some files were not shown because too many files have changed in this diff Show More