diff --git a/.pylintrc b/.pylintrc index f4e55be3..3b01bee3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -10,6 +10,6 @@ # W0614: Unused import %s from wildcard import # R0801: Similar lines in %s files disable=C0111,W0511,W0142,E0602,C0103,E1101,R0903,W0614,R0801 -output-format=parseable +msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg} reports=yes files-output=no diff --git a/.travis.yml b/.travis.yml index 263224d9..76eb1985 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,17 +1,21 @@ language: python python: - - "2.7" + - "3.5" # Python 3.5 still needs to be installed on Travis-CI env: - - TOX_ENV=py26 - TOX_ENV=py27 - TOX_ENV=py34 + - TOX_ENV=py35 + - TOX_ENV=pypy26 - TOX_ENV=pep8 install: - - "pip install tox" + - pip install tox coveralls script: - NOSE_VERBOSE=0 tox -e $TOX_ENV +after_success: + - coveralls + sudo: false diff --git a/README.rst b/README.rst index 0626c713..77df3b33 100644 --- a/README.rst +++ b/README.rst @@ -24,7 +24,7 @@ If you prefer to install Ryu from the source code:: % cd ryu; python ./setup.py install If you want to write your Ryu application, have a look at -`Writing ryu application `_ document. +`Writing ryu application `_ document. After writing your application, just type:: % ryu-manager yourapp.py @@ -38,11 +38,13 @@ Some functionalities of ryu requires extra packages: - OF-Config requires lxml - NETCONF requires paramiko - BGP speaker (ssh console) requires paramiko +- OVSDB support requires ovs (Note: python 3.4 requires ovs>=2.6.0.dev0) If you want to use the functionalities, please install requirements:: % pip install lxml % pip install paramiko + % pip install ovs Support diff --git a/doc/source/app/ofctl_rest.rst b/doc/source/app/ofctl_rest.rst index 363b74d1..1cba2f65 100644 --- a/doc/source/app/ofctl_rest.rst +++ b/doc/source/app/ofctl_rest.rst @@ -6,7 +6,7 @@ ryu.app.ofctl_rest provides REST APIs for retrieving the switch stats and Updating the switch stats. 
This application helps you debug your application and get various statistics. -This application supports OpenFlow version 1.0, 1.2 and 1.3. +This application supports OpenFlow version 1.0, 1.2, 1.3, 1.4 and 1.5. .. contents:: :depth: 3 @@ -39,7 +39,7 @@ Get all switches $ curl -X GET http://localhost:8080/stats/switches - :: + .. code-block:: javascript [ 1, @@ -81,7 +81,7 @@ Get the desc stats $ curl -X GET http://localhost:8080/stats/desc/1 - :: + .. code-block:: javascript { "1": { @@ -108,7 +108,7 @@ Get all flows stats URI /stats/flow/ ======= =================== - Response message body: + Response message body(OpenFlow1.3 or earlier): ============== ============================================================ =============== Attribute Description Example @@ -129,11 +129,35 @@ Get all flows stats actions Instruction set ["OUTPUT:2"] ============== ============================================================ =============== + Response message body(OpenFlow1.4 or later): + + ============== ============================================================ ======================================== + Attribute Description Example + ============== ============================================================ ======================================== + dpid Datapath ID "1" + length Length of this entry 88 + table_id Table ID 0 + duration_sec Time flow has been alive in seconds 2 + duration_nsec Time flow has been alive in nanoseconds beyond duration_sec 6.76e+08 + priority Priority of the entry 11111 + idle_timeout Number of seconds idle before expiration 0 + hard_timeout Number of seconds before expiration 0 + flags Bitmap of OFPFF_* flags 1 + cookie Opaque controller-issued identifier 1 + packet_count Number of packets in flow 0 + byte_count Number of bytes in flow 0 + importance Eviction precedence 0 + match Fields to match {"eth_type": 2054} + instructions struct ofp_instruction_header [{"type":GOTO_TABLE", "table_id":1}] + ============== 
============================================================ ======================================== + Example of use:: $ curl -X GET http://localhost:8080/stats/flow/1 - :: + Response (OpenFlow1.3 or earlier): + + .. code-block:: javascript { "1": [ @@ -159,6 +183,44 @@ Get all flows stats ] } + Response (OpenFlow1.4 or later): + + .. code-block:: javascript + + { + "1": [ + { + "length": 88, + "table_id": 0, + "duration_sec": 2, + "duration_nsec": 6.76e+08, + "priority": 11111, + "idle_timeout": 0, + "hard_timeout": 0, + "flags": 1, + "cookie": 1, + "packet_count": 0, + "byte_count": 0, + "match": { + "eth_type": 2054 + }, + "importance": 0, + "instructions": [ + { + "type": "APPLY_ACTIONS", + "actions": [ + { + "port": 2, + "max_len": 0, + "type": "OUTPUT" + } + ] + } + ] + } + ] + } + .. _get-flows-stats-filtered: @@ -203,17 +265,21 @@ Get flows stats filtered by fields } }' http://localhost:8080/stats/flow/1 - :: + Response (OpenFlow1.3 or earlier): + + .. code-block:: javascript { "1": [ { + "length": 88, "table_id": 0, "duration_sec": 2, "duration_nsec": 6.76e+08, "priority": 11111, "idle_timeout": 0, "hard_timeout": 0, + "flags": 1, "cookie": 1, "packet_count": 0, "byte_count": 0, @@ -227,6 +293,45 @@ Get flows stats filtered by fields ] } + Response (OpenFlow1.4 or later): + + .. code-block:: javascript + + { + "1": [ + { + "length": 88, + "table_id": 0, + "duration_sec": 2, + "duration_nsec": 6.76e+08, + "priority": 11111, + "idle_timeout": 0, + "hard_timeout": 0, + "flags": 1, + "cookie": 1, + "packet_count": 0, + "byte_count": 0, + "match": { + "eth_type": 2054 + }, + "importance": 0, + "instructions": [ + { + "type": "APPLY_ACTIONS", + "actions": [ + { + "port": 2, + "max_len": 0, + "type": "OUTPUT" + } + ] + } + ] + } + ] + } + + .. _get-aggregate-flow-stats: @@ -257,7 +362,7 @@ Get aggregate flow stats $ curl -X GET http://localhost:8080/stats/aggregateflow/1 - :: + .. 
code-block:: javascript { "1": [ @@ -311,7 +416,7 @@ Get aggregate flow stats filtered by fields } }' http://localhost:8080/stats/aggregateflow/1 - :: + .. code-block:: javascript { "1": [ @@ -400,7 +505,7 @@ Get table stats Response (OpenFlow1.0): - :: + .. code-block:: javascript { "1": [ @@ -434,7 +539,7 @@ Get table stats Response (OpenFlow1.2): - :: + .. code-block:: javascript { "1": [ @@ -522,7 +627,7 @@ Get table stats Response (OpenFlow1.3): - :: + .. code-block:: javascript { "1": [ @@ -574,7 +679,7 @@ Get table features $ curl -X GET http://localhost:8080/stats/tablefeatures/1 - :: + .. code-block:: javascript { "1": [ @@ -632,12 +737,17 @@ Get ports stats Usage: - ======= =================== + ======= =========================== Method GET - URI /stats/port/ - ======= =================== + URI /stats/port/[/] + ======= =========================== - Response message body: + .. NOTE:: + + Specification of port number is optional. + + + Response message body(OpenFlow1.3 or earlier): ============== ============================================================ ========= Attribute Description Example @@ -660,11 +770,34 @@ Get ports stats duration_nsec Time port has been alive in nanoseconds beyond duration_sec 9.76e+08 ============== ============================================================ ========= + + Response message body(OpenFlow1.4 or later): + + ============== ============================================================ ================================================================================= + Attribute Description Example + ============== ============================================================ ================================================================================= + dpid Datapath ID "1" + port_no Port number 1 + rx_packets Number of received packets 9 + tx_packets Number of transmitted packets 6 + rx_bytes Number of received bytes 738 + tx_bytes Number of transmitted bytes 252 + rx_dropped Number of packets dropped by RX 0 + 
tx_dropped Number of packets dropped by TX 0 + rx_errors Number of receive errors 0 + tx_errors Number of transmit errors 0 + duration_sec Time port has been alive in seconds 12 + duration_nsec Time port has been alive in nanoseconds beyond duration_sec 9.76e+08 + properties struct ofp_port_desc_prop_header [{"rx_frame_err": 0, "rx_over_err": 0, "rx_crc_err": 0, "collisions": 0,...},...] + ============== ============================================================ ================================================================================= + Example of use:: $ curl -X GET http://localhost:8080/stats/port/1 - :: + Response (OpenFlow1.3 or earlier): + + .. code-block:: javascript { "1": [ @@ -692,6 +825,62 @@ Get ports stats ] } + Response (OpenFlow1.4 or later): + + .. code-block:: javascript + + { + "1": [ + { + "port_no": 1, + "rx_packets": 9, + "tx_packets": 6, + "rx_bytes": 738, + "tx_bytes": 252, + "rx_dropped": 0, + "tx_dropped": 0, + "rx_errors": 0, + "tx_errors": 0, + "duration_nsec": 12, + "duration_sec": 9.76e+08, + "properties": [ + { + "rx_frame_err": 0, + "rx_over_err": 0, + "rx_crc_err": 0, + "collisions": 0, + "type": "ETHERNET" + }, + { + "bias_current": 300, + "flags": 3, + "rx_freq_lmda": 1500, + "rx_grid_span": 500, + "rx_offset": 700, + "rx_pwr": 2000, + "temperature": 273, + "tx_freq_lmda": 1500, + "tx_grid_span": 500, + "tx_offset": 700, + "tx_pwr": 2000, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "type": "EXPERIMENTER" + }, + { + : + + : + } + ] + } + ] + } + .. _get-ports-description: @@ -700,14 +889,26 @@ Get ports description Get ports description of the switch which specified with Datapath ID in URI. 
- Usage: + Usage(OpenFlow1.4 or earlier): ======= ======================= Method GET URI /stats/portdesc/ ======= ======================= - Response message body: + Usage(OpenFlow1.5 or later): + + ======= ================================== + Method GET + URI /stats/portdesc//[] + ======= ================================== + + .. NOTE:: + + Specification of port number is optional. + + + Response message body(OpenFlow1.3 or earlier): ============== ====================================== ==================== Attribute Description Example @@ -726,11 +927,28 @@ Get ports description max_speed Max port bitrate in kbps 0 ============== ====================================== ==================== + Response message body(OpenFlow1.4 or later): + + ============== ====================================== ====================================== + Attribute Description Example + ============== ====================================== ====================================== + dpid Datapath ID "1" + port_no Port number 1 + hw_addr Ethernet hardware address "0a:b6:d0:0c:e1:d7" + name Name of port "s1-eth1" + config Bitmap of OFPPC_* flags 0 + state Bitmap of OFPPS_* flags 0 + length Length of this entry 168 + properties struct ofp_port_desc_prop_header [{"length": 32, "curr": 10248,...}...] + ============== ====================================== ====================================== + Example of use:: $ curl -X GET http://localhost:8080/stats/portdesc/1 - :: + Response (OpenFlow1.3 or earlier): + + .. code-block:: javascript { "1": [ @@ -754,6 +972,60 @@ Get ports description ] } + Response (OpenFlow1.4 or later): + + .. 
code-block:: javascript + + { + "1": [ + { + "port_no": 1, + "hw_addr": "0a:b6:d0:0c:e1:d7", + "name": "s1-eth1", + "config": 0, + "state": 0, + "length": 168, + "properties": [ + { + "length": 32, + "curr": 10248, + "advertised": 10240, + "supported": 10248, + "peer": 10248, + "curr_speed": 5000, + "max_speed": 5000, + "type": "ETHERNET" + }, + { + "length": 40, + "rx_grid_freq_lmda": 1500, + "tx_grid_freq_lmda": 1500, + "rx_max_freq_lmda": 2000, + "tx_max_freq_lmda": 2000, + "rx_min_freq_lmda": 1000, + "tx_min_freq_lmda": 1000, + "tx_pwr_max": 2000, + "tx_pwr_min": 1000, + "supported": 1, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + : + + : + } + ] + } + ] + } + Get queues stats ---------------- @@ -762,12 +1034,22 @@ Get queues stats Usage: - ======= ==================== + ======= ========================================= Method GET - URI /stats/queue/ - ======= ==================== + URI /stats/queue/[/[/]] + ======= ========================================= - Response message body: + .. NOTE:: + + Specification of port number and queue id are optional. + + If you want to omitting the port number and setting the queue id, + please specify the keyword "ALL" to the port number. + + e.g. 
GET http://localhost:8080/stats/queue/1/ALL/1 + + + Response message body(OpenFlow1.3 or earlier): ============== ============================================================= =========== Attribute Description Example @@ -782,11 +1064,30 @@ Get queues stats duration_nsec Time queue has been alive in nanoseconds beyond duration_sec 3912967296 ============== ============================================================= =========== + Response message body(OpenFlow1.4 or later): + + ============== ============================================================= ====================================== + Attribute Description Example + ============== ============================================================= ====================================== + dpid Datapath ID "1" + port_no Port number 1 + queue_id Queue ID 0 + tx_bytes Number of transmitted bytes 0 + tx_packets Number of transmitted packets 0 + tx_errors Number of packets dropped due to overrun 0 + duration_sec Time queue has been alive in seconds 4294963425 + duration_nsec Time queue has been alive in nanoseconds beyond duration_sec 3912967296 + length Length of this entry 104 + properties struct ofp_queue_stats_prop_header [{"type": 65535,"length": 12,...},...] + ============== ============================================================= ====================================== + Example of use:: $ curl -X GET http://localhost:8080/stats/queue/1 - :: + Response (OpenFlow1.3 or earlier): + + .. code-block:: javascript { "1": [ @@ -811,6 +1112,55 @@ Get queues stats ] } + Response (OpenFlow1.4 or later): + + .. 
code-block:: javascript + + { + "1": [ + { + "port_no": 1, + "queue_id": 0, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "duration_sec": 4294963425, + "duration_nsec": 3912967296, + "length": 104, + "properties": [ + { + "OFPQueueStatsPropExperimenter": { + "type": 65535, + "length": 16, + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101 + } + }, + { + : + + : + } + ] + }, + { + "port_no": 2, + "queue_id": 1, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "duration_sec": 4294963425, + "duration_nsec": 3912967296, + "length": 48, + "properties": [] + } + ] + } + +.. _get-queues-config: Get queues config ----------------- @@ -819,10 +1169,20 @@ Get queues config Usage: - ======= ================================ + ======= ================================== Method GET - URI /stats/queueconfig// - ======= ================================ + URI /stats/queueconfig//[] + ======= ================================== + + .. NOTE:: + + Specification of port number is optional. + + + .. CAUTION:: + + This message is deprecated in Openflow1.4. + If OpenFlow 1.4 or later is in use, please refer to :ref:`get-queues-description` instead. Response message body: @@ -841,7 +1201,7 @@ Get queues config $ curl -X GET http://localhost:8080/stats/queueconfig/1/1 - :: + .. code-block:: javascript { "1": [ @@ -884,6 +1244,89 @@ Get queues config ] } +.. _get-queues-description: + +Get queues description +---------------------- + + Get queues description of the switch which specified with Datapath ID, Port and Queue_id in URI. + + Usage: + + ======= ============================================= + Method GET + URI /stats/queuedesc/[//[]] + ======= ============================================= + + .. NOTE:: + + Specification of port number and queue id are optional. + + If you want to omitting the port number and setting the queue id, + please specify the keyword "ALL" to the port number. + + e.g. GET http://localhost:8080/stats/queuedesc/1/ALL/1 + + + .. 
CAUTION:: + + This message is available in OpenFlow1.4 or later. + If Openflow1.3 or earlier is in use, please refer to :ref:`get-queues-config` instead. + + + Response message body: + + ================ ====================================================== ======================================== + Attribute Description Example + ================ ====================================================== ======================================== + dpid Datapath ID "1" + len Length in bytes of this queue desc 88 + port_no Port which was queried 1 + queue_id Queue ID 1 + properties struct ofp_queue_desc_prop_header [{"length": 8, ...},...] + ================ ====================================================== ======================================== + + Example of use:: + + $ curl -X GET http://localhost:8080/stats/queuedesc/1/1/1 + + .. code-block:: javascript + + + { + "1": [ + { + "len": 88, + "port_no": 1, + "queue_id": 1, + "properties": [ + { + "length": 8, + "rate": 300, + "type": "MIN_RATE" + }, + { + "length": 8, + "rate": 900, + "type": "MAX_RATE" + }, + { + "length": 16, + "exp_type": 0, + "experimenter": 101, + "data": [1], + "type": "EXPERIMENTER" + }, + { + : + + : + } + ] + } + ] + } + Get groups stats ---------------- @@ -892,10 +1335,15 @@ Get groups stats Usage: - ======= ==================== + ======= ================================ Method GET - URI /stats/group/ - ======= ==================== + URI /stats/group/[/] + ======= ================================ + + .. NOTE:: + + Specification of group id is optional. + Response message body: @@ -919,7 +1367,7 @@ Get groups stats $ curl -X GET http://localhost:8080/stats/group/1 - :: + .. code-block:: javascript { "1": [ @@ -949,14 +1397,26 @@ Get group description stats Get group description stats of the switch which specified with Datapath ID in URI. 
- Usage: + Usage(Openflow1.4 or earlier): ======= ======================== Method GET URI /stats/groupdesc/ ======= ======================== - Response message body: + Usage(Openflow1.5 or later): + + ======= ==================================== + Method GET + URI /stats/groupdesc//[] + ======= ==================================== + + .. NOTE:: + + Specification of group id is optional. + + + Response message body(Openflow1.3 or earlier): =============== ======================================================= ============= Attribute Description Example @@ -974,11 +1434,34 @@ Get group description stats -- actions 0 or more actions associated with the bucket ["OUTPUT:1"] =============== ======================================================= ============= + Response message body(Openflow1.4 or later): + + =============== ======================================================= ==================================== + Attribute Description Example + =============== ======================================================= ==================================== + dpid Datapath ID "1" + type One of OFPGT_* "ALL" + group_id Group ID 1 + length Length of this entry 40 + buckets struct ofp_bucket + -- weight Relative weight of bucket 0 + (Only defined for select groups) + -- watch_port Port whose state affects whether this bucket is live 4294967295 + (Only required for fast failover groups) + -- watch_group Group whose state affects whether this bucket is live 4294967295 + (Only required for fast failover groups) + -- len Length the bucket in bytes, including this header and 32 + any adding to make it 64-bit aligned. + -- actions 0 or more actions associated with the bucket [{"OUTPUT:1", "max_len": 65535,...}] + =============== ======================================================= ==================================== + Example of use:: $ curl -X GET http://localhost:8080/stats/groupdesc/1 - :: + Response (Openflow1.3 or earlier): + + .. 
code-block:: javascript { "1": [ @@ -999,6 +1482,35 @@ Get group description stats ] } + Response (Openflow1.4 or later): + + .. code-block:: javascript + + { + "1": [ + { + "type": "ALL", + "group_id": 1, + "length": 40, + "buckets": [ + { + "weight": 1, + "watch_port": 1, + "watch_group": 1, + "len": 32, + "actions": [ + { + "type": "OUTPUT", + "max_len": 65535, + "port": 2 + } + ] + } + ] + } + ] + } + Get group features stats ------------------------ @@ -1028,7 +1540,7 @@ Get group features stats $ curl -X GET http://localhost:8080/stats/groupfeatures/1 - :: + .. code-block:: javascript { "1": [ @@ -1094,10 +1606,15 @@ Get meters stats Usage: - ======= ======================= + ======= ================================ Method GET - URI /stats/meter/ - ======= ======================= + URI /stats/meter/[/] + ======= ================================ + + .. NOTE:: + + Specification of meter id is optional. + Response message body: @@ -1121,7 +1638,7 @@ Get meters stats $ curl -X GET http://localhost:8080/stats/meter/1 - :: + .. code-block:: javascript { "1": [ @@ -1147,16 +1664,37 @@ Get meters stats .. _get-meter-config-stats: Get meter config stats ------------------------- +---------------------- +Get meter description stats +--------------------------- Get meter config stats of the switch which specified with Datapath ID in URI. - Usage: + .. CAUTION:: - ======= ============================ + This message has been renamed in openflow 1.5. + If Openflow 1.4 or earlier is in use, please use this as Get meter config stats. + If Openflow 1.5 or later is in use, please use this as Get meter description stats. 
+ + + Usage(Openflow1.4 or earlier): + + ======= ====================================== Method GET - URI /stats/meterconfig/ - ======= ============================ + URI /stats/meterconfig/[/] + ======= ====================================== + + Usage(Openflow1.5 or later): + + ======= ====================================== + Method GET + URI /stats/meterdesc/[/] + ======= ====================================== + + .. NOTE:: + + Specification of meter id is optional. + Response message body: @@ -1176,7 +1714,7 @@ Get meter config stats $ curl -X GET http://localhost:8080/stats/meterconfig/1 - :: + .. code-block:: javascript { "1": [ @@ -1226,7 +1764,7 @@ Get meter features stats $ curl -X GET http://localhost:8080/stats/meterfeatures/1 - :: + .. code-block:: javascript { "1": [ @@ -1262,7 +1800,7 @@ Add a flow entry URI /stats/flowentry/add ======= ===================== - Request message body: + Request message body(Openflow1.3 or earlier): ============= ===================================================== ============================== =============== Attribute Description Example Default @@ -1280,12 +1818,31 @@ Add a flow entry actions Instruction set (list of dict) [{"type":"OUTPUT", "port":2}] [] #DROP ============= ===================================================== ============================== =============== + Request message body(Openflow1.4 or later): + + ============= ===================================================== ================================ =============== + Attribute Description Example Default + ============= ===================================================== ================================ =============== + dpid Datapath ID (int) 1 (Mandatory) + cookie Opaque controller-issued identifier (int) 1 0 + cookie_mask Mask used to restrict the cookie bits (int) 1 0 + table_id Table ID to put the flow in (int) 0 0 + idle_timeout Idle time before discarding (seconds) (int) 30 0 + hard_timeout Max time before discarding (seconds) (int) 30 0 
+ priority Priority level of flow entry (int) 11111 0 + buffer_id Buffered packet to apply to, or OFP_NO_BUFFER (int) 1 OFP_NO_BUFFER + flags Bitmap of OFPFF_* flags (int) 1 0 + match Fields to match (dict) {"in_port":1} {} #wildcarded + instructions Instruction set (list of dict) [{"type":"METER", "meter_id":2}] [] #DROP + ============= ===================================================== ================================ =============== + .. NOTE:: For description of match and actions, please see :ref:`description-of-match-and-actions`. + Example of use(Openflow1.3 or earlier): - Example of use:: + :: $ curl -X POST -d '{ "dpid": 1, @@ -1356,6 +1913,85 @@ Add a flow entry ] }' http://localhost:8080/stats/flowentry/add + Example of use(Openflow1.4 or later): + + :: + + $ curl -X POST -d '{ + "dpid": 1, + "cookie": 1, + "cookie_mask": 1, + "table_id": 0, + "idle_timeout": 30, + "hard_timeout": 30, + "priority": 11111, + "flags": 1, + "match":{ + "in_port":1 + }, + "instructions": [ + { + "type": "APPLY_ACTIONS", + "actions": [ + { + "max_len": 65535, + "port": 2, + "type": "OUTPUT" + } + ] + } + ] + }' http://localhost:8080/stats/flowentry/add + + :: + + $ curl -X POST -d '{ + "dpid": 1, + "priority": 22222, + "match":{ + "in_port":1 + }, + "instructions": [ + { + "type":"GOTO_TABLE", + "table_id": 1 + } + ] + }' http://localhost:8080/stats/flowentry/add + + :: + + $ curl -X POST -d '{ + "dpid": 1, + "priority": 33333, + "match":{ + "in_port":1 + }, + "instructions": [ + { + "type":"WRITE_METADATA", + "metadata": 1, + "metadata_mask": 1 + } + ] + }' http://localhost:8080/stats/flowentry/add + + :: + + $ curl -X POST -d '{ + "dpid": 1, + "priority": 44444, + "match":{ + "in_port":1 + }, + "instructions": [ + { + "type":"METER", + "meter_id": 1 + } + ] + }' http://localhost:8080/stats/flowentry/add + .. NOTE:: To confirm flow entry registration, please see :ref:`get-all-flows-stats` or :ref:`get-flows-stats-filtered`. 
@@ -1919,7 +2555,7 @@ Send a experimenter message .. _description-of-match-and-actions: Reference: Description of Match and Actions -============================================ +=========================================== Description of Match on request messages ---------------------------------------- @@ -1959,26 +2595,15 @@ Description of Match on request messages =============== ================================================== ======================================================================================================= in_port Switch input port (int) {"in_port": 7} in_phy_port Switch physical input port (int) {"in_phy_port": 5, "in_port": 3} - metadata Metadata passed between tables (int or string) {"metadata": 12345} - - | {"metadata": "0x1212/0xffff"} - dl_dst Ethernet destination address (string) {"dl_dst": "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"} - dl_src Ethernet source address (string) {"dl_src": "aa:bb:cc:11:22:33"} + metadata Metadata passed between tables (int or string) {"metadata": 12345} or {"metadata": "0x1212/0xffff"} eth_dst Ethernet destination address (string) {"eth_dst": "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"} eth_src Ethernet source address (string) {"eth_src": "aa:bb:cc:11:22:33"} - dl_type Ethernet frame type (int) {"dl_type": 123} eth_type Ethernet frame type (int) {"eth_type": 2048} - dl_vlan VLAN id (int or string) See :ref:`example-of-vlan-id-match-field` vlan_vid VLAN id (int or string) See :ref:`example-of-vlan-id-match-field` vlan_pcp VLAN priority (int) {"vlan_pcp": 3, "vlan_vid": 3} ip_dscp IP DSCP (6 bits in ToS field) (int) {"ip_dscp": 3, "eth_type": 2048} ip_ecn IP ECN (2 bits in ToS field) (int) {"ip_ecn": 0, "eth_type": 34525} - nw_proto IP protocol (int) {"nw_proto": 5, "eth_type": 2048} ip_proto IP protocol (int) {"ip_proto": 5, "eth_type": 34525} - tp_src Transport layer source port (int) {"tp_src": 1, "ip_proto": 6, "eth_type": 2048} - tp_dst Transport layer destination port (int) {"tp_dst": 2, "ip_proto": 6, 
"eth_type": 2048} - nw_src IPv4 source address (string) {"nw_src": "192.168.0.1", "eth_type": 2048} - nw_dst IPv4 destination address (string) {"nw_dst": "192.168.0.1/24", "eth_type": 2048} ipv4_src IPv4 source address (string) {"ipv4_src": "192.168.0.1", "eth_type": 2048} ipv4_dst IPv4 destination address (string) {"ipv4_dst": "192.168.10.10/255.255.255.0", "eth_type": 2048} tcp_src TCP source port (int) {"tcp_src": 3, "ip_proto": 6, "eth_type": 2048} @@ -2005,15 +2630,21 @@ Description of Match on request messages mpls_label MPLS label (int) {"mpls_label": 3, "eth_type": 34888} mpls_tc MPLS Traffic Class (int) {"mpls_tc": 2, "eth_type": 34888} mpls_bos MPLS BoS bit (int) {"mpls_bos": 1, "eth_type": 34888} - pbb_isid PBB I-SID (int or string) {"pbb_isid": 5, "eth_type": 35047} - - | {"pbb_isid": "0x05/0xff", "eth_type": 35047} - tunnel_id Logical Port Metadata (int or string) {"tunnel_id": 7} - - | {"tunnel_id": "0x07/0xff"} - ipv6_exthdr IPv6 Extension Header pseudo-field (int or string) {"ipv6_exthdr": 3, "eth_type": 34525} - - | {"ipv6_exthdr": "0x40/0x1F0", "eth_type": 34525} + (Openflow1.3+) + pbb_isid PBB I-SID (int or string) {"pbb_isid": 5, "eth_type": 35047} or{"pbb_isid": "0x05/0xff", "eth_type": 35047} + (Openflow1.3+) + tunnel_id Logical Port Metadata (int or string) {"tunnel_id": 7} or {"tunnel_id": "0x07/0xff"} + (Openflow1.3+) + ipv6_exthdr IPv6 Extension Header pseudo-field (int or string) {"ipv6_exthdr": 3, "eth_type": 34525} or {"ipv6_exthdr": "0x40/0x1F0", "eth_type": 34525} + (Openflow1.3+) + pbb_uca PBB UCA hander field(int) {"pbb_uca": 1, "eth_type": 35047} + (Openflow1.4+) + tcp_flags TCP flags(int) {"tcp_flags": 2, "ip_proto": 6, "eth_type": 2048} + (Openflow1.5+) + actset_output Output port from action set metadata(int) {"actset_output": 3} + (Openflow1.5+) + packet_type Packet type value(int) {"packet_type": [1, 2048]} + (Openflow1.5+) =============== ================================================== 
======================================================================================================= .. NOTE:: @@ -2145,9 +2776,9 @@ Description of Actions on request messages List of Actions (OpenFlow1.2 or later): - =============== ============================================================================ ================================================================== + =============== ============================================================================ ======================================================================================================================== Actions Description Example - =============== ============================================================================ ================================================================== + =============== ============================================================================ ======================================================================================================================== OUTPUT Output packet from "port" {"type": "OUTPUT", "port": 3} COPY_TTL_OUT Copy TTL outwards {"type": "COPY_TTL_OUT"} COPY_TTL_IN Copy TTL inwards {"type": "COPY_TTL_IN"} @@ -2164,13 +2795,24 @@ Description of Actions on request messages SET_FIELD Set a "field" using "value" See :ref:`example-of-set-field-action` (The set of keywords available for "field" is the same as match field) PUSH_PBB Push a new PBB service tag with "ethertype" {"type": "PUSH_PBB", "ethertype": 35047} + (Openflow1.3+) POP_PBB Pop the outer PBB service tag {"type": "POP_PBB"} + (Openflow1.3+) + COPY_FIELD Copy value between header and register {"type": "COPY_FIELD", "n_bits": 32, "src_offset": 1, "dst_offset": 2, "src_oxm_id": "eth_src", "dst_oxm_id": "eth_dst"} + (Openflow1.5+) + METER Apply meter identified by "meter_id" {"type": "METER", "meter_id": 3} + (Openflow1.5+) + EXPERIMENTER Extensible action for the experimenter {"type": "EXPERIMENTER", "experimenter": 101, "data": "AAECAwQFBgc=", "data_type": 
"base64"} + (Set "base64" or "ascii" to "data_type" field) GOTO_TABLE (Instruction) Setup the next table identified by "table_id" {"type": "GOTO_TABLE", "table_id": 8} WRITE_METADATA (Instruction) Setup the metadata field using "metadata" and "metadata_mask" {"type": "WRITE_METADATA", "metadata": 0x3, "metadata_mask": 0x3} METER (Instruction) Apply meter identified by "meter_id" {"type": "METER", "meter_id": 3} + (deprecated in Openflow1.5) WRITE_ACTIONS (Instruction) Write the action(s) onto the datapath action set {"type": "WRITE_ACTIONS", actions":[{"type":"POP_VLAN",},{ "type":"OUTPUT", "port": 2}]} CLEAR_ACTIONS (Instruction) Clears all actions from the datapath action set {"type": "CLEAR_ACTIONS"} - =============== ============================================================================ ================================================================== + =============== ============================================================================ ======================================================================================================================== + + .. _example-of-set-field-action: @@ -2179,18 +2821,25 @@ Example of set-field action To set VLAN ID to non-VLAN-tagged frame:: - "actions":[ - { - "type": "PUSH_VLAN", # Push a new VLAN tag if a input frame is non-VLAN-tagged - "ethertype": 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame + $ curl -X POST -d '{ + "dpid": 1, + "match":{ + "dl_type": "0x8000" }, - { - "type": "SET_FIELD", - "field": "vlan_vid", # Set VLAN ID - "value": 4102 # Describe sum of vlan_id(e.g. 6) | OFPVID_PRESENT(0x1000=4096) - }, - { - "type": "OUTPUT", - "port": 2 - } - ] + "actions":[ + { + "type": "PUSH_VLAN", # Push a new VLAN tag if a input frame is non-VLAN-tagged + "ethertype": 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame + }, + { + "type": "SET_FIELD", + "field": "vlan_vid", # Set VLAN ID + "value": 4102 # Describe sum of vlan_id(e.g. 
6) | OFPVID_PRESENT(0x1000=4096) + }, + { + "type": "OUTPUT", + "port": 2 + } + ] + }' http://localhost:8080/stats/flowentry/add + diff --git a/doc/source/conf.py b/doc/source/conf.py index 84a5af95..8c0f1937 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -132,6 +132,7 @@ html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True +html_use_smartypants = False # Custom sidebar templates, maps document names to template names. #html_sidebars = {} diff --git a/doc/source/library.rst b/doc/source/library.rst index bc8ff67f..ccdcae30 100644 --- a/doc/source/library.rst +++ b/doc/source/library.rst @@ -9,6 +9,7 @@ Ryu provides some useful library for your network applications. library_packet.rst library_packet_ref.rst + library_pcap.rst library_of_config.rst library_bgp_speaker.rst library_bgp_speaker_ref.rst diff --git a/doc/source/library_pcap.rst b/doc/source/library_pcap.rst new file mode 100644 index 00000000..2eb3fe5e --- /dev/null +++ b/doc/source/library_pcap.rst @@ -0,0 +1,27 @@ +***************** +PCAP file library +***************** + +Introduction +============ + +Ryu PCAP file library helps you to read/write PCAP file which file +format are described in `The Wireshark Wiki`_. + +.. _The Wireshark Wiki: https://wiki.wireshark.org/Development/LibpcapFileFormat + +Reading PCAP file +================= + +For loading the packet data containing in PCAP files, you can use +pcaplib.Reader. + +.. autoclass:: ryu.lib.pcaplib.Reader + +Writing PCAP file +================= + +For dumping the packet data which your RyuApp received, you can use +pcaplib.Writer. + +.. autoclass:: ryu.lib.pcaplib.Writer diff --git a/doc/source/nicira_ext_ref.rst b/doc/source/nicira_ext_ref.rst index dc7d5424..562e12aa 100644 --- a/doc/source/nicira_ext_ref.rst +++ b/doc/source/nicira_ext_ref.rst @@ -27,5 +27,5 @@ but also available in OF1.2+. 
Nicira Extended Match Structures ================================ -.. automodule:: ryu.ofproto.nx_match +.. automodule:: ryu.ofproto.nicira_ext diff --git a/doc/source/ryu_app_api.rst b/doc/source/ryu_app_api.rst index 06276e50..67566c7c 100644 --- a/doc/source/ryu_app_api.rst +++ b/doc/source/ryu_app_api.rst @@ -82,20 +82,10 @@ For example, EventOFPPacketIn for packet-in message. The OpenFlow controller part of Ryu automatically decodes OpenFlow messages received from switches and send these events to Ryu applications which expressed an interest using ryu.controller.handler.set_ev_cls. -OpenFlow event classes have at least the following attributes. +OpenFlow event classes are subclass of the following class. -.. tabularcolumns:: |l|L| +.. autoclass:: ryu.controller.ofp_event.EventOFPMsgBase -============ ============================================================= -Attribute Description -============ ============================================================= -msg An object which describes the corresponding OpenFlow message. -msg.datapath A ryu.controller.controller.Datapath instance which describes - an OpenFlow switch from which we received this OpenFlow message. -============ ============================================================= - -The msg object has some more additional members whose values are extracted -from the original OpenFlow message. See :ref:`ofproto_ref` for more info about OpenFlow messages. ryu.base.app_manager.RyuApp @@ -103,267 +93,87 @@ ryu.base.app_manager.RyuApp See :ref:`api_ref`. -ryu.controller.handler.set_ev_cls(ev_cls, dispatchers=None) -=========================================================== +ryu.controller.handler.set_ev_cls +================================= -A decorator for Ryu application to declare an event handler. -Decorated method will become an event handler. -ev_cls is an event class whose instances this RyuApp wants to receive. 
-dispatchers argument specifies one of the following negotiation phases -(or a list of them) for which events should be generated for this handler. -Note that, in case an event changes the phase, the phase before the change -is used to check the interest. - -.. tabularcolumns:: |l|L| - -=========================================== ================================== -Negotiation phase Description -=========================================== ================================== -ryu.controller.handler.HANDSHAKE_DISPATCHER Sending and waiting for hello - message -ryu.controller.handler.CONFIG_DISPATCHER Version negotiated and sent - features-request message -ryu.controller.handler.MAIN_DISPATCHER Switch-features message received - and sent set-config message -ryu.controller.handler.DEAD_DISPATCHER Disconnect from the peer. Or - disconnecting due to some - unrecoverable errors. -=========================================== ================================== +.. autofunction:: ryu.controller.handler.set_ev_cls ryu.controller.controller.Datapath ================================== -A class to describe an OpenFlow switch connected to this controller. -An instance has the following attributes. - -.. tabularcolumns:: |l|L| - -====================================== ======================================= -Attribute Description -====================================== ======================================= -id 64-bit OpenFlow Datapath ID. - Only available for - ryu.controller.handler.MAIN_DISPATCHER - phase. -ofproto A module which exports OpenFlow - definitions, mainly constants appeared - in the specification, for the - negotiated OpenFlow version. For - example, ryu.ofproto.ofproto_v1_0 for - OpenFlow 1.0. -ofproto_parser A module which exports OpenFlow wire - message encoder and decoder for the - negotiated OpenFlow version. For - example, ryu.ofproto.ofproto_v1_0_parser - for OpenFlow 1.0. -ofproto_parser.OFPxxxx(datapath, ....) 
A callable to prepare an OpenFlow - message for the given switch. It can - be sent with Datapath.send_msg later. - xxxx is a name of the message. For - example OFPFlowMod for flow-mod - message. Arguemnts depend on the - message. -set_xid(self, msg) Generate an OpenFlow XID and put it - in msg.xid. -send_msg(self, msg) Queue an OpenFlow message to send to - the corresponding switch. If msg.xid - is None, set_xid is automatically - called on the message before queueing. -send_packet_out deprecated -send_flow_mod deprecated -send_flow_del deprecated -send_delete_all_flows deprecated -send_barrier Queue an OpenFlow barrier message to - send to the switch. -send_nxt_set_flow_format deprecated -is_reserved_port deprecated -====================================== ======================================= +.. autoclass:: ryu.controller.controller.Datapath ryu.controller.event.EventBase ============================== -The base of all event classes. -A Ryu application can define its own event type by creating a subclass. +.. autoclass:: ryu.controller.event.EventBase ryu.controller.event.EventRequestBase ===================================== -The base class for synchronous request for RyuApp.send_request. +.. autoclass:: ryu.controller.event.EventRequestBase ryu.controller.event.EventReplyBase =================================== -The base class for synchronous request reply for RyuApp.send_reply. +.. autoclass:: ryu.controller.event.EventReplyBase ryu.controller.ofp_event.EventOFPStateChange ============================================ -An event class for negotiation phase change notification. -An instance of this class is sent to observer after changing -the negotiation phase. -An instance has at least the following attributes. +.. 
autoclass:: ryu.controller.ofp_event.EventOFPStateChange -========= ==================================================================== -Attribute Description -========= ==================================================================== -datapath ryu.controller.controller.Datapath instance of the switch -========= ==================================================================== +ryu.controller.ofp_event.EventOFPPortStateChange +================================================ + +.. autoclass:: ryu.controller.ofp_event.EventOFPPortStateChange ryu.controller.dpset.EventDP ============================ -An event class to notify connect/disconnect of a switch. -For OpenFlow switches, one can get the same notification by observing -ryu.controller.ofp_event.EventOFPStateChange. -An instance has at least the following attributes. - -========= ==================================================================== -Attribute Description -========= ==================================================================== -dp A ryu.controller.controller.Datapath instance of the switch -enter True when the switch connected to our controller. False for - disconnect. -========= ==================================================================== +.. autoclass:: ryu.controller.dpset.EventDP ryu.controller.dpset.EventPortAdd ================================= -An event class for switch port status notification. -This event is generated when a new port is added to a switch. -For OpenFlow switches, one can get the same notification by observing -ryu.controller.ofp_event.EventOFPPortStatus. -An instance has at least the following attributes. - -========= ==================================================================== -Attribute Description -========= ==================================================================== -dp A ryu.controller.controller.Datapath instance of the switch -port port number -========= ==================================================================== +.. 
autoclass:: ryu.controller.dpset.EventPortAdd ryu.controller.dpset.EventPortDelete ==================================== -An event class for switch port status notification. -This event is generated when a port is removed from a switch. -For OpenFlow switches, one can get the same notification by observing -ryu.controller.ofp_event.EventOFPPortStatus. -An instance has at least the following attributes. - -========= ==================================================================== -Attribute Description -========= ==================================================================== -dp A ryu.controller.controller.Datapath instance of the switch -port port number -========= ==================================================================== +.. autoclass:: ryu.controller.dpset.EventPortDelete ryu.controller.dpset.EventPortModify ==================================== -An event class for switch port status notification. -This event is generated when some attribute of a port is changed. -For OpenFlow switches, one can get the same notification by observing -ryu.controller.ofp_event.EventOFPPortStatus. -An instance has at least the following attributes. - -========= ==================================================================== -Attribute Description -========= ==================================================================== -dp A ryu.controller.controller.Datapath instance of the switch -port port number -========= ==================================================================== +.. autoclass:: ryu.controller.dpset.EventPortModify ryu.controller.network.EventNetworkPort ======================================= -An event class for notification of port arrival and deperture. -This event is generated when a port is introduced to or removed from a network -by the REST API. -An instance has at least the following attributes. 
- -========== =================================================================== -Attribute Description -========== =================================================================== -network_id Network ID -dpid OpenFlow Datapath ID of the switch to which the port belongs. -port_no OpenFlow port number of the port -add_del True for adding a port. False for removing a port. -========== =================================================================== +.. autoclass:: ryu.controller.network.EventNetworkPort ryu.controller.network.EventNetworkDel ====================================== -An event class for network deletion. -This event is generated when a network is deleted by the REST API. -An instance has at least the following attributes. - -========== =================================================================== -Attribute Description -========== =================================================================== -network_id Network ID -========== =================================================================== +.. autoclass:: ryu.controller.network.EventNetworkDel ryu.controller.network.EventMacAddress ====================================== -An event class for end-point MAC address registration. -This event is generated when a end-point MAC address is updated -by the REST API. -An instance has at least the following attributes. - -=========== ================================================================== -Attribute Description -=========== ================================================================== -network_id Network ID -dpid OpenFlow Datapath ID of the switch to which the port belongs. -port_no OpenFlow port number of the port -mac_address The old MAC address of the port if add_del is False. Otherwise - the new MAC address. -add_del False if this event is a result of a port removal. Otherwise - True. -=========== ================================================================== +.. 
autoclass:: ryu.controller.network.EventMacAddress ryu.controller.tunnels.EventTunnelKeyAdd ======================================== -An event class for tunnel key registration. -This event is generated when a tunnel key is registered or updated -by the REST API. -An instance has at least the following attributes. - -=========== ================================================================== -Attribute Description -=========== ================================================================== -network_id Network ID -tunnel_key Tunnel Key -=========== ================================================================== +.. autoclass:: ryu.controller.tunnels.EventTunnelKeyAdd ryu.controller.tunnels.EventTunnelKeyDel ======================================== -An event class for tunnel key registration. -This event is generated when a tunnel key is removed by the REST API. -An instance has at least the following attributes. - -=========== ================================================================== -Attribute Description -=========== ================================================================== -network_id Network ID -tunnel_key Tunnel Key -=========== ================================================================== +.. autoclass:: ryu.controller.tunnels.EventTunnelKeyDel ryu.controller.tunnels.EventTunnelPort ====================================== -An event class for tunnel port registration. -This event is generated when a tunnel port is added or removed by the REST API. -An instance has at least the following attributes. - -=========== ================================================================== -Attribute Description -=========== ================================================================== -dpid OpenFlow Datapath ID -port_no OpenFlow port number -remote_dpid OpenFlow port number of the tunnel peer -add_del True for adding a tunnel. False for removal. -=========== ================================================================== +.. 
autoclass:: ryu.controller.tunnels.EventTunnelPort diff --git a/ryu/__init__.py b/ryu/__init__.py index f5ac6f84..3f432657 100644 --- a/ryu/__init__.py +++ b/ryu/__init__.py @@ -14,5 +14,5 @@ # limitations under the License. -version_info = (3, 30) +version_info = (4, 4) version = '.'.join(map(str, version_info)) diff --git a/ryu/app/example_switch_13.py b/ryu/app/example_switch_13.py new file mode 100644 index 00000000..046e4ed0 --- /dev/null +++ b/ryu/app/example_switch_13.py @@ -0,0 +1,101 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ryu.base import app_manager +from ryu.controller import ofp_event +from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER +from ryu.controller.handler import set_ev_cls +from ryu.ofproto import ofproto_v1_3 +from ryu.lib.packet import packet +from ryu.lib.packet import ethernet + + +class ExampleSwitch13(app_manager.RyuApp): + OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] + + def __init__(self, *args, **kwargs): + super(ExampleSwitch13, self).__init__(*args, **kwargs) + # initialize mac address table. + self.mac_to_port = {} + + @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) + def switch_features_handler(self, ev): + datapath = ev.msg.datapath + ofproto = datapath.ofproto + parser = datapath.ofproto_parser + + # install the table-miss flow entry. 
+ match = parser.OFPMatch() + actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, + ofproto.OFPCML_NO_BUFFER)] + self.add_flow(datapath, 0, match, actions) + + def add_flow(self, datapath, priority, match, actions): + ofproto = datapath.ofproto + parser = datapath.ofproto_parser + + # construct flow_mod message and send it. + inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, + actions)] + mod = parser.OFPFlowMod(datapath=datapath, priority=priority, + match=match, instructions=inst) + datapath.send_msg(mod) + + @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) + def _packet_in_handler(self, ev): + msg = ev.msg + datapath = msg.datapath + ofproto = datapath.ofproto + parser = datapath.ofproto_parser + + # get Datapath ID to identify OpenFlow switches. + dpid = datapath.id + self.mac_to_port.setdefault(dpid, {}) + + # analyse the received packets using the packet library. + pkt = packet.Packet(msg.data) + eth_pkt = pkt.get_protocol(ethernet.ethernet) + dst = eth_pkt.dst + src = eth_pkt.src + + # get the received port number from packet_in message. + in_port = msg.match['in_port'] + + self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port) + + # learn a mac address to avoid FLOOD next time. + self.mac_to_port[dpid][src] = in_port + + # if the destination mac address is already learned, + # decide which port to output the packet, otherwise FLOOD. + if dst in self.mac_to_port[dpid]: + out_port = self.mac_to_port[dpid][dst] + else: + out_port = ofproto.OFPP_FLOOD + + # construct action list. + actions = [parser.OFPActionOutput(out_port)] + + # install a flow to avoid packet_in next time. + if out_port != ofproto.OFPP_FLOOD: + match = parser.OFPMatch(in_port=in_port, eth_dst=dst) + self.add_flow(datapath, 1, match, actions) + + # construct packet_out message and send it. 
+ out = parser.OFPPacketOut(datapath=datapath, + buffer_id=ofproto.OFP_NO_BUFFER, + in_port=in_port, actions=actions, + data=msg.data) + datapath.send_msg(out) diff --git a/ryu/app/ofctl_rest.py b/ryu/app/ofctl_rest.py index e41d7d6a..9167fbf3 100644 --- a/ryu/app/ofctl_rest.py +++ b/ryu/app/ofctl_rest.py @@ -24,12 +24,17 @@ from ryu.controller import ofp_event from ryu.controller import dpset from ryu.controller.handler import MAIN_DISPATCHER from ryu.controller.handler import set_ev_cls +from ryu.exception import RyuException from ryu.ofproto import ofproto_v1_0 from ryu.ofproto import ofproto_v1_2 from ryu.ofproto import ofproto_v1_3 +from ryu.ofproto import ofproto_v1_4 +from ryu.ofproto import ofproto_v1_5 from ryu.lib import ofctl_v1_0 from ryu.lib import ofctl_v1_2 from ryu.lib import ofctl_v1_3 +from ryu.lib import ofctl_v1_4 +from ryu.lib import ofctl_v1_5 from ryu.app.wsgi import ControllerBase, WSGIApplication @@ -40,6 +45,8 @@ supported_ofctl = { ofproto_v1_0.OFP_VERSION: ofctl_v1_0, ofproto_v1_2.OFP_VERSION: ofctl_v1_2, ofproto_v1_3.OFP_VERSION: ofctl_v1_3, + ofproto_v1_4.OFP_VERSION: ofctl_v1_4, + ofproto_v1_5.OFP_VERSION: ofctl_v1_5, } # REST API @@ -53,6 +60,12 @@ supported_ofctl = { # get the desc stats of the switch # GET /stats/desc/ # +# get flows desc stats of the switch +# GET /stats/flowdesc/ +# +# get flows desc stats of the switch filtered by the fields +# POST /stats/flowdesc/ +# # get flows stats of the switch # GET /stats/flow/ # @@ -72,34 +85,56 @@ supported_ofctl = { # GET /stats/tablefeatures/ # # get ports stats of the switch -# GET /stats/port/ +# GET /stats/port/[/] +# Note: Specification of port number is optional # # get queues stats of the switch -# GET /stats/queue/ +# GET /stats/queue/[/[/]] +# Note: Specification of port number and queue id are optional +# If you want to omitting the port number and setting the queue id, +# please specify the keyword "ALL" to the port number +# e.g. 
GET /stats/queue/1/ALL/1 # # get queues config stats of the switch -# GET /stats/queueconfig// +# GET /stats/queueconfig/[/] +# Note: Specification of port number is optional +# +# get queues desc stats of the switch +# GET /stats/queuedesc/[/[/]] +# Note: Specification of port number and queue id are optional +# If you want to omitting the port number and setting the queue id, +# please specify the keyword "ALL" to the port number +# e.g. GET /stats/queuedesc/1/ALL/1 # # get meter features stats of the switch # GET /stats/meterfeatures/ # # get meter config stats of the switch -# GET /stats/meterconfig/ +# GET /stats/meterconfig/[/] +# Note: Specification of meter id is optional +# +# get meter desc stats of the switch +# GET /stats/meterdesc/[/] +# Note: Specification of meter id is optional # # get meters stats of the switch -# GET /stats/meter/ +# GET /stats/meter/[/] +# Note: Specification of meter id is optional # # get group features stats of the switch # GET /stats/groupfeatures/ # # get groups desc stats of the switch -# GET /stats/groupdesc/ +# GET /stats/groupdesc/[/] +# Note: Specification of group id is optional (OpenFlow 1.5 or later) # # get groups stats of the switch -# GET /stats/group/ +# GET /stats/group/[/] +# Note: Specification of group id is optional # # get ports description of the switch -# GET /stats/portdesc/ +# GET /stats/portdesc/[/] +# Note: Specification of port number is optional (OpenFlow 1.5 or later) # Update the switch stats # @@ -147,6 +182,114 @@ supported_ofctl = { # POST /stats/experimenter/ +class CommandNotFoundError(RyuException): + message = 'No such command : %(cmd)s' + + +class PortNotFoundError(RyuException): + message = 'No such port info: %(port_no)s' + + +def stats_method(method): + def wrapper(self, req, dpid, *args, **kwargs): + # Get datapath instance from DPSet + try: + dp = self.dpset.get(int(str(dpid), 0)) + except ValueError: + LOG.exception('Invalid dpid: %s', dpid) + return Response(status=400) + if dp is 
None: + LOG.error('No such Datapath: %s', dpid) + return Response(status=404) + + # Get lib/ofctl_* module + try: + ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) + except KeyError: + LOG.exception('Unsupported OF version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + # Invoke StatsController method + try: + ret = method(self, req, dp, ofctl, *args, **kwargs) + return Response(content_type='application/json', + body=json.dumps(ret)) + except ValueError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + except AttributeError: + LOG.exception('Unsupported OF request in this version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + return wrapper + + +def command_method(method): + def wrapper(self, req, *args, **kwargs): + # Parse request json body + try: + if req.body: + # We use ast.literal_eval() to parse request json body + # instead of json.loads(). + # Because we need to parse binary format body + # in send_experimenter(). 
+ body = ast.literal_eval(req.body.decode('utf-8')) + else: + body = {} + except SyntaxError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + + # Get datapath_id from request parameters + dpid = body.get('dpid', None) + if not dpid: + try: + dpid = kwargs.pop('dpid') + except KeyError: + LOG.exception('Cannot get dpid from request parameters') + return Response(status=400) + + # Get datapath instance from DPSet + try: + dp = self.dpset.get(int(str(dpid), 0)) + except ValueError: + LOG.exception('Invalid dpid: %s', dpid) + return Response(status=400) + if dp is None: + LOG.error('No such Datapath: %s', dpid) + return Response(status=404) + + # Get lib/ofctl_* module + try: + ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) + except KeyError: + LOG.exception('Unsupported OF version: version=%s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + # Invoke StatsController method + try: + method(self, req, dp, ofctl, body, *args, **kwargs) + return Response(status=200) + except ValueError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + except AttributeError: + LOG.exception('Unsupported OF request in this version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + except CommandNotFoundError as e: + LOG.exception(e.message) + return Response(status=404) + except PortNotFoundError as e: + LOG.exception(e.message) + return Response(status=404) + + return wrapper + + class StatsController(ControllerBase): def __init__(self, req, link, data, **config): super(StatsController, self).__init__(req, link, data, **config) @@ -158,637 +301,200 @@ class StatsController(ControllerBase): body = json.dumps(dps) return Response(content_type='application/json', body=body) - def get_desc_stats(self, req, dpid, **_kwargs): + @stats_method + def get_desc_stats(self, req, dp, ofctl, **kwargs): + return ofctl.get_desc_stats(dp, self.waiters) - if type(dpid) == str and not dpid.isdigit(): - 
LOG.debug('invalid dpid %s', dpid) - return Response(status=400) + @stats_method + def get_flow_desc(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_flow_desc(dp, self.waiters, flow) - dp = self.dpset.get(int(dpid)) + @stats_method + def get_flow_stats(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_flow_stats(dp, self.waiters, flow) - if dp is None: - return Response(status=404) - _ofp_version = dp.ofproto.OFP_VERSION + @stats_method + def get_aggregate_flow_stats(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_aggregate_flow_stats(dp, self.waiters, flow) - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - desc = _ofctl.get_desc_stats(dp, self.waiters) + @stats_method + def get_table_stats(self, req, dp, ofctl, **kwargs): + return ofctl.get_table_stats(dp, self.waiters) + @stats_method + def get_table_features(self, req, dp, ofctl, **kwargs): + return ofctl.get_table_features(dp, self.waiters) + + @stats_method + def get_port_stats(self, req, dp, ofctl, port=None, **kwargs): + if port == "ALL": + port = None + + return ofctl.get_port_stats(dp, self.waiters, port) + + @stats_method + def get_queue_stats(self, req, dp, ofctl, + port=None, queue_id=None, **kwargs): + if port == "ALL": + port = None + + if queue_id == "ALL": + queue_id = None + + return ofctl.get_queue_stats(dp, self.waiters, port, queue_id) + + @stats_method + def get_queue_config(self, req, dp, ofctl, port=None, **kwargs): + if port == "ALL": + port = None + + return ofctl.get_queue_config(dp, self.waiters, port) + + @stats_method + def get_queue_desc(self, req, dp, ofctl, + port=None, queue=None, **_kwargs): + if port == "ALL": + port = None + + if queue == "ALL": + queue = None + + return ofctl.get_queue_desc(dp, self.waiters, port, queue) + + @stats_method + def get_meter_features(self, req, dp, ofctl, **kwargs): + return 
ofctl.get_meter_features(dp, self.waiters) + + @stats_method + def get_meter_config(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + meter_id = None + + return ofctl.get_meter_config(dp, self.waiters, meter_id) + + @stats_method + def get_meter_desc(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + meter_id = None + + return ofctl.get_meter_desc(dp, self.waiters, meter_id) + + @stats_method + def get_meter_stats(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + meter_id = None + + return ofctl.get_meter_stats(dp, self.waiters, meter_id) + + @stats_method + def get_group_features(self, req, dp, ofctl, **kwargs): + return ofctl.get_group_features(dp, self.waiters) + + @stats_method + def get_group_desc(self, req, dp, ofctl, group_id=None, **kwargs): + if dp.ofproto.OFP_VERSION < ofproto_v1_5.OFP_VERSION: + return ofctl.get_group_desc(dp, self.waiters) else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) + return ofctl.get_group_desc(dp, self.waiters, group_id) - body = json.dumps(desc) - return Response(content_type='application/json', body=body) + @stats_method + def get_group_stats(self, req, dp, ofctl, group_id=None, **kwargs): + if group_id == "ALL": + group_id = None - def get_flow_stats(self, req, dpid, **_kwargs): - - if req.body == '': - flow = {} + return ofctl.get_group_stats(dp, self.waiters, group_id) + @stats_method + def get_port_desc(self, req, dp, ofctl, port_no=None, **kwargs): + if dp.ofproto.OFP_VERSION < ofproto_v1_5.OFP_VERSION: + return ofctl.get_port_desc(dp, self.waiters) else: - - try: - flow = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = 
dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - flows = _ofctl.get_flow_stats(dp, self.waiters, flow) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(flows) - return Response(content_type='application/json', body=body) - - def get_aggregate_flow_stats(self, req, dpid, **_kwargs): - - if req.body == '': - flow = {} - - else: - try: - flow = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - flows = _ofctl.get_aggregate_flow_stats(dp, self.waiters, flow) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(flows) - return Response(content_type='application/json', body=body) - - def get_table_stats(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - ports = _ofctl.get_table_stats(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(ports) - return Response(content_type='application/json', body=body) - - def get_table_features(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = 
dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - ports = _ofctl.get_table_features(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(ports) - return Response(content_type='application/json', body=body) - - def get_port_stats(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - ports = _ofctl.get_port_stats(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(ports) - return Response(content_type='application/json', body=body) - - def get_queue_stats(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - queues = _ofctl.get_queue_stats(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(queues) - return Response(content_type='application/json', body=body) - - def get_queue_config(self, req, dpid, port, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - if type(port) == str and not port.isdigit(): - LOG.debug('invalid port %s', port) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - port = int(port) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, 
None) - if _ofctl is not None: - queues = _ofctl.get_queue_config(dp, port, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(queues) - return Response(content_type='application/json', body=body) - - def get_meter_features(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'get_meter_features'): - meters = _ofctl.get_meter_features(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(meters) - return Response(content_type='application/json', body=body) - - def get_meter_config(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'get_meter_config'): - meters = _ofctl.get_meter_config(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(meters) - return Response(content_type='application/json', body=body) - - def get_meter_stats(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, 
None) - - if _ofctl is not None and hasattr(_ofctl, 'get_meter_stats'): - meters = _ofctl.get_meter_stats(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(meters) - return Response(content_type='application/json', body=body) - - def get_group_features(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'get_group_features'): - groups = _ofctl.get_group_features(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(groups) - return Response(content_type='application/json', body=body) - - def get_group_desc(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'get_group_desc'): - groups = _ofctl.get_group_desc(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(groups) - return Response(content_type='application/json', body=body) - - def get_group_stats(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - 
_ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'get_group_stats'): - groups = _ofctl.get_group_stats(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - body = json.dumps(groups) - return Response(content_type='application/json', body=body) - - def get_port_desc(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - groups = _ofctl.get_port_desc(dp, self.waiters) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - body = json.dumps(groups) - return Response(content_type='application/json', body=body) - - def mod_flow_entry(self, req, cmd, **_kwargs): - - try: - flow = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - dpid = flow.get('dpid') - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - if cmd == 'add': - cmd = dp.ofproto.OFPFC_ADD - elif cmd == 'modify': - cmd = dp.ofproto.OFPFC_MODIFY - elif cmd == 'modify_strict': - cmd = dp.ofproto.OFPFC_MODIFY_STRICT - elif cmd == 'delete': - cmd = dp.ofproto.OFPFC_DELETE - elif cmd == 'delete_strict': - cmd = dp.ofproto.OFPFC_DELETE_STRICT - else: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - _ofctl.mod_flow_entry(dp, flow, cmd) - else: - LOG.debug('Unsupported OF protocol') - 
return Response(status=501) - - return Response(status=200) - - def delete_flow_entry(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - - if ofproto_v1_0.OFP_VERSION == _ofp_version: + return ofctl.get_port_desc(dp, self.waiters, port_no) + + @command_method + def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPFC_ADD, + 'modify': dp.ofproto.OFPFC_MODIFY, + 'modify_strict': dp.ofproto.OFPFC_MODIFY_STRICT, + 'delete': dp.ofproto.OFPFC_DELETE, + 'delete_strict': dp.ofproto.OFPFC_DELETE_STRICT, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) + + ofctl.mod_flow_entry(dp, flow, mod_cmd) + + @command_method + def delete_flow_entry(self, req, dp, ofctl, flow, **kwargs): + if ofproto_v1_0.OFP_VERSION == dp.ofproto.OFP_VERSION: flow = {} else: flow = {'table_id': dp.ofproto.OFPTT_ALL} - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - _ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) + ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) + @command_method + def mod_meter_entry(self, req, dp, ofctl, meter, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPMC_ADD, + 'modify': dp.ofproto.OFPMC_MODIFY, + 'delete': dp.ofproto.OFPMC_DELETE, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) - return Response(status=200) + ofctl.mod_meter_entry(dp, meter, mod_cmd) - def mod_meter_entry(self, req, cmd, **_kwargs): + @command_method + def mod_group_entry(self, req, dp, ofctl, group, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPGC_ADD, + 'modify': dp.ofproto.OFPGC_MODIFY, + 
'delete': dp.ofproto.OFPGC_DELETE, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) - try: - flow = ast.literal_eval(req.body) + ofctl.mod_group_entry(dp, group, mod_cmd) - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - dpid = flow.get('dpid') - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - if cmd == 'add': - cmd = dp.ofproto.OFPMC_ADD - elif cmd == 'modify': - cmd = dp.ofproto.OFPMC_MODIFY - elif cmd == 'delete': - cmd = dp.ofproto.OFPMC_DELETE - else: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'mod_meter_entry'): - _ofctl.mod_meter_entry(dp, flow, cmd) - - else: - LOG.debug('Unsupported OF protocol or \ - request not supported in this OF protocol version') - return Response(status=501) - - return Response(status=200) - - def mod_group_entry(self, req, cmd, **_kwargs): - - try: - group = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - dpid = group.get('dpid') - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - if cmd == 'add': - cmd = dp.ofproto.OFPGC_ADD - elif cmd == 'modify': - cmd = dp.ofproto.OFPGC_MODIFY - elif cmd == 'delete': - cmd = dp.ofproto.OFPGC_DELETE - else: - return Response(status=404) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'mod_group_entry'): - _ofctl.mod_group_entry(dp, group, cmd) - - else: - LOG.debug('Unsupported OF protocol or \ - 
request not supported in this OF protocol version') - return Response(status=501) - - return Response(status=200) - - def mod_port_behavior(self, req, cmd, **_kwargs): - - try: - port_config = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - dpid = port_config.get('dpid') - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - port_no = port_config.get('port_no', 0) - if type(port_no) == str and not port_no.isdigit(): - LOG.debug('invalid port_no %s', port_no) - return Response(status=400) - - port_info = self.dpset.port_state[int(dpid)].get(port_no) + @command_method + def mod_port_behavior(self, req, dp, ofctl, port_config, cmd, **kwargs): + port_no = port_config.get('port_no', None) + port_no = int(str(port_no), 0) + port_info = self.dpset.port_state[int(dp.id)].get(port_no) if port_info: port_config.setdefault('hw_addr', port_info.hw_addr) - port_config.setdefault('advertise', port_info.advertised) + if dp.ofproto.OFP_VERSION < ofproto_v1_4.OFP_VERSION: + port_config.setdefault('advertise', port_info.advertised) + else: + port_config.setdefault('properties', port_info.properties) else: - return Response(status=404) - - dp = self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) + raise PortNotFoundError(port_no=port_no) if cmd != 'modify': - return Response(status=404) + raise CommandNotFoundError(cmd=cmd) - _ofp_version = dp.ofproto.OFP_VERSION + ofctl.mod_port_behavior(dp, port_config) - _ofctl = supported_ofctl.get(_ofp_version, None) - if _ofctl is not None: - _ofctl.mod_port_behavior(dp, port_config) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - return Response(status=200) - - def send_experimenter(self, req, dpid, **_kwargs): - - if type(dpid) == str and not dpid.isdigit(): - LOG.debug('invalid dpid %s', dpid) - return Response(status=400) - - dp = 
self.dpset.get(int(dpid)) - - if dp is None: - return Response(status=404) - - try: - exp = ast.literal_eval(req.body) - - except SyntaxError: - LOG.debug('invalid syntax %s', req.body) - return Response(status=400) - - _ofp_version = dp.ofproto.OFP_VERSION - _ofctl = supported_ofctl.get(_ofp_version, None) - - if _ofctl is not None and hasattr(_ofctl, 'send_experimenter'): - _ofctl.send_experimenter(dp, exp) - - else: - LOG.debug('Unsupported OF protocol') - return Response(status=501) - - return Response(status=200) + @command_method + def send_experimenter(self, req, dp, ofctl, exp, **kwargs): + ofctl.send_experimenter(dp, exp) class RestStatsApi(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION, ofproto_v1_2.OFP_VERSION, - ofproto_v1_3.OFP_VERSION] + ofproto_v1_3.OFP_VERSION, + ofproto_v1_4.OFP_VERSION, + ofproto_v1_5.OFP_VERSION] _CONTEXTS = { 'dpset': dpset.DPSet, 'wsgi': WSGIApplication @@ -816,6 +522,11 @@ class RestStatsApi(app_manager.RyuApp): controller=StatsController, action='get_desc_stats', conditions=dict(method=['GET'])) + uri = path + '/flowdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_flow_stats', + conditions=dict(method=['GET', 'POST'])) + uri = path + '/flow/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_flow_stats', @@ -842,16 +553,51 @@ class RestStatsApi(app_manager.RyuApp): controller=StatsController, action='get_port_stats', conditions=dict(method=['GET'])) + uri = path + '/port/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_stats', + conditions=dict(method=['GET'])) + uri = path + '/queue/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_stats', conditions=dict(method=['GET'])) + uri = path + '/queue/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_stats', + conditions=dict(method=['GET'])) + + uri = path + 
'/queue/{dpid}/{port}/{queue_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_stats', + conditions=dict(method=['GET'])) + + uri = path + '/queueconfig/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_config', + conditions=dict(method=['GET'])) + uri = path + '/queueconfig/{dpid}/{port}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_config', conditions=dict(method=['GET'])) + uri = path + '/queuedesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + conditions=dict(method=['GET'])) + + uri = path + '/queuedesc/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + conditions=dict(method=['GET'])) + + uri = path + '/queuedesc/{dpid}/{port}/{queue}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + conditions=dict(method=['GET'])) + uri = path + '/meterfeatures/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_features', @@ -862,11 +608,31 @@ class RestStatsApi(app_manager.RyuApp): controller=StatsController, action='get_meter_config', conditions=dict(method=['GET'])) + uri = path + '/meterconfig/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_config', + conditions=dict(method=['GET'])) + + uri = path + '/meterdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_desc', + conditions=dict(method=['GET'])) + + uri = path + '/meterdesc/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_desc', + conditions=dict(method=['GET'])) + uri = path + '/meter/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_stats', conditions=dict(method=['GET'])) + uri = path + '/meter/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, 
action='get_meter_stats', + conditions=dict(method=['GET'])) + uri = path + '/groupfeatures/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_group_features', @@ -877,16 +643,31 @@ class RestStatsApi(app_manager.RyuApp): controller=StatsController, action='get_group_desc', conditions=dict(method=['GET'])) + uri = path + '/groupdesc/{dpid}/{group_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_desc', + conditions=dict(method=['GET'])) + uri = path + '/group/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_group_stats', conditions=dict(method=['GET'])) + uri = path + '/group/{dpid}/{group_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_stats', + conditions=dict(method=['GET'])) + uri = path + '/portdesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_port_desc', conditions=dict(method=['GET'])) + uri = path + '/portdesc/{dpid}/{port_no}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_desc', + conditions=dict(method=['GET'])) + uri = path + '/flowentry/{cmd}' mapper.connect('stats', uri, controller=StatsController, action='mod_flow_entry', @@ -925,6 +706,7 @@ class RestStatsApi(app_manager.RyuApp): ofp_event.EventOFPTableFeaturesStatsReply, ofp_event.EventOFPPortStatsReply, ofp_event.EventOFPQueueStatsReply, + ofp_event.EventOFPQueueDescStatsReply, ofp_event.EventOFPMeterStatsReply, ofp_event.EventOFPMeterFeaturesStatsReply, ofp_event.EventOFPMeterConfigStatsReply, @@ -949,7 +731,7 @@ class RestStatsApi(app_manager.RyuApp): flags = dp.ofproto.OFPSF_REPLY_MORE elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION: flags = dp.ofproto.OFPSF_REPLY_MORE - elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION: + elif dp.ofproto.OFP_VERSION >= ofproto_v1_3.OFP_VERSION: flags = dp.ofproto.OFPMPF_REPLY_MORE if msg.flags & flags: diff --git a/ryu/app/rest_conf_switch.py 
b/ryu/app/rest_conf_switch.py index e397a37b..9767f363 100644 --- a/ryu/app/rest_conf_switch.py +++ b/ryu/app/rest_conf_switch.py @@ -111,7 +111,11 @@ class ConfSwitchController(ControllerBase): def set_key(self, req, dpid, key, **_kwargs): def _set_val(dpid, key): - val = json.loads(req.body) + try: + val = req.json if req.body else {} + except ValueError: + return Response(status=http_client.BAD_REQUEST, + body='invalid syntax %s' % req.body) self.conf_switch.set_key(dpid, key, val) return None diff --git a/ryu/app/rest_firewall.py b/ryu/app/rest_firewall.py index 322ddb1d..a04525f7 100644 --- a/ryu/app/rest_firewall.py +++ b/ryu/app/rest_firewall.py @@ -492,8 +492,8 @@ class FirewallController(ControllerBase): def _set_rule(self, req, switchid, vlan_id=VLANID_NONE): try: - rule = json.loads(req.body) - except SyntaxError: + rule = req.json if req.body else {} + except ValueError: FirewallController._LOGGER.debug('invalid syntax %s', req.body) return Response(status=400) @@ -516,8 +516,8 @@ class FirewallController(ControllerBase): def _delete_rule(self, req, switchid, vlan_id=VLANID_NONE): try: - ruleid = json.loads(req.body) - except SyntaxError: + ruleid = req.json if req.body else {} + except ValueError: FirewallController._LOGGER.debug('invalid syntax %s', req.body) return Response(status=400) diff --git a/ryu/app/rest_qos.py b/ryu/app/rest_qos.py index 051c96b3..89185a6e 100644 --- a/ryu/app/rest_qos.py +++ b/ryu/app/rest_qos.py @@ -506,8 +506,8 @@ class QoSController(ControllerBase): def _access_switch(self, req, switchid, vlan_id, func, waiters): try: - rest = json.loads(req.body) if req.body else {} - except SyntaxError: + rest = req.json if req.body else {} + except ValueError: QoSController._LOGGER.debug('invalid syntax %s', req.body) return Response(status=400) diff --git a/ryu/app/rest_router.py b/ryu/app/rest_router.py index 100d565e..2098ffae 100644 --- a/ryu/app/rest_router.py +++ b/ryu/app/rest_router.py @@ -376,42 +376,45 @@ class 
RouterController(ControllerBase): @rest_command def get_data(self, req, switch_id, **_kwargs): return self._access_router(switch_id, VLANID_NONE, - 'get_data', req.body) + 'get_data', req) # GET /router/{switch_id}/{vlan_id} @rest_command def get_vlan_data(self, req, switch_id, vlan_id, **_kwargs): return self._access_router(switch_id, vlan_id, - 'get_data', req.body) + 'get_data', req) # POST /router/{switch_id} @rest_command def set_data(self, req, switch_id, **_kwargs): return self._access_router(switch_id, VLANID_NONE, - 'set_data', req.body) + 'set_data', req) # POST /router/{switch_id}/{vlan_id} @rest_command def set_vlan_data(self, req, switch_id, vlan_id, **_kwargs): return self._access_router(switch_id, vlan_id, - 'set_data', req.body) + 'set_data', req) # DELETE /router/{switch_id} @rest_command def delete_data(self, req, switch_id, **_kwargs): return self._access_router(switch_id, VLANID_NONE, - 'delete_data', req.body) + 'delete_data', req) # DELETE /router/{switch_id}/{vlan_id} @rest_command def delete_vlan_data(self, req, switch_id, vlan_id, **_kwargs): return self._access_router(switch_id, vlan_id, - 'delete_data', req.body) + 'delete_data', req) - def _access_router(self, switch_id, vlan_id, func, rest_param): + def _access_router(self, switch_id, vlan_id, func, req): rest_message = [] routers = self._get_router(switch_id) - param = json.loads(rest_param) if rest_param else {} + try: + param = req.json if req.body else {} + except ValueError: + raise SyntaxError('invalid syntax %s', req.body) for router in routers.values(): function = getattr(router, func) data = function(vlan_id, param, self.waiters) diff --git a/ryu/base/app_manager.py b/ryu/base/app_manager.py index 3d5d8959..f6842591 100644 --- a/ryu/base/app_manager.py +++ b/ryu/base/app_manager.py @@ -158,6 +158,7 @@ class RyuApp(object): self.threads = [] self.main_thread = None self.events = hub.Queue(128) + self._events_sem = hub.BoundedSemaphore(self.events.maxsize) if 
hasattr(self.__class__, 'LOGGER_NAME'): self.logger = logging.getLogger(self.__class__.LOGGER_NAME) else: @@ -280,13 +281,25 @@ class RyuApp(object): def _event_loop(self): while self.is_active or not self.events.empty(): ev, state = self.events.get() + self._events_sem.release() if ev == self._event_stop: continue handlers = self.get_handlers(ev, state) for handler in handlers: - handler(ev) + try: + handler(ev) + except hub.TaskExit: + # Normal exit. + # Propagate upwards, so we leave the event loop. + raise + except: + LOG.exception('%s: Exception occurred during handler processing. ' + 'Backtrace from offending handler ' + '[%s] servicing event [%s] follows.', + self.name, handler.__name__, ev.__class__.__name__) def _send_event(self, ev, state): + self._events_sem.acquire() self.events.put((ev, state)) def send_event(self, name, ev, state=None): @@ -336,7 +349,7 @@ class RyuApp(object): class AppManager(object): - # singletone + # singleton _instance = None @staticmethod @@ -373,6 +386,7 @@ class AppManager(object): self.applications = {} self.contexts_cls = {} self.contexts = {} + self.close_sem = hub.Semaphore() def load_app(self, name): mod = utils.import_module(name) @@ -520,7 +534,7 @@ class AppManager(object): self._close(app) events = app.events if not events.empty(): - app.logger.debug('%s events remians %d', app.name, events.qsize()) + app.logger.debug('%s events remains %d', app.name, events.qsize()) def close(self): def close_all(close_dict): @@ -528,7 +542,10 @@ class AppManager(object): self._close(app) close_dict.clear() - for app_name in list(self.applications.keys()): - self.uninstantiate(app_name) - assert not self.applications - close_all(self.contexts) + # This semaphore prevents parallel execution of this function, + # as run_apps's finally clause starts another close() call. 
+ with self.close_sem: + for app_name in list(self.applications.keys()): + self.uninstantiate(app_name) + assert not self.applications + close_all(self.contexts) diff --git a/ryu/contrib/__init__.py b/ryu/contrib/__init__.py index b79831ee..6d274af0 100644 --- a/ryu/contrib/__init__.py +++ b/ryu/contrib/__init__.py @@ -1,7 +1,9 @@ import sys + _orig_sys_path = None + def update_module_path(): # Adjust module loading path for third party libraries import os @@ -16,6 +18,7 @@ def update_module_path(): sys.path.remove(path) sys.path.insert(0, path) # prioritize our own copy than system's + def restore_module_path(): global _orig_sys_path diff --git a/ryu/contrib/ovs/__init__.py b/ryu/contrib/ovs/__init__.py deleted file mode 100644 index 218d8921..00000000 --- a/ryu/contrib/ovs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# This file intentionally left blank. diff --git a/ryu/contrib/ovs/daemon.py b/ryu/contrib/ovs/daemon.py deleted file mode 100644 index 650d2504..00000000 --- a/ryu/contrib/ovs/daemon.py +++ /dev/null @@ -1,537 +0,0 @@ -# Copyright (c) 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import errno -import fcntl -import os -import resource -import signal -import sys -import time - -import ovs.dirs -import ovs.fatal_signal -#import ovs.lockfile -import ovs.process -import ovs.socket_util -import ovs.timeval -import ovs.util -import ovs.vlog - -vlog = ovs.vlog.Vlog("daemon") - -# --detach: Should we run in the background? 
-_detach = False - -# --pidfile: Name of pidfile (null if none). -_pidfile = None - -# Our pidfile's inode and device, if we have created one. -_pidfile_dev = None -_pidfile_ino = None - -# --overwrite-pidfile: Create pidfile even if one already exists and is locked? -_overwrite_pidfile = False - -# --no-chdir: Should we chdir to "/"? -_chdir = True - -# --monitor: Should a supervisory process monitor the daemon and restart it if -# it dies due to an error signal? -_monitor = False - -# File descriptor used by daemonize_start() and daemonize_complete(). -_daemonize_fd = None - -RESTART_EXIT_CODE = 5 - - -def make_pidfile_name(name): - """Returns the file name that would be used for a pidfile if 'name' were - provided to set_pidfile().""" - if name is None or name == "": - return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME) - else: - return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name) - - -def set_pidfile(name): - """Sets up a following call to daemonize() to create a pidfile named - 'name'. If 'name' begins with '/', then it is treated as an absolute path. - Otherwise, it is taken relative to ovs.util.RUNDIR, which is - $(prefix)/var/run by default. - - If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is - used.""" - global _pidfile - _pidfile = make_pidfile_name(name) - - -def get_pidfile(): - """Returns an absolute path to the configured pidfile, or None if no - pidfile is configured.""" - return _pidfile - - -def set_no_chdir(): - """Sets that we do not chdir to "/".""" - global _chdir - _chdir = False - - -def is_chdir_enabled(): - """Will we chdir to "/" as part of daemonizing?""" - return _chdir - - -def ignore_existing_pidfile(): - """Normally, daemonize() or daemonize_start() will terminate the program - with a message if a locked pidfile already exists. 
If this function is - called, an existing pidfile will be replaced, with a warning.""" - global _overwrite_pidfile - _overwrite_pidfile = True - - -def set_detach(): - """Sets up a following call to daemonize() to detach from the foreground - session, running this process in the background.""" - global _detach - _detach = True - - -def get_detach(): - """Will daemonize() really detach?""" - return _detach - - -def set_monitor(): - """Sets up a following call to daemonize() to fork a supervisory process to - monitor the daemon and restart it if it dies due to an error signal.""" - global _monitor - _monitor = True - - -def _fatal(msg): - vlog.err(msg) - sys.stderr.write("%s\n" % msg) - sys.exit(1) - - -def _make_pidfile(): - """If a pidfile has been configured, creates it and stores the running - process's pid in it. Ensures that the pidfile will be deleted when the - process exits.""" - pid = os.getpid() - - # Create a temporary pidfile. - tmpfile = "%s.tmp%d" % (_pidfile, pid) - ovs.fatal_signal.add_file_to_unlink(tmpfile) - try: - # This is global to keep Python from garbage-collecting and - # therefore closing our file after this function exits. That would - # unlock the lock for us, and we don't want that. - global file_handle - - file_handle = open(tmpfile, "w") - except IOError, e: - _fatal("%s: create failed (%s)" % (tmpfile, e.strerror)) - - try: - s = os.fstat(file_handle.fileno()) - except IOError, e: - _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror)) - - try: - file_handle.write("%s\n" % pid) - file_handle.flush() - except OSError, e: - _fatal("%s: write failed: %s" % (tmpfile, e.strerror)) - - try: - fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError, e: - _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror)) - - # Rename or link it to the correct name. 
- if _overwrite_pidfile: - try: - os.rename(tmpfile, _pidfile) - except OSError, e: - _fatal("failed to rename \"%s\" to \"%s\" (%s)" - % (tmpfile, _pidfile, e.strerror)) - else: - while True: - try: - os.link(tmpfile, _pidfile) - error = 0 - except OSError, e: - error = e.errno - if error == errno.EEXIST: - _check_already_running() - elif error != errno.EINTR: - break - if error: - _fatal("failed to link \"%s\" as \"%s\" (%s)" - % (tmpfile, _pidfile, os.strerror(error))) - - # Ensure that the pidfile will get deleted on exit. - ovs.fatal_signal.add_file_to_unlink(_pidfile) - - # Delete the temporary pidfile if it still exists. - if not _overwrite_pidfile: - error = ovs.fatal_signal.unlink_file_now(tmpfile) - if error: - _fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error))) - - global _pidfile_dev - global _pidfile_ino - _pidfile_dev = s.st_dev - _pidfile_ino = s.st_ino - - -def daemonize(): - """If configured with set_pidfile() or set_detach(), creates the pid file - and detaches from the foreground session.""" - daemonize_start() - daemonize_complete() - - -def _waitpid(pid, options): - while True: - try: - return os.waitpid(pid, options) - except OSError, e: - if e.errno == errno.EINTR: - pass - return -e.errno, 0 - - -def _fork_and_wait_for_startup(): - try: - rfd, wfd = os.pipe() - except OSError, e: - sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno)) - sys.exit(1) - - try: - pid = os.fork() - except OSError, e: - sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno)) - sys.exit(1) - - if pid > 0: - # Running in parent process. - os.close(wfd) - ovs.fatal_signal.fork() - while True: - try: - s = os.read(rfd, 1) - error = 0 - except OSError, e: - s = "" - error = e.errno - if error != errno.EINTR: - break - if len(s) != 1: - retval, status = _waitpid(pid, 0) - if retval == pid: - if os.WIFEXITED(status) and os.WEXITSTATUS(status): - # Child exited with an error. Convey the same error to - # our parent process as a courtesy. 
- sys.exit(os.WEXITSTATUS(status)) - else: - sys.stderr.write("fork child failed to signal " - "startup (%s)\n" - % ovs.process.status_msg(status)) - else: - assert retval < 0 - sys.stderr.write("waitpid failed (%s)\n" - % os.strerror(-retval)) - sys.exit(1) - - os.close(rfd) - else: - # Running in parent process. - os.close(rfd) - ovs.timeval.postfork() - #ovs.lockfile.postfork() - - global _daemonize_fd - _daemonize_fd = wfd - return pid - - -def _fork_notify_startup(fd): - if fd is not None: - error, bytes_written = ovs.socket_util.write_fully(fd, "0") - if error: - sys.stderr.write("could not write to pipe\n") - sys.exit(1) - os.close(fd) - - -def _should_restart(status): - global RESTART_EXIT_CODE - - if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE: - return True - - if os.WIFSIGNALED(status): - for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL", - "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"): - if os.WTERMSIG(status) == getattr(signal, signame, None): - return True - return False - - -def _monitor_daemon(daemon_pid): - # XXX should log daemon's stderr output at startup time - # XXX should use setproctitle module if available - last_restart = None - while True: - retval, status = _waitpid(daemon_pid, 0) - if retval < 0: - sys.stderr.write("waitpid failed\n") - sys.exit(1) - elif retval == daemon_pid: - status_msg = ("pid %d died, %s" - % (daemon_pid, ovs.process.status_msg(status))) - - if _should_restart(status): - if os.WCOREDUMP(status): - # Disable further core dumps to save disk space. - try: - resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) - except resource.error: - vlog.warn("failed to disable core dumps") - - # Throttle restarts to no more than once every 10 seconds. 
- if (last_restart is not None and - ovs.timeval.msec() < last_restart + 10000): - vlog.warn("%s, waiting until 10 seconds since last " - "restart" % status_msg) - while True: - now = ovs.timeval.msec() - wakeup = last_restart + 10000 - if now > wakeup: - break - print "sleep %f" % ((wakeup - now) / 1000.0) - time.sleep((wakeup - now) / 1000.0) - last_restart = ovs.timeval.msec() - - vlog.err("%s, restarting" % status_msg) - daemon_pid = _fork_and_wait_for_startup() - if not daemon_pid: - break - else: - vlog.info("%s, exiting" % status_msg) - sys.exit(0) - - # Running in new daemon process. - - -def _close_standard_fds(): - """Close stdin, stdout, stderr. If we're started from e.g. an SSH session, - then this keeps us from holding that session open artificially.""" - null_fd = ovs.socket_util.get_null_fd() - if null_fd >= 0: - os.dup2(null_fd, 0) - os.dup2(null_fd, 1) - os.dup2(null_fd, 2) - - -def daemonize_start(): - """If daemonization is configured, then starts daemonization, by forking - and returning in the child process. The parent process hangs around until - the child lets it know either that it completed startup successfully (by - calling daemon_complete()) or that it failed to start up (by exiting with a - nonzero exit code).""" - - if _detach: - if _fork_and_wait_for_startup() > 0: - # Running in parent process. - sys.exit(0) - # Running in daemon or monitor process. - - if _monitor: - saved_daemonize_fd = _daemonize_fd - daemon_pid = _fork_and_wait_for_startup() - if daemon_pid > 0: - # Running in monitor process. 
- _fork_notify_startup(saved_daemonize_fd) - _close_standard_fds() - _monitor_daemon(daemon_pid) - # Running in daemon process - - if _pidfile: - _make_pidfile() - - -def daemonize_complete(): - """If daemonization is configured, then this function notifies the parent - process that the child process has completed startup successfully.""" - _fork_notify_startup(_daemonize_fd) - - if _detach: - os.setsid() - if _chdir: - os.chdir("/") - _close_standard_fds() - - -def usage(): - sys.stdout.write(""" -Daemon options: - --detach run in background as daemon - --no-chdir do not chdir to '/' - --pidfile[=FILE] create pidfile (default: %s/%s.pid) - --overwrite-pidfile with --pidfile, start even if already running -""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)) - - -def __read_pidfile(pidfile, delete_if_stale): - if _pidfile_dev is not None: - try: - s = os.stat(pidfile) - if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev: - # It's our own pidfile. We can't afford to open it, - # because closing *any* fd for a file that a process - # has locked also releases all the locks on that file. - # - # Fortunately, we know the associated pid anyhow. - return os.getpid() - except OSError: - pass - - try: - file_handle = open(pidfile, "r+") - except IOError, e: - if e.errno == errno.ENOENT and delete_if_stale: - return 0 - vlog.warn("%s: open: %s" % (pidfile, e.strerror)) - return -e.errno - - # Python fcntl doesn't directly support F_GETLK so we have to just try - # to lock it. - try: - fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB) - - # pidfile exists but wasn't locked by anyone. Now we have the lock. - if not delete_if_stale: - file_handle.close() - vlog.warn("%s: pid file is stale" % pidfile) - return -errno.ESRCH - - # Is the file we have locked still named 'pidfile'? 
- try: - raced = False - s = os.stat(pidfile) - s2 = os.fstat(file_handle.fileno()) - if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev: - raced = True - except IOError: - raced = True - if raced: - vlog.warn("%s: lost race to delete pidfile" % pidfile) - return -errno.EALREADY - - # We won the right to delete the stale pidfile. - try: - os.unlink(pidfile) - except IOError, e: - vlog.warn("%s: failed to delete stale pidfile (%s)" - % (pidfile, e.strerror)) - return -e.errno - else: - vlog.dbg("%s: deleted stale pidfile" % pidfile) - file_handle.close() - return 0 - except IOError, e: - if e.errno not in [errno.EACCES, errno.EAGAIN]: - vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror)) - return -e.errno - - # Someone else has the pidfile locked. - try: - try: - error = int(file_handle.readline()) - except IOError, e: - vlog.warn("%s: read: %s" % (pidfile, e.strerror)) - error = -e.errno - except ValueError: - vlog.warn("%s does not contain a pid" % pidfile) - error = -errno.EINVAL - - return error - finally: - try: - file_handle.close() - except IOError: - pass - - -def read_pidfile(pidfile): - """Opens and reads a PID from 'pidfile'. 
Returns the positive PID if - successful, otherwise a negative errno value.""" - return __read_pidfile(pidfile, False) - - -def _check_already_running(): - pid = __read_pidfile(_pidfile, True) - if pid > 0: - _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid)) - elif pid < 0: - _fatal("%s: pidfile check failed (%s), aborting" - % (_pidfile, os.strerror(pid))) - - -def add_args(parser): - """Populates 'parser', an ArgumentParser allocated using the argparse - module, with the command line arguments required by the daemon module.""" - - pidfile = make_pidfile_name(None) - - group = parser.add_argument_group(title="Daemon Options") - group.add_argument("--detach", action="store_true", - help="Run in background as a daemon.") - group.add_argument("--no-chdir", action="store_true", - help="Do not chdir to '/'.") - group.add_argument("--monitor", action="store_true", - help="Monitor %s process." % ovs.util.PROGRAM_NAME) - group.add_argument("--pidfile", nargs="?", const=pidfile, - help="Create pidfile (default %s)." % pidfile) - group.add_argument("--overwrite-pidfile", action="store_true", - help="With --pidfile, start even if already running.") - - -def handle_args(args): - """Handles daemon module settings in 'args'. 'args' is an object - containing values parsed by the parse_args() method of ArgumentParser. The - parent ArgumentParser should have been prepared by add_args() before - calling parse_args().""" - - if args.detach: - set_detach() - - if args.no_chdir: - set_no_chdir() - - if args.pidfile: - set_pidfile(args.pidfile) - - if args.overwrite_pidfile: - ignore_existing_pidfile() - - if args.monitor: - set_monitor() diff --git a/ryu/contrib/ovs/db/__init__.py b/ryu/contrib/ovs/db/__init__.py deleted file mode 100644 index 218d8921..00000000 --- a/ryu/contrib/ovs/db/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# This file intentionally left blank. 
diff --git a/ryu/contrib/ovs/db/data.py b/ryu/contrib/ovs/db/data.py deleted file mode 100644 index 55e7a732..00000000 --- a/ryu/contrib/ovs/db/data.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright (c) 2009, 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import uuid - -import ovs.poller -import ovs.socket_util -import ovs.json -import ovs.jsonrpc -import ovs.ovsuuid - -import ovs.db.parser -from ovs.db import error -import ovs.db.types - - -class ConstraintViolation(error.Error): - def __init__(self, msg, json=None): - error.Error.__init__(self, msg, json, tag="constraint violation") - - -def escapeCString(src): - dst = [] - for c in src: - if c in "\\\"": - dst.append("\\" + c) - elif ord(c) < 32: - if c == '\n': - dst.append('\\n') - elif c == '\r': - dst.append('\\r') - elif c == '\a': - dst.append('\\a') - elif c == '\b': - dst.append('\\b') - elif c == '\f': - dst.append('\\f') - elif c == '\t': - dst.append('\\t') - elif c == '\v': - dst.append('\\v') - else: - dst.append('\\%03o' % ord(c)) - else: - dst.append(c) - return ''.join(dst) - - -def returnUnchanged(x): - return x - - -class Atom(object): - def __init__(self, type_, value=None): - self.type = type_ - if value is not None: - self.value = value - else: - self.value = type_.default_atom() - - def __cmp__(self, other): - if not isinstance(other, Atom) or self.type != other.type: - return NotImplemented - elif self.value < other.value: - return -1 - elif self.value > 
other.value: - return 1 - else: - return 0 - - def __hash__(self): - return hash(self.value) - - @staticmethod - def default(type_): - """Returns the default value for the given type_, which must be an - instance of ovs.db.types.AtomicType. - - The default value for each atomic type is; - - - 0, for integer or real atoms. - - - False, for a boolean atom. - - - "", for a string atom. - - - The all-zeros UUID, for a UUID atom.""" - return Atom(type_) - - def is_default(self): - return self == self.default(self.type) - - @staticmethod - def from_json(base, json, symtab=None): - type_ = base.type - json = ovs.db.parser.float_to_int(json) - if ((type_ == ovs.db.types.IntegerType and type(json) in [int, long]) - or (type_ == ovs.db.types.RealType - and type(json) in [int, long, float]) - or (type_ == ovs.db.types.BooleanType and type(json) == bool) - or (type_ == ovs.db.types.StringType - and type(json) in [str, unicode])): - atom = Atom(type_, json) - elif type_ == ovs.db.types.UuidType: - atom = Atom(type_, ovs.ovsuuid.from_json(json, symtab)) - else: - raise error.Error("expected %s" % type_.to_string(), json) - atom.check_constraints(base) - return atom - - @staticmethod - def from_python(base, value): - value = ovs.db.parser.float_to_int(value) - if type(value) in base.type.python_types: - atom = Atom(base.type, value) - else: - raise error.Error("expected %s, got %s" % (base.type, type(value))) - atom.check_constraints(base) - return atom - - def check_constraints(self, base): - """Checks whether 'atom' meets the constraints (if any) defined in - 'base' and raises an ovs.db.error.Error if any constraint is violated. - - 'base' and 'atom' must have the same type. 
- Checking UUID constraints is deferred to transaction commit time, so - this function does nothing for UUID constraints.""" - assert base.type == self.type - if base.enum is not None and self not in base.enum: - raise ConstraintViolation( - "%s is not one of the allowed values (%s)" - % (self.to_string(), base.enum.to_string())) - elif base.type in [ovs.db.types.IntegerType, ovs.db.types.RealType]: - if ((base.min is None or self.value >= base.min) and - (base.max is None or self.value <= base.max)): - pass - elif base.min is not None and base.max is not None: - raise ConstraintViolation( - "%s is not in the valid range %.15g to %.15g (inclusive)" - % (self.to_string(), base.min, base.max)) - elif base.min is not None: - raise ConstraintViolation( - "%s is less than minimum allowed value %.15g" - % (self.to_string(), base.min)) - else: - raise ConstraintViolation( - "%s is greater than maximum allowed value %.15g" - % (self.to_string(), base.max)) - elif base.type == ovs.db.types.StringType: - # XXX The C version validates that the string is valid UTF-8 here. - # Do we need to do that in Python too? 
- s = self.value - length = len(s) - if length < base.min_length: - raise ConstraintViolation( - '"%s" length %d is less than minimum allowed length %d' - % (s, length, base.min_length)) - elif length > base.max_length: - raise ConstraintViolation( - '"%s" length %d is greater than maximum allowed ' - 'length %d' % (s, length, base.max_length)) - - def to_json(self): - if self.type == ovs.db.types.UuidType: - return ovs.ovsuuid.to_json(self.value) - else: - return self.value - - def cInitAtom(self, var): - if self.type == ovs.db.types.IntegerType: - return ['%s.integer = %d;' % (var, self.value)] - elif self.type == ovs.db.types.RealType: - return ['%s.real = %.15g;' % (var, self.value)] - elif self.type == ovs.db.types.BooleanType: - if self.value: - return ['%s.boolean = true;'] - else: - return ['%s.boolean = false;'] - elif self.type == ovs.db.types.StringType: - return ['%s.string = xstrdup("%s");' - % (var, escapeCString(self.value))] - elif self.type == ovs.db.types.UuidType: - return ovs.ovsuuid.to_c_assignment(self.value, var) - - def toEnglish(self, escapeLiteral=returnUnchanged): - if self.type == ovs.db.types.IntegerType: - return '%d' % self.value - elif self.type == ovs.db.types.RealType: - return '%.15g' % self.value - elif self.type == ovs.db.types.BooleanType: - if self.value: - return 'true' - else: - return 'false' - elif self.type == ovs.db.types.StringType: - return escapeLiteral(self.value) - elif self.type == ovs.db.types.UuidType: - return self.value.value - - __need_quotes_re = re.compile("$|true|false|[^_a-zA-Z]|.*[^-._a-zA-Z]") - - @staticmethod - def __string_needs_quotes(s): - return Atom.__need_quotes_re.match(s) - - def to_string(self): - if self.type == ovs.db.types.IntegerType: - return '%d' % self.value - elif self.type == ovs.db.types.RealType: - return '%.15g' % self.value - elif self.type == ovs.db.types.BooleanType: - if self.value: - return 'true' - else: - return 'false' - elif self.type == ovs.db.types.StringType: - if 
Atom.__string_needs_quotes(self.value): - return ovs.json.to_string(self.value) - else: - return self.value - elif self.type == ovs.db.types.UuidType: - return str(self.value) - - @staticmethod - def new(x): - if type(x) in [int, long]: - t = ovs.db.types.IntegerType - elif type(x) == float: - t = ovs.db.types.RealType - elif x in [False, True]: - t = ovs.db.types.BooleanType - elif type(x) in [str, unicode]: - t = ovs.db.types.StringType - elif isinstance(x, uuid): - t = ovs.db.types.UuidType - else: - raise TypeError - return Atom(t, x) - - -class Datum(object): - def __init__(self, type_, values={}): - self.type = type_ - self.values = values - - def __cmp__(self, other): - if not isinstance(other, Datum): - return NotImplemented - elif self.values < other.values: - return -1 - elif self.values > other.values: - return 1 - else: - return 0 - - __hash__ = None - - def __contains__(self, item): - return item in self.values - - def copy(self): - return Datum(self.type, dict(self.values)) - - @staticmethod - def default(type_): - if type_.n_min == 0: - values = {} - elif type_.is_map(): - values = {type_.key.default(): type_.value.default()} - else: - values = {type_.key.default(): None} - return Datum(type_, values) - - def is_default(self): - return self == Datum.default(self.type) - - def check_constraints(self): - """Checks that each of the atoms in 'datum' conforms to the constraints - specified by its 'type' and raises an ovs.db.error.Error. - - This function is not commonly useful because the most ordinary way to - obtain a datum is ultimately via Datum.from_json() or Atom.from_json(), - which check constraints themselves.""" - for keyAtom, valueAtom in self.values.iteritems(): - keyAtom.check_constraints(self.type.key) - if valueAtom is not None: - valueAtom.check_constraints(self.type.value) - - @staticmethod - def from_json(type_, json, symtab=None): - """Parses 'json' as a datum of the type described by 'type'. If - successful, returns a new datum. 
On failure, raises an - ovs.db.error.Error. - - Violations of constraints expressed by 'type' are treated as errors. - - If 'symtab' is nonnull, then named UUIDs in 'symtab' are accepted. - Refer to ovsdb/SPECS for information about this, and for the syntax - that this function accepts.""" - is_map = type_.is_map() - if (is_map or - (type(json) == list and len(json) > 0 and json[0] == "set")): - if is_map: - class_ = "map" - else: - class_ = "set" - - inner = ovs.db.parser.unwrap_json(json, class_, [list, tuple], - "array") - n = len(inner) - if n < type_.n_min or n > type_.n_max: - raise error.Error("%s must have %d to %d members but %d are " - "present" % (class_, type_.n_min, - type_.n_max, n), - json) - - values = {} - for element in inner: - if is_map: - key, value = ovs.db.parser.parse_json_pair(element) - keyAtom = Atom.from_json(type_.key, key, symtab) - valueAtom = Atom.from_json(type_.value, value, symtab) - else: - keyAtom = Atom.from_json(type_.key, element, symtab) - valueAtom = None - - if keyAtom in values: - if is_map: - raise error.Error("map contains duplicate key") - else: - raise error.Error("set contains duplicate") - - values[keyAtom] = valueAtom - - return Datum(type_, values) - else: - keyAtom = Atom.from_json(type_.key, json, symtab) - return Datum(type_, {keyAtom: None}) - - def to_json(self): - if self.type.is_map(): - return ["map", [[k.to_json(), v.to_json()] - for k, v in sorted(self.values.items())]] - elif len(self.values) == 1: - key = self.values.keys()[0] - return key.to_json() - else: - return ["set", [k.to_json() for k in sorted(self.values.keys())]] - - def to_string(self): - head = tail = None - if self.type.n_max > 1 or len(self.values) == 0: - if self.type.is_map(): - head = "{" - tail = "}" - else: - head = "[" - tail = "]" - - s = [] - if head: - s.append(head) - - for i, key in enumerate(sorted(self.values)): - if i: - s.append(", ") - - s.append(key.to_string()) - if self.type.is_map(): - s.append("=") - 
s.append(self.values[key].to_string()) - - if tail: - s.append(tail) - return ''.join(s) - - def as_list(self): - if self.type.is_map(): - return [[k.value, v.value] for k, v in self.values.iteritems()] - else: - return [k.value for k in self.values.iterkeys()] - - def as_dict(self): - return dict(self.values) - - def as_scalar(self): - if len(self.values) == 1: - if self.type.is_map(): - k, v = self.values.iteritems()[0] - return [k.value, v.value] - else: - return self.values.keys()[0].value - else: - return None - - def to_python(self, uuid_to_row): - """Returns this datum's value converted into a natural Python - representation of this datum's type, according to the following - rules: - - - If the type has exactly one value and it is not a map (that is, - self.type.is_scalar() returns True), then the value is: - - * An int or long, for an integer column. - - * An int or long or float, for a real column. - - * A bool, for a boolean column. - - * A str or unicode object, for a string column. - - * A uuid.UUID object, for a UUID column without a ref_table. - - * An object represented the referenced row, for a UUID column with - a ref_table. (For the Idl, this object will be an ovs.db.idl.Row - object.) - - If some error occurs (e.g. the database server's idea of the column - is different from the IDL's idea), then the default value for the - scalar type is used (see Atom.default()). - - - Otherwise, if the type is not a map, then the value is a Python list - whose elements have the types described above. - - - Otherwise, the type is a map, and the value is a Python dict that - maps from key to value, with key and value types determined as - described above. 
- - 'uuid_to_row' must be a function that takes a value and an - ovs.db.types.BaseType and translates UUIDs into row objects.""" - if self.type.is_scalar(): - value = uuid_to_row(self.as_scalar(), self.type.key) - if value is None: - return self.type.key.default() - else: - return value - elif self.type.is_map(): - value = {} - for k, v in self.values.iteritems(): - dk = uuid_to_row(k.value, self.type.key) - dv = uuid_to_row(v.value, self.type.value) - if dk is not None and dv is not None: - value[dk] = dv - return value - else: - s = set() - for k in self.values: - dk = uuid_to_row(k.value, self.type.key) - if dk is not None: - s.add(dk) - return sorted(s) - - @staticmethod - def from_python(type_, value, row_to_uuid): - """Returns a new Datum with the given ovs.db.types.Type 'type_'. The - new datum's value is taken from 'value', which must take the form - described as a valid return value from Datum.to_python() for 'type'. - - Each scalar value within 'value' is initally passed through - 'row_to_uuid', which should convert objects that represent rows (if - any) into uuid.UUID objects and return other data unchanged. 
- - Raises ovs.db.error.Error if 'value' is not in an appropriate form for - 'type_'.""" - d = {} - if type(value) == dict: - for k, v in value.iteritems(): - ka = Atom.from_python(type_.key, row_to_uuid(k)) - va = Atom.from_python(type_.value, row_to_uuid(v)) - d[ka] = va - elif type(value) in (list, tuple): - for k in value: - ka = Atom.from_python(type_.key, row_to_uuid(k)) - d[ka] = None - else: - ka = Atom.from_python(type_.key, row_to_uuid(value)) - d[ka] = None - - datum = Datum(type_, d) - datum.check_constraints() - if not datum.conforms_to_type(): - raise error.Error("%d values when type requires between %d and %d" - % (len(d), type_.n_min, type_.n_max)) - - return datum - - def __getitem__(self, key): - if not isinstance(key, Atom): - key = Atom.new(key) - if not self.type.is_map(): - raise IndexError - elif key not in self.values: - raise KeyError - else: - return self.values[key].value - - def get(self, key, default=None): - if not isinstance(key, Atom): - key = Atom.new(key) - if key in self.values: - return self.values[key].value - else: - return default - - def __str__(self): - return self.to_string() - - def conforms_to_type(self): - n = len(self.values) - return self.type.n_min <= n <= self.type.n_max - - def cInitDatum(self, var): - if len(self.values) == 0: - return ["ovsdb_datum_init_empty(%s);" % var] - - s = ["%s->n = %d;" % (var, len(self.values))] - s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);" - % (var, len(self.values), var)] - - for i, key in enumerate(sorted(self.values)): - s += key.cInitAtom("%s->keys[%d]" % (var, i)) - - if self.type.value: - s += ["%s->values = xmalloc(%d * sizeof *%s->values);" - % (var, len(self.values), var)] - for i, (key, value) in enumerate(sorted(self.values.items())): - s += value.cInitAtom("%s->values[%d]" % (var, i)) - else: - s += ["%s->values = NULL;" % var] - - if len(self.values) > 1: - s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);" - % (var, self.type.key.type.to_string().upper())] - - 
return s diff --git a/ryu/contrib/ovs/db/error.py b/ryu/contrib/ovs/db/error.py deleted file mode 100644 index d9217e41..00000000 --- a/ryu/contrib/ovs/db/error.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2009, 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import ovs.json - - -class Error(Exception): - def __init__(self, msg, json=None, tag=None): - self.msg = msg - self.json = json - if tag is None: - if json is None: - self.tag = "ovsdb error" - else: - self.tag = "syntax error" - else: - self.tag = tag - - # Compose message. - syntax = "" - if self.json is not None: - syntax = 'syntax "%s": ' % ovs.json.to_string(self.json) - Exception.__init__(self, "%s%s: %s" % (syntax, self.tag, self.msg)) diff --git a/ryu/contrib/ovs/db/idl.py b/ryu/contrib/ovs/db/idl.py deleted file mode 100644 index 9e9bf0f5..00000000 --- a/ryu/contrib/ovs/db/idl.py +++ /dev/null @@ -1,1287 +0,0 @@ -# Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid - -import ovs.jsonrpc -import ovs.db.parser -import ovs.db.schema -from ovs.db import error -import ovs.ovsuuid -import ovs.poller -import ovs.vlog - -vlog = ovs.vlog.Vlog("idl") - -__pychecker__ = 'no-classattr no-objattrs' - - -class Idl: - """Open vSwitch Database Interface Definition Language (OVSDB IDL). - - The OVSDB IDL maintains an in-memory replica of a database. It issues RPC - requests to an OVSDB database server and parses the responses, converting - raw JSON into data structures that are easier for clients to digest. - - The IDL also assists with issuing database transactions. The client - creates a transaction, manipulates the IDL data structures, and commits or - aborts the transaction. The IDL then composes and issues the necessary - JSON-RPC requests and reports to the client whether the transaction - completed successfully. - - The client is allowed to access the following attributes directly, in a - read-only fashion: - - - 'tables': This is the 'tables' map in the ovs.db.schema.DbSchema provided - to the Idl constructor. Each ovs.db.schema.TableSchema in the map is - annotated with a new attribute 'rows', which is a dict from a uuid.UUID - to a Row object. - - The client may directly read and write the Row objects referenced by the - 'rows' map values. Refer to Row for more details. - - - 'change_seqno': A number that represents the IDL's state. When the IDL - is updated (by Idl.run()), its value changes. The sequence number can - occasionally change even if the database does not. This happens if the - connection to the database drops and reconnects, which causes the - database contents to be reloaded even if they didn't change. (It could - also happen if the database server sends out a "change" that reflects - what the IDL already thought was in the database. 
The database server is - not supposed to do that, but bugs could in theory cause it to do so.) - - - 'lock_name': The name of the lock configured with Idl.set_lock(), or None - if no lock is configured. - - - 'has_lock': True, if the IDL is configured to obtain a lock and owns that - lock, and False otherwise. - - Locking and unlocking happens asynchronously from the database client's - point of view, so the information is only useful for optimization - (e.g. if the client doesn't have the lock then there's no point in trying - to write to the database). - - - 'is_lock_contended': True, if the IDL is configured to obtain a lock but - the database server has indicated that some other client already owns the - requested lock, and False otherwise. - - - 'txn': The ovs.db.idl.Transaction object for the database transaction - currently being constructed, if there is one, or None otherwise. -""" - - def __init__(self, remote, schema): - """Creates and returns a connection to the database named 'db_name' on - 'remote', which should be in a form acceptable to - ovs.jsonrpc.session.open(). The connection will maintain an in-memory - replica of the remote database. - - 'schema' should be the schema for the remote database. The caller may - have cut it down by removing tables or columns that are not of - interest. The IDL will only replicate the tables and columns that - remain. The caller may also add a attribute named 'alert' to selected - remaining columns, setting its value to False; if so, then changes to - those columns will not be considered changes to the database for the - purpose of the return value of Idl.run() and Idl.change_seqno. This is - useful for columns that the IDL's client will write but not read. - - As a convenience to users, 'schema' may also be an instance of the - SchemaHelper class. 
- - The IDL uses and modifies 'schema' directly.""" - - assert isinstance(schema, SchemaHelper) - schema = schema.get_idl_schema() - - self.tables = schema.tables - self._db = schema - self._session = ovs.jsonrpc.Session.open(remote) - self._monitor_request_id = None - self._last_seqno = None - self.change_seqno = 0 - - # Database locking. - self.lock_name = None # Name of lock we need, None if none. - self.has_lock = False # Has db server said we have the lock? - self.is_lock_contended = False # Has db server said we can't get lock? - self._lock_request_id = None # JSON-RPC ID of in-flight lock request. - - # Transaction support. - self.txn = None - self._outstanding_txns = {} - - for table in schema.tables.itervalues(): - for column in table.columns.itervalues(): - if not hasattr(column, 'alert'): - column.alert = True - table.need_table = False - table.rows = {} - table.idl = self - - def close(self): - """Closes the connection to the database. The IDL will no longer - update.""" - self._session.close() - - def run(self): - """Processes a batch of messages from the database server. Returns - True if the database as seen through the IDL changed, False if it did - not change. The initial fetch of the entire contents of the remote - database is considered to be one kind of change. If the IDL has been - configured to acquire a database lock (with Idl.set_lock()), then - successfully acquiring the lock is also considered to be a change. - - This function can return occasional false positives, that is, report - that the database changed even though it didn't. This happens if the - connection to the database drops and reconnects, which causes the - database contents to be reloaded even if they didn't change. (It could - also happen if the database server sends out a "change" that reflects - what we already thought was in the database, but the database server is - not supposed to do that.) 
- - As an alternative to checking the return value, the client may check - for changes in self.change_seqno.""" - assert not self.txn - initial_change_seqno = self.change_seqno - self._session.run() - i = 0 - while i < 50: - i += 1 - if not self._session.is_connected(): - break - - seqno = self._session.get_seqno() - if seqno != self._last_seqno: - self._last_seqno = seqno - self.__txn_abort_all() - self.__send_monitor_request() - if self.lock_name: - self.__send_lock_request() - break - - msg = self._session.recv() - if msg is None: - break - if (msg.type == ovs.jsonrpc.Message.T_NOTIFY - and msg.method == "update" - and len(msg.params) == 2 - and msg.params[0] == None): - # Database contents changed. - self.__parse_update(msg.params[1]) - elif (msg.type == ovs.jsonrpc.Message.T_REPLY - and self._monitor_request_id is not None - and self._monitor_request_id == msg.id): - # Reply to our "monitor" request. - try: - self.change_seqno += 1 - self._monitor_request_id = None - self.__clear() - self.__parse_update(msg.result) - except error.Error, e: - vlog.err("%s: parse error in received schema: %s" - % (self._session.get_name(), e)) - self.__error() - elif (msg.type == ovs.jsonrpc.Message.T_REPLY - and self._lock_request_id is not None - and self._lock_request_id == msg.id): - # Reply to our "lock" request. - self.__parse_lock_reply(msg.result) - elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY - and msg.method == "locked"): - # We got our lock. - self.__parse_lock_notify(msg.params, True) - elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY - and msg.method == "stolen"): - # Someone else stole our lock. - self.__parse_lock_notify(msg.params, False) - elif msg.type == ovs.jsonrpc.Message.T_NOTIFY and msg.id == "echo": - # Reply to our echo request. Ignore it. - pass - elif (msg.type in (ovs.jsonrpc.Message.T_ERROR, - ovs.jsonrpc.Message.T_REPLY) - and self.__txn_process_reply(msg)): - # __txn_process_reply() did everything needed. 
- pass - else: - # This can happen if a transaction is destroyed before we - # receive the reply, so keep the log level low. - vlog.dbg("%s: received unexpected %s message" - % (self._session.get_name(), - ovs.jsonrpc.Message.type_to_string(msg.type))) - - return initial_change_seqno != self.change_seqno - - def wait(self, poller): - """Arranges for poller.block() to wake up when self.run() has something - to do or when activity occurs on a transaction on 'self'.""" - self._session.wait(poller) - self._session.recv_wait(poller) - - def has_ever_connected(self): - """Returns True, if the IDL successfully connected to the remote - database and retrieved its contents (even if the connection - subsequently dropped and is in the process of reconnecting). If so, - then the IDL contains an atomic snapshot of the database's contents - (but it might be arbitrarily old if the connection dropped). - - Returns False if the IDL has never connected or retrieved the - database's contents. If so, the IDL is empty.""" - return self.change_seqno != 0 - - def force_reconnect(self): - """Forces the IDL to drop its connection to the database and reconnect. - In the meantime, the contents of the IDL will not change.""" - self._session.force_reconnect() - - def set_lock(self, lock_name): - """If 'lock_name' is not None, configures the IDL to obtain the named - lock from the database server and to avoid modifying the database when - the lock cannot be acquired (that is, when another client has the same - lock). - - If 'lock_name' is None, drops the locking requirement and releases the - lock.""" - assert not self.txn - assert not self._outstanding_txns - - if self.lock_name and (not lock_name or lock_name != self.lock_name): - # Release previous lock. - self.__send_unlock_request() - self.lock_name = None - self.is_lock_contended = False - - if lock_name and not self.lock_name: - # Acquire new lock. 
- self.lock_name = lock_name - self.__send_lock_request() - - def __clear(self): - changed = False - - for table in self.tables.itervalues(): - if table.rows: - changed = True - table.rows = {} - - if changed: - self.change_seqno += 1 - - def __update_has_lock(self, new_has_lock): - if new_has_lock and not self.has_lock: - if self._monitor_request_id is None: - self.change_seqno += 1 - else: - # We're waiting for a monitor reply, so don't signal that the - # database changed. The monitor reply will increment - # change_seqno anyhow. - pass - self.is_lock_contended = False - self.has_lock = new_has_lock - - def __do_send_lock_request(self, method): - self.__update_has_lock(False) - self._lock_request_id = None - if self._session.is_connected(): - msg = ovs.jsonrpc.Message.create_request(method, [self.lock_name]) - msg_id = msg.id - self._session.send(msg) - else: - msg_id = None - return msg_id - - def __send_lock_request(self): - self._lock_request_id = self.__do_send_lock_request("lock") - - def __send_unlock_request(self): - self.__do_send_lock_request("unlock") - - def __parse_lock_reply(self, result): - self._lock_request_id = None - got_lock = type(result) == dict and result.get("locked") is True - self.__update_has_lock(got_lock) - if not got_lock: - self.is_lock_contended = True - - def __parse_lock_notify(self, params, new_has_lock): - if (self.lock_name is not None - and type(params) in (list, tuple) - and params - and params[0] == self.lock_name): - self.__update_has_lock(self, new_has_lock) - if not new_has_lock: - self.is_lock_contended = True - - def __send_monitor_request(self): - monitor_requests = {} - for table in self.tables.itervalues(): - monitor_requests[table.name] = {"columns": table.columns.keys()} - msg = ovs.jsonrpc.Message.create_request( - "monitor", [self._db.name, None, monitor_requests]) - self._monitor_request_id = msg.id - self._session.send(msg) - - def __parse_update(self, update): - try: - self.__do_parse_update(update) - except 
error.Error, e: - vlog.err("%s: error parsing update: %s" - % (self._session.get_name(), e)) - - def __do_parse_update(self, table_updates): - if type(table_updates) != dict: - raise error.Error(" is not an object", - table_updates) - - for table_name, table_update in table_updates.iteritems(): - table = self.tables.get(table_name) - if not table: - raise error.Error(' includes unknown ' - 'table "%s"' % table_name) - - if type(table_update) != dict: - raise error.Error(' for table "%s" is not ' - 'an object' % table_name, table_update) - - for uuid_string, row_update in table_update.iteritems(): - if not ovs.ovsuuid.is_valid_string(uuid_string): - raise error.Error(' for table "%s" ' - 'contains bad UUID "%s" as member ' - 'name' % (table_name, uuid_string), - table_update) - uuid = ovs.ovsuuid.from_string(uuid_string) - - if type(row_update) != dict: - raise error.Error(' for table "%s" ' - 'contains for %s that ' - 'is not an object' - % (table_name, uuid_string)) - - parser = ovs.db.parser.Parser(row_update, "row-update") - old = parser.get_optional("old", [dict]) - new = parser.get_optional("new", [dict]) - parser.finish() - - if not old and not new: - raise error.Error(' missing "old" and ' - '"new" members', row_update) - - if self.__process_update(table, uuid, old, new): - self.change_seqno += 1 - - def __process_update(self, table, uuid, old, new): - """Returns True if a column changed, False otherwise.""" - row = table.rows.get(uuid) - changed = False - if not new: - # Delete row. - if row: - del table.rows[uuid] - changed = True - else: - # XXX rate-limit - vlog.warn("cannot delete missing row %s from table %s" - % (uuid, table.name)) - elif not old: - # Insert row. 
- if not row: - row = self.__create_row(table, uuid) - changed = True - else: - # XXX rate-limit - vlog.warn("cannot add existing row %s to table %s" - % (uuid, table.name)) - if self.__row_update(table, row, new): - changed = True - else: - if not row: - row = self.__create_row(table, uuid) - changed = True - # XXX rate-limit - vlog.warn("cannot modify missing row %s in table %s" - % (uuid, table.name)) - if self.__row_update(table, row, new): - changed = True - return changed - - def __row_update(self, table, row, row_json): - changed = False - for column_name, datum_json in row_json.iteritems(): - column = table.columns.get(column_name) - if not column: - # XXX rate-limit - vlog.warn("unknown column %s updating table %s" - % (column_name, table.name)) - continue - - try: - datum = ovs.db.data.Datum.from_json(column.type, datum_json) - except error.Error, e: - # XXX rate-limit - vlog.warn("error parsing column %s in table %s: %s" - % (column_name, table.name, e)) - continue - - if datum != row._data[column_name]: - row._data[column_name] = datum - if column.alert: - changed = True - else: - # Didn't really change but the OVSDB monitor protocol always - # includes every value in a row. 
- pass - return changed - - def __create_row(self, table, uuid): - data = {} - for column in table.columns.itervalues(): - data[column.name] = ovs.db.data.Datum.default(column.type) - row = table.rows[uuid] = Row(self, table, uuid, data) - return row - - def __error(self): - self._session.force_reconnect() - - def __txn_abort_all(self): - while self._outstanding_txns: - txn = self._outstanding_txns.popitem()[1] - txn._status = Transaction.TRY_AGAIN - - def __txn_process_reply(self, msg): - txn = self._outstanding_txns.pop(msg.id, None) - if txn: - txn._process_reply(msg) - - -def _uuid_to_row(atom, base): - if base.ref_table: - return base.ref_table.rows.get(atom) - else: - return atom - - -def _row_to_uuid(value): - if type(value) == Row: - return value.uuid - else: - return value - - -class Row(object): - """A row within an IDL. - - The client may access the following attributes directly: - - - 'uuid': a uuid.UUID object whose value is the row's database UUID. - - - An attribute for each column in the Row's table, named for the column, - whose values are as returned by Datum.to_python() for the column's type. - - If some error occurs (e.g. the database server's idea of the column is - different from the IDL's idea), then the attribute values is the - "default" value return by Datum.default() for the column's type. (It is - important to know this because the default value may violate constraints - for the column's type, e.g. the default integer value is 0 even if column - contraints require the column's value to be positive.) - - When a transaction is active, column attributes may also be assigned new - values. Committing the transaction will then cause the new value to be - stored into the database. - - *NOTE*: In the current implementation, the value of a column is a *copy* - of the value in the database. This means that modifying its value - directly will have no useful effect. 
For example, the following: - row.mycolumn["a"] = "b" # don't do this - will not change anything in the database, even after commit. To modify - the column, instead assign the modified column value back to the column: - d = row.mycolumn - d["a"] = "b" - row.mycolumn = d -""" - def __init__(self, idl, table, uuid, data): - # All of the explicit references to self.__dict__ below are required - # to set real attributes with invoking self.__getattr__(). - self.__dict__["uuid"] = uuid - - self.__dict__["_idl"] = idl - self.__dict__["_table"] = table - - # _data is the committed data. It takes the following values: - # - # - A dictionary that maps every column name to a Datum, if the row - # exists in the committed form of the database. - # - # - None, if this row is newly inserted within the active transaction - # and thus has no committed form. - self.__dict__["_data"] = data - - # _changes describes changes to this row within the active transaction. - # It takes the following values: - # - # - {}, the empty dictionary, if no transaction is active or if the - # row has yet not been changed within this transaction. - # - # - A dictionary that maps a column name to its new Datum, if an - # active transaction changes those columns' values. - # - # - A dictionary that maps every column name to a Datum, if the row - # is newly inserted within the active transaction. - # - # - None, if this transaction deletes this row. - self.__dict__["_changes"] = {} - - # A dictionary whose keys are the names of columns that must be - # verified as prerequisites when the transaction commits. The values - # in the dictionary are all None. 
- self.__dict__["_prereqs"] = {} - - def __getattr__(self, column_name): - assert self._changes is not None - - datum = self._changes.get(column_name) - if datum is None: - if self._data is None: - raise AttributeError("%s instance has no attribute '%s'" % - (self.__class__.__name__, column_name)) - datum = self._data[column_name] - - return datum.to_python(_uuid_to_row) - - def __setattr__(self, column_name, value): - assert self._changes is not None - assert self._idl.txn - - column = self._table.columns[column_name] - try: - datum = ovs.db.data.Datum.from_python(column.type, value, - _row_to_uuid) - except error.Error, e: - # XXX rate-limit - vlog.err("attempting to write bad value to column %s (%s)" - % (column_name, e)) - return - self._idl.txn._write(self, column, datum) - - def verify(self, column_name): - """Causes the original contents of column 'column_name' in this row to - be verified as a prerequisite to completing the transaction. That is, - if 'column_name' changed in this row (or if this row was deleted) - between the time that the IDL originally read its contents and the time - that the transaction commits, then the transaction aborts and - Transaction.commit() returns Transaction.TRY_AGAIN. - - The intention is that, to ensure that no transaction commits based on - dirty reads, an application should call Row.verify() on each data item - read as part of a read-modify-write operation. - - In some cases Row.verify() reduces to a no-op, because the current - value of the column is already known: - - - If this row is a row created by the current transaction (returned - by Transaction.insert()). - - - If the column has already been modified within the current - transaction. - - Because of the latter property, always call Row.verify() *before* - modifying the column, for a given read-modify-write. 
- - A transaction must be in progress.""" - assert self._idl.txn - assert self._changes is not None - if not self._data or column_name in self._changes: - return - - self._prereqs[column_name] = None - - def delete(self): - """Deletes this row from its table. - - A transaction must be in progress.""" - assert self._idl.txn - assert self._changes is not None - if self._data is None: - del self._idl.txn._txn_rows[self.uuid] - self.__dict__["_changes"] = None - del self._table.rows[self.uuid] - - def increment(self, column_name): - """Causes the transaction, when committed, to increment the value of - 'column_name' within this row by 1. 'column_name' must have an integer - type. After the transaction commits successfully, the client may - retrieve the final (incremented) value of 'column_name' with - Transaction.get_increment_new_value(). - - The client could accomplish something similar by reading and writing - and verify()ing columns. However, increment() will never (by itself) - cause a transaction to fail because of a verify error. - - The intended use is for incrementing the "next_cfg" column in - the Open_vSwitch table.""" - self._idl.txn._increment(self, column_name) - - -def _uuid_name_from_uuid(uuid): - return "row%s" % str(uuid).replace("-", "_") - - -def _where_uuid_equals(uuid): - return [["_uuid", "==", ["uuid", str(uuid)]]] - - -class _InsertedRow(object): - def __init__(self, op_index): - self.op_index = op_index - self.real = None - - -class Transaction(object): - """A transaction may modify the contents of a database by modifying the - values of columns, deleting rows, inserting rows, or adding checks that - columns in the database have not changed ("verify" operations), through - Row methods. - - Reading and writing columns and inserting and deleting rows are all - straightforward. The reasons to verify columns are less obvious. - Verification is the key to maintaining transactional integrity. 
Because - OVSDB handles multiple clients, it can happen that between the time that - OVSDB client A reads a column and writes a new value, OVSDB client B has - written that column. Client A's write should not ordinarily overwrite - client B's, especially if the column in question is a "map" column that - contains several more or less independent data items. If client A adds a - "verify" operation before it writes the column, then the transaction fails - in case client B modifies it first. Client A will then see the new value - of the column and compose a new transaction based on the new contents - written by client B. - - When a transaction is complete, which must be before the next call to - Idl.run(), call Transaction.commit() or Transaction.abort(). - - The life-cycle of a transaction looks like this: - - 1. Create the transaction and record the initial sequence number: - - seqno = idl.change_seqno(idl) - txn = Transaction(idl) - - 2. Modify the database with Row and Transaction methods. - - 3. Commit the transaction by calling Transaction.commit(). The first call - to this function probably returns Transaction.INCOMPLETE. The client - must keep calling again along as this remains true, calling Idl.run() in - between to let the IDL do protocol processing. (If the client doesn't - have anything else to do in the meantime, it can use - Transaction.commit_block() to avoid having to loop itself.) - - 4. If the final status is Transaction.TRY_AGAIN, wait for Idl.change_seqno - to change from the saved 'seqno' (it's possible that it's already - changed, in which case the client should not wait at all), then start - over from step 1. Only a call to Idl.run() will change the return value - of Idl.change_seqno. (Transaction.commit_block() calls Idl.run().)""" - - # Status values that Transaction.commit() can return. - UNCOMMITTED = "uncommitted" # Not yet committed or aborted. - UNCHANGED = "unchanged" # Transaction didn't include any changes. 
- INCOMPLETE = "incomplete" # Commit in progress, please wait. - ABORTED = "aborted" # ovsdb_idl_txn_abort() called. - SUCCESS = "success" # Commit successful. - TRY_AGAIN = "try again" # Commit failed because a "verify" operation - # reported an inconsistency, due to a network - # problem, or other transient failure. Wait - # for a change, then try again. - NOT_LOCKED = "not locked" # Server hasn't given us the lock yet. - ERROR = "error" # Commit failed due to a hard error. - - @staticmethod - def status_to_string(status): - """Converts one of the status values that Transaction.commit() can - return into a human-readable string. - - (The status values are in fact such strings already, so - there's nothing to do.)""" - return status - - def __init__(self, idl): - """Starts a new transaction on 'idl' (an instance of ovs.db.idl.Idl). - A given Idl may only have a single active transaction at a time. - - A Transaction may modify the contents of a database by assigning new - values to columns (attributes of Row), deleting rows (with - Row.delete()), or inserting rows (with Transaction.insert()). It may - also check that columns in the database have not changed with - Row.verify(). - - When a transaction is complete (which must be before the next call to - Idl.run()), call Transaction.commit() or Transaction.abort().""" - assert idl.txn is None - - idl.txn = self - self._request_id = None - self.idl = idl - self.dry_run = False - self._txn_rows = {} - self._status = Transaction.UNCOMMITTED - self._error = None - self._comments = [] - self._commit_seqno = self.idl.change_seqno - - self._inc_row = None - self._inc_column = None - - self._inserted_rows = {} # Map from UUID to _InsertedRow - - def add_comment(self, comment): - """Appens 'comment' to the comments that will be passed to the OVSDB - server when this transaction is committed. 
(The comment will be - committed to the OVSDB log, which "ovsdb-tool show-log" can print in a - relatively human-readable form.)""" - self._comments.append(comment) - - def wait(self, poller): - """Causes poll_block() to wake up if this transaction has completed - committing.""" - if self._status not in (Transaction.UNCOMMITTED, - Transaction.INCOMPLETE): - poller.immediate_wake() - - def _substitute_uuids(self, json): - if type(json) in (list, tuple): - if (len(json) == 2 - and json[0] == 'uuid' - and ovs.ovsuuid.is_valid_string(json[1])): - uuid = ovs.ovsuuid.from_string(json[1]) - row = self._txn_rows.get(uuid, None) - if row and row._data is None: - return ["named-uuid", _uuid_name_from_uuid(uuid)] - else: - return [self._substitute_uuids(elem) for elem in json] - return json - - def __disassemble(self): - self.idl.txn = None - - for row in self._txn_rows.itervalues(): - if row._changes is None: - row._table.rows[row.uuid] = row - elif row._data is None: - del row._table.rows[row.uuid] - row.__dict__["_changes"] = {} - row.__dict__["_prereqs"] = {} - self._txn_rows = {} - - def commit(self): - """Attempts to commit 'txn'. Returns the status of the commit - operation, one of the following constants: - - Transaction.INCOMPLETE: - - The transaction is in progress, but not yet complete. The caller - should call again later, after calling Idl.run() to let the - IDL do OVSDB protocol processing. - - Transaction.UNCHANGED: - - The transaction is complete. (It didn't actually change the - database, so the IDL didn't send any request to the database - server.) - - Transaction.ABORTED: - - The caller previously called Transaction.abort(). - - Transaction.SUCCESS: - - The transaction was successful. The update made by the - transaction (and possibly other changes made by other database - clients) should already be visible in the IDL. - - Transaction.TRY_AGAIN: - - The transaction failed for some transient reason, e.g. 
because a - "verify" operation reported an inconsistency or due to a network - problem. The caller should wait for a change to the database, - then compose a new transaction, and commit the new transaction. - - Use Idl.change_seqno to wait for a change in the database. It is - important to use its value *before* the initial call to - Transaction.commit() as the baseline for this purpose, because - the change that one should wait for can happen after the initial - call but before the call that returns Transaction.TRY_AGAIN, and - using some other baseline value in that situation could cause an - indefinite wait if the database rarely changes. - - Transaction.NOT_LOCKED: - - The transaction failed because the IDL has been configured to - require a database lock (with Idl.set_lock()) but didn't - get it yet or has already lost it. - - Committing a transaction rolls back all of the changes that it made to - the IDL's copy of the database. If the transaction commits - successfully, then the database server will send an update and, thus, - the IDL will be updated with the committed changes.""" - # The status can only change if we're the active transaction. - # (Otherwise, our status will change only in Idl.run().) - if self != self.idl.txn: - return self._status - - # If we need a lock but don't have it, give up quickly. - if self.idl.lock_name and not self.idl.has_lock(): - self._status = Transaction.NOT_LOCKED - self.__disassemble() - return self._status - - operations = [self.idl._db.name] - - # Assert that we have the required lock (avoiding a race). - if self.idl.lock_name: - operations.append({"op": "assert", - "lock": self.idl.lock_name}) - - # Add prerequisites and declarations of new rows. 
- for row in self._txn_rows.itervalues(): - if row._prereqs: - rows = {} - columns = [] - for column_name in row._prereqs: - columns.append(column_name) - rows[column_name] = row._data[column_name].to_json() - operations.append({"op": "wait", - "table": row._table.name, - "timeout": 0, - "where": _where_uuid_equals(row.uuid), - "until": "==", - "columns": columns, - "rows": [rows]}) - - # Add updates. - any_updates = False - for row in self._txn_rows.itervalues(): - if row._changes is None: - if row._table.is_root: - operations.append({"op": "delete", - "table": row._table.name, - "where": _where_uuid_equals(row.uuid)}) - any_updates = True - else: - # Let ovsdb-server decide whether to really delete it. - pass - elif row._changes: - op = {"table": row._table.name} - if row._data is None: - op["op"] = "insert" - op["uuid-name"] = _uuid_name_from_uuid(row.uuid) - any_updates = True - - op_index = len(operations) - 1 - self._inserted_rows[row.uuid] = _InsertedRow(op_index) - else: - op["op"] = "update" - op["where"] = _where_uuid_equals(row.uuid) - - row_json = {} - op["row"] = row_json - - for column_name, datum in row._changes.iteritems(): - if row._data is not None or not datum.is_default(): - row_json[column_name] = ( - self._substitute_uuids(datum.to_json())) - - # If anything really changed, consider it an update. - # We can't suppress not-really-changed values earlier - # or transactions would become nonatomic (see the big - # comment inside Transaction._write()). - if (not any_updates and row._data is not None and - row._data[column_name] != datum): - any_updates = True - - if row._data is None or row_json: - operations.append(op) - - # Add increment. 
- if self._inc_row and any_updates: - self._inc_index = len(operations) - 1 - - operations.append({"op": "mutate", - "table": self._inc_row._table.name, - "where": self._substitute_uuids( - _where_uuid_equals(self._inc_row.uuid)), - "mutations": [[self._inc_column, "+=", 1]]}) - operations.append({"op": "select", - "table": self._inc_row._table.name, - "where": self._substitute_uuids( - _where_uuid_equals(self._inc_row.uuid)), - "columns": [self._inc_column]}) - - # Add comment. - if self._comments: - operations.append({"op": "comment", - "comment": "\n".join(self._comments)}) - - # Dry run? - if self.dry_run: - operations.append({"op": "abort"}) - - if not any_updates: - self._status = Transaction.UNCHANGED - else: - msg = ovs.jsonrpc.Message.create_request("transact", operations) - self._request_id = msg.id - if not self.idl._session.send(msg): - self.idl._outstanding_txns[self._request_id] = self - self._status = Transaction.INCOMPLETE - else: - self._status = Transaction.TRY_AGAIN - - self.__disassemble() - return self._status - - def commit_block(self): - """Attempts to commit this transaction, blocking until the commit - either succeeds or fails. Returns the final commit status, which may - be any Transaction.* value other than Transaction.INCOMPLETE. - - This function calls Idl.run() on this transaction'ss IDL, so it may - cause Idl.change_seqno to change.""" - while True: - status = self.commit() - if status != Transaction.INCOMPLETE: - return status - - self.idl.run() - - poller = ovs.poller.Poller() - self.idl.wait(poller) - self.wait(poller) - poller.block() - - def get_increment_new_value(self): - """Returns the final (incremented) value of the column in this - transaction that was set to be incremented by Row.increment. This - transaction must have committed successfully.""" - assert self._status == Transaction.SUCCESS - return self._inc_new_value - - def abort(self): - """Aborts this transaction. 
If Transaction.commit() has already been - called then the transaction might get committed anyhow.""" - self.__disassemble() - if self._status in (Transaction.UNCOMMITTED, - Transaction.INCOMPLETE): - self._status = Transaction.ABORTED - - def get_error(self): - """Returns a string representing this transaction's current status, - suitable for use in log messages.""" - if self._status != Transaction.ERROR: - return Transaction.status_to_string(self._status) - elif self._error: - return self._error - else: - return "no error details available" - - def __set_error_json(self, json): - if self._error is None: - self._error = ovs.json.to_string(json) - - def get_insert_uuid(self, uuid): - """Finds and returns the permanent UUID that the database assigned to a - newly inserted row, given the UUID that Transaction.insert() assigned - locally to that row. - - Returns None if 'uuid' is not a UUID assigned by Transaction.insert() - or if it was assigned by that function and then deleted by Row.delete() - within the same transaction. (Rows that are inserted and then deleted - within a single transaction are never sent to the database server, so - it never assigns them a permanent UUID.) - - This transaction must have completed successfully.""" - assert self._status in (Transaction.SUCCESS, - Transaction.UNCHANGED) - inserted_row = self._inserted_rows.get(uuid) - if inserted_row: - return inserted_row.real - return None - - def _increment(self, row, column): - assert not self._inc_row - self._inc_row = row - self._inc_column = column - - def _write(self, row, column, datum): - assert row._changes is not None - - txn = row._idl.txn - - # If this is a write-only column and the datum being written is the - # same as the one already there, just skip the update entirely. This - # is worth optimizing because we have a lot of columns that get - # periodically refreshed into the database but don't actually change - # that often. 
- # - # We don't do this for read/write columns because that would break - # atomicity of transactions--some other client might have written a - # different value in that column since we read it. (But if a whole - # transaction only does writes of existing values, without making any - # real changes, we will drop the whole transaction later in - # ovsdb_idl_txn_commit().) - if not column.alert and row._data.get(column.name) == datum: - new_value = row._changes.get(column.name) - if new_value is None or new_value == datum: - return - - txn._txn_rows[row.uuid] = row - row._changes[column.name] = datum.copy() - - def insert(self, table, new_uuid=None): - """Inserts and returns a new row in 'table', which must be one of the - ovs.db.schema.TableSchema objects in the Idl's 'tables' dict. - - The new row is assigned a provisional UUID. If 'uuid' is None then one - is randomly generated; otherwise 'uuid' should specify a randomly - generated uuid.UUID not otherwise in use. ovsdb-server will assign a - different UUID when 'txn' is committed, but the IDL will replace any - uses of the provisional UUID in the data to be to be committed by the - UUID assigned by ovsdb-server.""" - assert self._status == Transaction.UNCOMMITTED - if new_uuid is None: - new_uuid = uuid.uuid4() - row = Row(self.idl, table, new_uuid, None) - table.rows[row.uuid] = row - self._txn_rows[row.uuid] = row - return row - - def _process_reply(self, msg): - if msg.type == ovs.jsonrpc.Message.T_ERROR: - self._status = Transaction.ERROR - elif type(msg.result) not in (list, tuple): - # XXX rate-limit - vlog.warn('reply to "transact" is not JSON array') - else: - hard_errors = False - soft_errors = False - lock_errors = False - - ops = msg.result - for op in ops: - if op is None: - # This isn't an error in itself but indicates that some - # prior operation failed, so make sure that we know about - # it. 
- soft_errors = True - elif type(op) == dict: - error = op.get("error") - if error is not None: - if error == "timed out": - soft_errors = True - elif error == "not owner": - lock_errors = True - elif error == "aborted": - pass - else: - hard_errors = True - self.__set_error_json(op) - else: - hard_errors = True - self.__set_error_json(op) - # XXX rate-limit - vlog.warn("operation reply is not JSON null or object") - - if not soft_errors and not hard_errors and not lock_errors: - if self._inc_row and not self.__process_inc_reply(ops): - hard_errors = True - - for insert in self._inserted_rows.itervalues(): - if not self.__process_insert_reply(insert, ops): - hard_errors = True - - if hard_errors: - self._status = Transaction.ERROR - elif lock_errors: - self._status = Transaction.NOT_LOCKED - elif soft_errors: - self._status = Transaction.TRY_AGAIN - else: - self._status = Transaction.SUCCESS - - @staticmethod - def __check_json_type(json, types, name): - if not json: - # XXX rate-limit - vlog.warn("%s is missing" % name) - return False - elif type(json) not in types: - # XXX rate-limit - vlog.warn("%s has unexpected type %s" % (name, type(json))) - return False - else: - return True - - def __process_inc_reply(self, ops): - if self._inc_index + 2 > len(ops): - # XXX rate-limit - vlog.warn("reply does not contain enough operations for " - "increment (has %d, needs %d)" % - (len(ops), self._inc_index + 2)) - - # We know that this is a JSON object because the loop in - # __process_reply() already checked. 
- mutate = ops[self._inc_index] - count = mutate.get("count") - if not Transaction.__check_json_type(count, (int, long), - '"mutate" reply "count"'): - return False - if count != 1: - # XXX rate-limit - vlog.warn('"mutate" reply "count" is %d instead of 1' % count) - return False - - select = ops[self._inc_index + 1] - rows = select.get("rows") - if not Transaction.__check_json_type(rows, (list, tuple), - '"select" reply "rows"'): - return False - if len(rows) != 1: - # XXX rate-limit - vlog.warn('"select" reply "rows" has %d elements ' - 'instead of 1' % len(rows)) - return False - row = rows[0] - if not Transaction.__check_json_type(row, (dict,), - '"select" reply row'): - return False - column = row.get(self._inc_column) - if not Transaction.__check_json_type(column, (int, long), - '"select" reply inc column'): - return False - self._inc_new_value = column - return True - - def __process_insert_reply(self, insert, ops): - if insert.op_index >= len(ops): - # XXX rate-limit - vlog.warn("reply does not contain enough operations " - "for insert (has %d, needs %d)" - % (len(ops), insert.op_index)) - return False - - # We know that this is a JSON object because the loop in - # __process_reply() already checked. - reply = ops[insert.op_index] - json_uuid = reply.get("uuid") - if not Transaction.__check_json_type(json_uuid, (tuple, list), - '"insert" reply "uuid"'): - return False - - try: - uuid_ = ovs.ovsuuid.from_json(json_uuid) - except error.Error: - # XXX rate-limit - vlog.warn('"insert" reply "uuid" is not a JSON UUID') - return False - - insert.real = uuid_ - return True - - -class SchemaHelper(object): - """IDL Schema helper. - - This class encapsulates the logic required to generate schemas suitable - for creating 'ovs.db.idl.Idl' objects. Clients should register columns - they are interested in using register_columns(). When finished, the - get_idl_schema() function may be called. 
- - The location on disk of the schema used may be found in the - 'schema_location' variable.""" - - def __init__(self, location=None, schema_json=None): - """Creates a new Schema object. - - 'location' file path to ovs schema. None means default location - 'schema_json' schema in json preresentation in memory - """ - - if location and schema_json: - raise ValueError("both location and schema_json can't be " - "specified. it's ambiguous.") - if schema_json is None: - if location is None: - location = "%s/vswitch.ovsschema" % ovs.dirs.PKGDATADIR - schema_json = ovs.json.from_file(location) - - self.schema_json = schema_json - self._tables = {} - self._all = False - - def register_columns(self, table, columns): - """Registers interest in the given 'columns' of 'table'. Future calls - to get_idl_schema() will include 'table':column for each column in - 'columns'. This function automatically avoids adding duplicate entries - to the schema. - - 'table' must be a string. - 'columns' must be a list of strings. - """ - - assert type(table) is str - assert type(columns) is list - - columns = set(columns) | self._tables.get(table, set()) - self._tables[table] = columns - - def register_table(self, table): - """Registers interest in the given all columns of 'table'. Future calls - to get_idl_schema() will include all columns of 'table'. 
- - 'table' must be a string - """ - assert type(table) is str - self._tables[table] = set() # empty set means all columns in the table - - def register_all(self): - """Registers interest in every column of every table.""" - self._all = True - - def get_idl_schema(self): - """Gets a schema appropriate for the creation of an 'ovs.db.id.IDL' - object based on columns registered using the register_columns() - function.""" - - schema = ovs.db.schema.DbSchema.from_json(self.schema_json) - self.schema_json = None - - if not self._all: - schema_tables = {} - for table, columns in self._tables.iteritems(): - schema_tables[table] = ( - self._keep_table_columns(schema, table, columns)) - - schema.tables = schema_tables - return schema - - def _keep_table_columns(self, schema, table_name, columns): - assert table_name in schema.tables - table = schema.tables[table_name] - - if not columns: - # empty set means all columns in the table - return table - - new_columns = {} - for column_name in columns: - assert type(column_name) is str - assert column_name in table.columns - - new_columns[column_name] = table.columns[column_name] - - table.columns = new_columns - return table diff --git a/ryu/contrib/ovs/db/parser.py b/ryu/contrib/ovs/db/parser.py deleted file mode 100644 index 2556becc..00000000 --- a/ryu/contrib/ovs/db/parser.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re - -from ovs.db import error - - -class Parser(object): - def __init__(self, json, name): - self.name = name - self.json = json - if type(json) != dict: - self.__raise_error("Object expected.") - self.used = set() - - def __get(self, name, types, optional, default=None): - if name in self.json: - self.used.add(name) - member = float_to_int(self.json[name]) - if is_identifier(member) and "id" in types: - return member - if len(types) and type(member) not in types: - self.__raise_error("Type mismatch for member '%s'." % name) - return member - else: - if not optional: - self.__raise_error("Required '%s' member is missing." % name) - return default - - def get(self, name, types): - return self.__get(name, types, False) - - def get_optional(self, name, types, default=None): - return self.__get(name, types, True, default) - - def __raise_error(self, message): - raise error.Error("Parsing %s failed: %s" % (self.name, message), - self.json) - - def finish(self): - missing = set(self.json) - set(self.used) - if missing: - name = missing.pop() - if len(missing) > 1: - present = "and %d other members are" % len(missing) - elif missing: - present = "and 1 other member are" - else: - present = "is" - self.__raise_error("Member '%s' %s present but not allowed here" % - (name, present)) - - -def float_to_int(x): - # XXX still needed? 
- if type(x) == float: - integer = int(x) - if integer == x and -2 ** 53 <= integer < 2 ** 53: - return integer - return x - - -id_re = re.compile("[_a-zA-Z][_a-zA-Z0-9]*$") - - -def is_identifier(s): - return type(s) in [str, unicode] and id_re.match(s) - - -def json_type_to_string(type_): - if type_ == None: - return "null" - elif type_ == bool: - return "boolean" - elif type_ == dict: - return "object" - elif type_ == list: - return "array" - elif type_ in [int, long, float]: - return "number" - elif type_ in [str, unicode]: - return "string" - else: - return "" - - -def unwrap_json(json, name, types, desc): - if (type(json) not in (list, tuple) or len(json) != 2 or json[0] != name or - type(json[1]) not in types): - raise error.Error('expected ["%s", <%s>]' % (name, desc), json) - return json[1] - - -def parse_json_pair(json): - if type(json) != list or len(json) != 2: - raise error.Error("expected 2-element array", json) - return json diff --git a/ryu/contrib/ovs/db/schema.py b/ryu/contrib/ovs/db/schema.py deleted file mode 100644 index 1b5a771f..00000000 --- a/ryu/contrib/ovs/db/schema.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) 2009, 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re -import sys - -from ovs.db import error -import ovs.db.parser -from ovs.db import types - - -def _check_id(name, json): - if name.startswith('_'): - raise error.Error('names beginning with "_" are reserved', json) - elif not ovs.db.parser.is_identifier(name): - raise error.Error("name must be a valid id", json) - - -class DbSchema(object): - """Schema for an OVSDB database.""" - - def __init__(self, name, version, tables): - self.name = name - self.version = version - self.tables = tables - - # "isRoot" was not part of the original schema definition. Before it - # was added, there was no support for garbage collection. So, for - # backward compatibility, if the root set is empty then assume that - # every table is in the root set. - if self.__root_set_size() == 0: - for table in self.tables.itervalues(): - table.is_root = True - - # Find the "ref_table"s referenced by "ref_table_name"s. - # - # Also force certain columns to be persistent, as explained in - # __check_ref_table(). This requires 'is_root' to be known, so this - # must follow the loop updating 'is_root' above. 
- for table in self.tables.itervalues(): - for column in table.columns.itervalues(): - self.__follow_ref_table(column, column.type.key, "key") - self.__follow_ref_table(column, column.type.value, "value") - - def __root_set_size(self): - """Returns the number of tables in the schema's root set.""" - n_root = 0 - for table in self.tables.itervalues(): - if table.is_root: - n_root += 1 - return n_root - - @staticmethod - def from_json(json): - parser = ovs.db.parser.Parser(json, "database schema") - name = parser.get("name", ['id']) - version = parser.get_optional("version", [str, unicode]) - parser.get_optional("cksum", [str, unicode]) - tablesJson = parser.get("tables", [dict]) - parser.finish() - - if (version is not None and - not re.match('[0-9]+\.[0-9]+\.[0-9]+$', version)): - raise error.Error('schema version "%s" not in format x.y.z' - % version) - - tables = {} - for tableName, tableJson in tablesJson.iteritems(): - _check_id(tableName, json) - tables[tableName] = TableSchema.from_json(tableJson, tableName) - - return DbSchema(name, version, tables) - - def to_json(self): - # "isRoot" was not part of the original schema definition. Before it - # was added, there was no support for garbage collection. So, for - # backward compatibility, if every table is in the root set then do not - # output "isRoot" in table schemas. 
- default_is_root = self.__root_set_size() == len(self.tables) - - tables = {} - for table in self.tables.itervalues(): - tables[table.name] = table.to_json(default_is_root) - json = {"name": self.name, "tables": tables} - if self.version: - json["version"] = self.version - return json - - def copy(self): - return DbSchema.from_json(self.to_json()) - - def __follow_ref_table(self, column, base, base_name): - if not base or base.type != types.UuidType or not base.ref_table_name: - return - - base.ref_table = self.tables.get(base.ref_table_name) - if not base.ref_table: - raise error.Error("column %s %s refers to undefined table %s" - % (column.name, base_name, base.ref_table_name), - tag="syntax error") - - if base.is_strong_ref() and not base.ref_table.is_root: - # We cannot allow a strong reference to a non-root table to be - # ephemeral: if it is the only reference to a row, then replaying - # the database log from disk will cause the referenced row to be - # deleted, even though it did exist in memory. If there are - # references to that row later in the log (to modify it, to delete - # it, or just to point to it), then this will yield a transaction - # error. 
- column.persistent = True - - -class IdlSchema(DbSchema): - def __init__(self, name, version, tables, idlPrefix, idlHeader): - DbSchema.__init__(self, name, version, tables) - self.idlPrefix = idlPrefix - self.idlHeader = idlHeader - - @staticmethod - def from_json(json): - parser = ovs.db.parser.Parser(json, "IDL schema") - idlPrefix = parser.get("idlPrefix", [str, unicode]) - idlHeader = parser.get("idlHeader", [str, unicode]) - - subjson = dict(json) - del subjson["idlPrefix"] - del subjson["idlHeader"] - schema = DbSchema.from_json(subjson) - - return IdlSchema(schema.name, schema.version, schema.tables, - idlPrefix, idlHeader) - - -def column_set_from_json(json, columns): - if json is None: - return tuple(columns) - elif type(json) != list: - raise error.Error("array of distinct column names expected", json) - else: - for column_name in json: - if type(column_name) not in [str, unicode]: - raise error.Error("array of distinct column names expected", - json) - elif column_name not in columns: - raise error.Error("%s is not a valid column name" - % column_name, json) - if len(set(json)) != len(json): - # Duplicate. 
- raise error.Error("array of distinct column names expected", json) - return tuple([columns[column_name] for column_name in json]) - - -class TableSchema(object): - def __init__(self, name, columns, mutable=True, max_rows=sys.maxint, - is_root=True, indexes=[]): - self.name = name - self.columns = columns - self.mutable = mutable - self.max_rows = max_rows - self.is_root = is_root - self.indexes = indexes - - @staticmethod - def from_json(json, name): - parser = ovs.db.parser.Parser(json, "table schema for table %s" % name) - columns_json = parser.get("columns", [dict]) - mutable = parser.get_optional("mutable", [bool], True) - max_rows = parser.get_optional("maxRows", [int]) - is_root = parser.get_optional("isRoot", [bool], False) - indexes_json = parser.get_optional("indexes", [list], []) - parser.finish() - - if max_rows == None: - max_rows = sys.maxint - elif max_rows <= 0: - raise error.Error("maxRows must be at least 1", json) - - if not columns_json: - raise error.Error("table must have at least one column", json) - - columns = {} - for column_name, column_json in columns_json.iteritems(): - _check_id(column_name, json) - columns[column_name] = ColumnSchema.from_json(column_json, - column_name) - - indexes = [] - for index_json in indexes_json: - index = column_set_from_json(index_json, columns) - if not index: - raise error.Error("index must have at least one column", json) - elif len(index) == 1: - index[0].unique = True - for column in index: - if not column.persistent: - raise error.Error("ephemeral columns (such as %s) may " - "not be indexed" % column.name, json) - indexes.append(index) - - return TableSchema(name, columns, mutable, max_rows, is_root, indexes) - - def to_json(self, default_is_root=False): - """Returns this table schema serialized into JSON. - - The "isRoot" member is included in the JSON only if its value would - differ from 'default_is_root'. 
Ordinarily 'default_is_root' should be - false, because ordinarily a table would be not be part of the root set - if its "isRoot" member is omitted. However, garbage collection was not - orginally included in OVSDB, so in older schemas that do not include - any "isRoot" members, every table is implicitly part of the root set. - To serialize such a schema in a way that can be read by older OVSDB - tools, specify 'default_is_root' as True. - """ - json = {} - if not self.mutable: - json["mutable"] = False - if default_is_root != self.is_root: - json["isRoot"] = self.is_root - - json["columns"] = columns = {} - for column in self.columns.itervalues(): - if not column.name.startswith("_"): - columns[column.name] = column.to_json() - - if self.max_rows != sys.maxint: - json["maxRows"] = self.max_rows - - if self.indexes: - json["indexes"] = [] - for index in self.indexes: - json["indexes"].append([column.name for column in index]) - - return json - - -class ColumnSchema(object): - def __init__(self, name, mutable, persistent, type_): - self.name = name - self.mutable = mutable - self.persistent = persistent - self.type = type_ - self.unique = False - - @staticmethod - def from_json(json, name): - parser = ovs.db.parser.Parser(json, "schema for column %s" % name) - mutable = parser.get_optional("mutable", [bool], True) - ephemeral = parser.get_optional("ephemeral", [bool], False) - type_ = types.Type.from_json(parser.get("type", [dict, str, unicode])) - parser.finish() - - return ColumnSchema(name, mutable, not ephemeral, type_) - - def to_json(self): - json = {"type": self.type.to_json()} - if not self.mutable: - json["mutable"] = False - if not self.persistent: - json["ephemeral"] = True - return json diff --git a/ryu/contrib/ovs/db/types.py b/ryu/contrib/ovs/db/types.py deleted file mode 100644 index 5865acd7..00000000 --- a/ryu/contrib/ovs/db/types.py +++ /dev/null @@ -1,587 +0,0 @@ -# Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import uuid - -from ovs.db import error -import ovs.db.parser -import ovs.db.data -import ovs.ovsuuid - - -class AtomicType(object): - def __init__(self, name, default, python_types): - self.name = name - self.default = default - self.python_types = python_types - - @staticmethod - def from_string(s): - if s != "void": - for atomic_type in ATOMIC_TYPES: - if s == atomic_type.name: - return atomic_type - raise error.Error('"%s" is not an atomic-type' % s, s) - - @staticmethod - def from_json(json): - if type(json) not in [str, unicode]: - raise error.Error("atomic-type expected", json) - else: - return AtomicType.from_string(json) - - def __str__(self): - return self.name - - def to_string(self): - return self.name - - def to_json(self): - return self.name - - def default_atom(self): - return ovs.db.data.Atom(self, self.default) - -VoidType = AtomicType("void", None, ()) -IntegerType = AtomicType("integer", 0, (int, long)) -RealType = AtomicType("real", 0.0, (int, long, float)) -BooleanType = AtomicType("boolean", False, (bool,)) -StringType = AtomicType("string", "", (str, unicode)) -UuidType = AtomicType("uuid", ovs.ovsuuid.zero(), (uuid.UUID,)) - -ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType, - UuidType] - - -def escapeCString(src): - dst = "" - for c in src: - if c in "\\\"": - dst += "\\" + c - elif ord(c) < 32: - if c == '\n': - dst += '\\n' - elif c == '\r': - dst += '\\r' 
- elif c == '\a': - dst += '\\a' - elif c == '\b': - dst += '\\b' - elif c == '\f': - dst += '\\f' - elif c == '\t': - dst += '\\t' - elif c == '\v': - dst += '\\v' - else: - dst += '\\%03o' % ord(c) - else: - dst += c - return dst - - -def commafy(x): - """Returns integer x formatted in decimal with thousands set off by - commas.""" - return _commafy("%d" % x) - - -def _commafy(s): - if s.startswith('-'): - return '-' + _commafy(s[1:]) - elif len(s) <= 3: - return s - else: - return _commafy(s[:-3]) + ',' + _commafy(s[-3:]) - - -def returnUnchanged(x): - return x - - -class BaseType(object): - def __init__(self, type_, enum=None, min=None, max=None, - min_length=0, max_length=sys.maxint, ref_table_name=None): - assert isinstance(type_, AtomicType) - self.type = type_ - self.enum = enum - self.min = min - self.max = max - self.min_length = min_length - self.max_length = max_length - self.ref_table_name = ref_table_name - if ref_table_name: - self.ref_type = 'strong' - else: - self.ref_type = None - self.ref_table = None - - def default(self): - return ovs.db.data.Atom.default(self.type) - - def __eq__(self, other): - if not isinstance(other, BaseType): - return NotImplemented - return (self.type == other.type and self.enum == other.enum and - self.min == other.min and self.max == other.max and - self.min_length == other.min_length and - self.max_length == other.max_length and - self.ref_table_name == other.ref_table_name) - - def __ne__(self, other): - if not isinstance(other, BaseType): - return NotImplemented - else: - return not (self == other) - - @staticmethod - def __parse_uint(parser, name, default): - value = parser.get_optional(name, [int, long]) - if value is None: - value = default - else: - max_value = 2 ** 32 - 1 - if not (0 <= value <= max_value): - raise error.Error("%s out of valid range 0 to %d" - % (name, max_value), value) - return value - - @staticmethod - def from_json(json): - if type(json) in [str, unicode]: - return 
BaseType(AtomicType.from_json(json)) - - parser = ovs.db.parser.Parser(json, "ovsdb type") - atomic_type = AtomicType.from_json(parser.get("type", [str, unicode])) - - base = BaseType(atomic_type) - - enum = parser.get_optional("enum", []) - if enum is not None: - base.enum = ovs.db.data.Datum.from_json( - BaseType.get_enum_type(base.type), enum) - elif base.type == IntegerType: - base.min = parser.get_optional("minInteger", [int, long]) - base.max = parser.get_optional("maxInteger", [int, long]) - if (base.min is not None and base.max is not None - and base.min > base.max): - raise error.Error("minInteger exceeds maxInteger", json) - elif base.type == RealType: - base.min = parser.get_optional("minReal", [int, long, float]) - base.max = parser.get_optional("maxReal", [int, long, float]) - if (base.min is not None and base.max is not None - and base.min > base.max): - raise error.Error("minReal exceeds maxReal", json) - elif base.type == StringType: - base.min_length = BaseType.__parse_uint(parser, "minLength", 0) - base.max_length = BaseType.__parse_uint(parser, "maxLength", - sys.maxint) - if base.min_length > base.max_length: - raise error.Error("minLength exceeds maxLength", json) - elif base.type == UuidType: - base.ref_table_name = parser.get_optional("refTable", ['id']) - if base.ref_table_name: - base.ref_type = parser.get_optional("refType", [str, unicode], - "strong") - if base.ref_type not in ['strong', 'weak']: - raise error.Error('refType must be "strong" or "weak" ' - '(not "%s")' % base.ref_type) - parser.finish() - - return base - - def to_json(self): - if not self.has_constraints(): - return self.type.to_json() - - json = {'type': self.type.to_json()} - - if self.enum: - json['enum'] = self.enum.to_json() - - if self.type == IntegerType: - if self.min is not None: - json['minInteger'] = self.min - if self.max is not None: - json['maxInteger'] = self.max - elif self.type == RealType: - if self.min is not None: - json['minReal'] = self.min - if 
self.max is not None: - json['maxReal'] = self.max - elif self.type == StringType: - if self.min_length != 0: - json['minLength'] = self.min_length - if self.max_length != sys.maxint: - json['maxLength'] = self.max_length - elif self.type == UuidType: - if self.ref_table_name: - json['refTable'] = self.ref_table_name - if self.ref_type != 'strong': - json['refType'] = self.ref_type - return json - - def copy(self): - base = BaseType(self.type, self.enum.copy(), self.min, self.max, - self.min_length, self.max_length, self.ref_table_name) - base.ref_table = self.ref_table - return base - - def is_valid(self): - if self.type in (VoidType, BooleanType, UuidType): - return True - elif self.type in (IntegerType, RealType): - return self.min is None or self.max is None or self.min <= self.max - elif self.type == StringType: - return self.min_length <= self.max_length - else: - return False - - def has_constraints(self): - return (self.enum is not None or self.min is not None or - self.max is not None or - self.min_length != 0 or self.max_length != sys.maxint or - self.ref_table_name is not None) - - def without_constraints(self): - return BaseType(self.type) - - @staticmethod - def get_enum_type(atomic_type): - """Returns the type of the 'enum' member for a BaseType whose - 'type' is 'atomic_type'.""" - return Type(BaseType(atomic_type), None, 1, sys.maxint) - - def is_ref(self): - return self.type == UuidType and self.ref_table_name is not None - - def is_strong_ref(self): - return self.is_ref() and self.ref_type == 'strong' - - def is_weak_ref(self): - return self.is_ref() and self.ref_type == 'weak' - - def toEnglish(self, escapeLiteral=returnUnchanged): - if self.type == UuidType and self.ref_table_name: - s = escapeLiteral(self.ref_table_name) - if self.ref_type == 'weak': - s = "weak reference to " + s - return s - else: - return self.type.to_string() - - def constraintsToEnglish(self, escapeLiteral=returnUnchanged, - escapeNumber=returnUnchanged): - if self.enum: - 
literals = [value.toEnglish(escapeLiteral) - for value in self.enum.values] - if len(literals) == 2: - english = 'either %s or %s' % (literals[0], literals[1]) - else: - english = 'one of %s, %s, or %s' % (literals[0], - ', '.join(literals[1:-1]), - literals[-1]) - elif self.min is not None and self.max is not None: - if self.type == IntegerType: - english = 'in range %s to %s' % ( - escapeNumber(commafy(self.min)), - escapeNumber(commafy(self.max))) - else: - english = 'in range %s to %s' % ( - escapeNumber("%g" % self.min), - escapeNumber("%g" % self.max)) - elif self.min is not None: - if self.type == IntegerType: - english = 'at least %s' % escapeNumber(commafy(self.min)) - else: - english = 'at least %s' % escapeNumber("%g" % self.min) - elif self.max is not None: - if self.type == IntegerType: - english = 'at most %s' % escapeNumber(commafy(self.max)) - else: - english = 'at most %s' % escapeNumber("%g" % self.max) - elif self.min_length != 0 and self.max_length != sys.maxint: - if self.min_length == self.max_length: - english = ('exactly %s characters long' - % commafy(self.min_length)) - else: - english = ('between %s and %s characters long' - % (commafy(self.min_length), - commafy(self.max_length))) - elif self.min_length != 0: - return 'at least %s characters long' % commafy(self.min_length) - elif self.max_length != sys.maxint: - english = 'at most %s characters long' % commafy(self.max_length) - else: - english = '' - - return english - - def toCType(self, prefix): - if self.ref_table_name: - return "struct %s%s *" % (prefix, self.ref_table_name.lower()) - else: - return {IntegerType: 'int64_t ', - RealType: 'double ', - UuidType: 'struct uuid ', - BooleanType: 'bool ', - StringType: 'char *'}[self.type] - - def toAtomicType(self): - return "OVSDB_TYPE_%s" % self.type.to_string().upper() - - def copyCValue(self, dst, src): - args = {'dst': dst, 'src': src} - if self.ref_table_name: - return ("%(dst)s = %(src)s->header_.uuid;") % args - elif self.type == 
StringType: - return "%(dst)s = xstrdup(%(src)s);" % args - else: - return "%(dst)s = %(src)s;" % args - - def initCDefault(self, var, is_optional): - if self.ref_table_name: - return "%s = NULL;" % var - elif self.type == StringType and not is_optional: - return '%s = "";' % var - else: - pattern = {IntegerType: '%s = 0;', - RealType: '%s = 0.0;', - UuidType: 'uuid_zero(&%s);', - BooleanType: '%s = false;', - StringType: '%s = NULL;'}[self.type] - return pattern % var - - def cInitBaseType(self, indent, var): - stmts = [] - stmts.append('ovsdb_base_type_init(&%s, %s);' % ( - var, self.toAtomicType())) - if self.enum: - stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);" - % (var, var)) - stmts += self.enum.cInitDatum("%s.enum_" % var) - if self.type == IntegerType: - if self.min is not None: - stmts.append('%s.u.integer.min = INT64_C(%d);' - % (var, self.min)) - if self.max is not None: - stmts.append('%s.u.integer.max = INT64_C(%d);' - % (var, self.max)) - elif self.type == RealType: - if self.min is not None: - stmts.append('%s.u.real.min = %d;' % (var, self.min)) - if self.max is not None: - stmts.append('%s.u.real.max = %d;' % (var, self.max)) - elif self.type == StringType: - if self.min_length is not None: - stmts.append('%s.u.string.minLen = %d;' - % (var, self.min_length)) - if self.max_length != sys.maxint: - stmts.append('%s.u.string.maxLen = %d;' - % (var, self.max_length)) - elif self.type == UuidType: - if self.ref_table_name is not None: - stmts.append('%s.u.uuid.refTableName = "%s";' - % (var, escapeCString(self.ref_table_name))) - stmts.append('%s.u.uuid.refType = OVSDB_REF_%s;' - % (var, self.ref_type.upper())) - return '\n'.join([indent + stmt for stmt in stmts]) - - -class Type(object): - DEFAULT_MIN = 1 - DEFAULT_MAX = 1 - - def __init__(self, key, value=None, n_min=DEFAULT_MIN, n_max=DEFAULT_MAX): - self.key = key - self.value = value - self.n_min = n_min - self.n_max = n_max - - def copy(self): - if self.value is None: - value = None - else: 
- value = self.value.copy() - return Type(self.key.copy(), value, self.n_min, self.n_max) - - def __eq__(self, other): - if not isinstance(other, Type): - return NotImplemented - return (self.key == other.key and self.value == other.value and - self.n_min == other.n_min and self.n_max == other.n_max) - - def __ne__(self, other): - if not isinstance(other, Type): - return NotImplemented - else: - return not (self == other) - - def is_valid(self): - return (self.key.type != VoidType and self.key.is_valid() and - (self.value is None or - (self.value.type != VoidType and self.value.is_valid())) and - self.n_min <= 1 <= self.n_max) - - def is_scalar(self): - return self.n_min == 1 and self.n_max == 1 and not self.value - - def is_optional(self): - return self.n_min == 0 and self.n_max == 1 - - def is_composite(self): - return self.n_max > 1 - - def is_set(self): - return self.value is None and (self.n_min != 1 or self.n_max != 1) - - def is_map(self): - return self.value is not None - - def is_smap(self): - return (self.is_map() - and self.key.type == StringType - and self.value.type == StringType) - - def is_optional_pointer(self): - return (self.is_optional() and not self.value - and (self.key.type == StringType or self.key.ref_table_name)) - - @staticmethod - def __n_from_json(json, default): - if json is None: - return default - elif type(json) == int and 0 <= json <= sys.maxint: - return json - else: - raise error.Error("bad min or max value", json) - - @staticmethod - def from_json(json): - if type(json) in [str, unicode]: - return Type(BaseType.from_json(json)) - - parser = ovs.db.parser.Parser(json, "ovsdb type") - key_json = parser.get("key", [dict, str, unicode]) - value_json = parser.get_optional("value", [dict, str, unicode]) - min_json = parser.get_optional("min", [int]) - max_json = parser.get_optional("max", [int, str, unicode]) - parser.finish() - - key = BaseType.from_json(key_json) - if value_json: - value = BaseType.from_json(value_json) - else: - 
value = None - - n_min = Type.__n_from_json(min_json, Type.DEFAULT_MIN) - - if max_json == 'unlimited': - n_max = sys.maxint - else: - n_max = Type.__n_from_json(max_json, Type.DEFAULT_MAX) - - type_ = Type(key, value, n_min, n_max) - if not type_.is_valid(): - raise error.Error("ovsdb type fails constraint checks", json) - return type_ - - def to_json(self): - if self.is_scalar() and not self.key.has_constraints(): - return self.key.to_json() - - json = {"key": self.key.to_json()} - if self.value is not None: - json["value"] = self.value.to_json() - if self.n_min != Type.DEFAULT_MIN: - json["min"] = self.n_min - if self.n_max == sys.maxint: - json["max"] = "unlimited" - elif self.n_max != Type.DEFAULT_MAX: - json["max"] = self.n_max - return json - - def toEnglish(self, escapeLiteral=returnUnchanged): - keyName = self.key.toEnglish(escapeLiteral) - if self.value: - valueName = self.value.toEnglish(escapeLiteral) - - if self.is_scalar(): - return keyName - elif self.is_optional(): - if self.value: - return "optional %s-%s pair" % (keyName, valueName) - else: - return "optional %s" % keyName - else: - if self.n_max == sys.maxint: - if self.n_min: - quantity = "%s or more " % commafy(self.n_min) - else: - quantity = "" - elif self.n_min: - quantity = "%s to %s " % (commafy(self.n_min), - commafy(self.n_max)) - else: - quantity = "up to %s " % commafy(self.n_max) - - if self.value: - return "map of %s%s-%s pairs" % (quantity, keyName, valueName) - else: - if keyName.endswith('s'): - plural = keyName + "es" - else: - plural = keyName + "s" - return "set of %s%s" % (quantity, plural) - - def constraintsToEnglish(self, escapeLiteral=returnUnchanged, - escapeNumber=returnUnchanged): - constraints = [] - keyConstraints = self.key.constraintsToEnglish(escapeLiteral, - escapeNumber) - if keyConstraints: - if self.value: - constraints.append('key %s' % keyConstraints) - else: - constraints.append(keyConstraints) - - if self.value: - valueConstraints = 
self.value.constraintsToEnglish(escapeLiteral, - escapeNumber) - if valueConstraints: - constraints.append('value %s' % valueConstraints) - - return ', '.join(constraints) - - def cDeclComment(self): - if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType: - return "\t/* Always nonnull. */" - else: - return "" - - def cInitType(self, indent, var): - initKey = self.key.cInitBaseType(indent, "%s.key" % var) - if self.value: - initValue = self.value.cInitBaseType(indent, "%s.value" % var) - else: - initValue = ('%sovsdb_base_type_init(&%s.value, ' - 'OVSDB_TYPE_VOID);' % (indent, var)) - initMin = "%s%s.n_min = %s;" % (indent, var, self.n_min) - if self.n_max == sys.maxint: - n_max = "UINT_MAX" - else: - n_max = self.n_max - initMax = "%s%s.n_max = %s;" % (indent, var, n_max) - return "\n".join((initKey, initValue, initMin, initMax)) diff --git a/ryu/contrib/ovs/dirs.py b/ryu/contrib/ovs/dirs.py deleted file mode 100644 index de1605f1..00000000 --- a/ryu/contrib/ovs/dirs.py +++ /dev/null @@ -1,13 +0,0 @@ -import os -PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """/usr/share/openvswitch""") -RUNDIR = os.environ.get("OVS_RUNDIR", """/var/run/openvswitch""") -LOGDIR = os.environ.get("OVS_LOGDIR", """/var/log/openvswitch""") -BINDIR = os.environ.get("OVS_BINDIR", """/usr/bin""") - -DBDIR = os.environ.get("OVS_DBDIR") -if not DBDIR: - sysconfdir = os.environ.get("OVS_SYSCONFDIR") - if sysconfdir: - DBDIR = "%s/openvswitch" % sysconfdir - else: - DBDIR = """/etc/openvswitch""" diff --git a/ryu/contrib/ovs/fatal_signal.py b/ryu/contrib/ovs/fatal_signal.py deleted file mode 100644 index e6fe7838..00000000 --- a/ryu/contrib/ovs/fatal_signal.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import atexit -import os -import signal - -import ovs.vlog - -_hooks = [] -vlog = ovs.vlog.Vlog("fatal-signal") - - -def add_hook(hook, cancel, run_at_exit): - _init() - _hooks.append((hook, cancel, run_at_exit)) - - -def fork(): - """Clears all of the fatal signal hooks without executing them. If any of - the hooks passed a 'cancel' function to add_hook(), then those functions - will be called, allowing them to free resources, etc. - - Following a fork, one of the resulting processes can call this function to - allow it to terminate without calling the hooks registered before calling - this function. New hooks registered after calling this function will take - effect normally.""" - global _hooks - for hook, cancel, run_at_exit in _hooks: - if cancel: - cancel() - - _hooks = [] - -_added_hook = False -_files = {} - - -def add_file_to_unlink(file): - """Registers 'file' to be unlinked when the program terminates via - sys.exit() or a fatal signal.""" - global _added_hook - if not _added_hook: - _added_hook = True - add_hook(_unlink_files, _cancel_files, True) - _files[file] = None - - -def remove_file_to_unlink(file): - """Unregisters 'file' from being unlinked when the program terminates via - sys.exit() or a fatal signal.""" - if file in _files: - del _files[file] - - -def unlink_file_now(file): - """Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'. 
- Returns 0 if successful, otherwise a positive errno value.""" - error = _unlink(file) - if error: - vlog.warn("could not unlink \"%s\" (%s)" % (file, os.strerror(error))) - remove_file_to_unlink(file) - return error - - -def _unlink_files(): - for file_ in _files: - _unlink(file_) - - -def _cancel_files(): - global _added_hook - global _files - _added_hook = False - _files = {} - - -def _unlink(file_): - try: - os.unlink(file_) - return 0 - except OSError, e: - return e.errno - - -def _signal_handler(signr, _): - _call_hooks(signr) - - # Re-raise the signal with the default handling so that the program - # termination status reflects that we were killed by this signal. - signal.signal(signr, signal.SIG_DFL) - os.kill(os.getpid(), signr) - - -def _atexit_handler(): - _call_hooks(0) - - -recurse = False - - -def _call_hooks(signr): - global recurse - if recurse: - return - recurse = True - - for hook, cancel, run_at_exit in _hooks: - if signr != 0 or run_at_exit: - hook() - - -_inited = False - - -def _init(): - global _inited - if not _inited: - _inited = True - - for signr in (signal.SIGTERM, signal.SIGINT, - signal.SIGHUP, signal.SIGALRM): - if signal.getsignal(signr) == signal.SIG_DFL: - signal.signal(signr, _signal_handler) - atexit.register(_atexit_handler) diff --git a/ryu/contrib/ovs/json.py b/ryu/contrib/ovs/json.py deleted file mode 100644 index d329ee41..00000000 --- a/ryu/contrib/ovs/json.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright (c) 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import StringIO -import sys - -__pychecker__ = 'no-stringiter' - -escapes = {ord('"'): u"\\\"", - ord("\\"): u"\\\\", - ord("\b"): u"\\b", - ord("\f"): u"\\f", - ord("\n"): u"\\n", - ord("\r"): u"\\r", - ord("\t"): u"\\t"} -for esc in range(32): - if esc not in escapes: - escapes[esc] = u"\\u%04x" % esc - -SPACES_PER_LEVEL = 2 - - -class _Serializer(object): - def __init__(self, stream, pretty, sort_keys): - self.stream = stream - self.pretty = pretty - self.sort_keys = sort_keys - self.depth = 0 - - def __serialize_string(self, s): - self.stream.write(u'"%s"' % ''.join(escapes.get(ord(c), c) for c in s)) - - def __indent_line(self): - if self.pretty: - self.stream.write('\n') - self.stream.write(' ' * (SPACES_PER_LEVEL * self.depth)) - - def serialize(self, obj): - if obj is None: - self.stream.write(u"null") - elif obj is False: - self.stream.write(u"false") - elif obj is True: - self.stream.write(u"true") - elif type(obj) in (int, long): - self.stream.write(u"%d" % obj) - elif type(obj) == float: - self.stream.write("%.15g" % obj) - elif type(obj) == unicode: - self.__serialize_string(obj) - elif type(obj) == str: - self.__serialize_string(unicode(obj)) - elif type(obj) == dict: - self.stream.write(u"{") - - self.depth += 1 - self.__indent_line() - - if self.sort_keys: - items = sorted(obj.items()) - else: - items = obj.iteritems() - for i, (key, value) in enumerate(items): - if i > 0: - self.stream.write(u",") - self.__indent_line() - self.__serialize_string(unicode(key)) - self.stream.write(u":") - if self.pretty: - self.stream.write(u' ') - self.serialize(value) - - self.stream.write(u"}") - self.depth -= 1 - elif type(obj) in (list, tuple): - self.stream.write(u"[") - self.depth += 1 - - if obj: - self.__indent_line() - - for i, value in enumerate(obj): - if i > 0: - self.stream.write(u",") - self.__indent_line() - self.serialize(value) - - 
self.depth -= 1 - self.stream.write(u"]") - else: - raise Exception("can't serialize %s as JSON" % obj) - - -def to_stream(obj, stream, pretty=False, sort_keys=True): - _Serializer(stream, pretty, sort_keys).serialize(obj) - - -def to_file(obj, name, pretty=False, sort_keys=True): - stream = open(name, "w") - try: - to_stream(obj, stream, pretty, sort_keys) - finally: - stream.close() - - -def to_string(obj, pretty=False, sort_keys=True): - output = StringIO.StringIO() - to_stream(obj, output, pretty, sort_keys) - s = output.getvalue() - output.close() - return s - - -def from_stream(stream): - p = Parser(check_trailer=True) - while True: - buf = stream.read(4096) - if buf == "" or p.feed(buf) != len(buf): - break - return p.finish() - - -def from_file(name): - stream = open(name, "r") - try: - return from_stream(stream) - finally: - stream.close() - - -def from_string(s): - try: - s = unicode(s, 'utf-8') - except UnicodeDecodeError, e: - seq = ' '.join(["0x%2x" % ord(c) - for c in e.object[e.start:e.end] if ord(c) >= 0x80]) - return ("not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq) - p = Parser(check_trailer=True) - p.feed(s) - return p.finish() - - -class Parser(object): - ## Maximum height of parsing stack. ## - MAX_HEIGHT = 1000 - - def __init__(self, check_trailer=False): - self.check_trailer = check_trailer - - # Lexical analysis. - self.lex_state = Parser.__lex_start - self.buffer = "" - self.line_number = 0 - self.column_number = 0 - self.byte_number = 0 - - # Parsing. - self.parse_state = Parser.__parse_start - self.stack = [] - self.member_name = None - - # Parse status. 
- self.done = False - self.error = None - - def __lex_start_space(self, c): - pass - - def __lex_start_alpha(self, c): - self.buffer = c - self.lex_state = Parser.__lex_keyword - - def __lex_start_token(self, c): - self.__parser_input(c) - - def __lex_start_number(self, c): - self.buffer = c - self.lex_state = Parser.__lex_number - - def __lex_start_string(self, _): - self.lex_state = Parser.__lex_string - - def __lex_start_error(self, c): - if ord(c) >= 32 and ord(c) < 128: - self.__error("invalid character '%s'" % c) - else: - self.__error("invalid character U+%04x" % ord(c)) - - __lex_start_actions = {} - for c in " \t\n\r": - __lex_start_actions[c] = __lex_start_space - for c in "abcdefghijklmnopqrstuvwxyz": - __lex_start_actions[c] = __lex_start_alpha - for c in "[{]}:,": - __lex_start_actions[c] = __lex_start_token - for c in "-0123456789": - __lex_start_actions[c] = __lex_start_number - __lex_start_actions['"'] = __lex_start_string - - def __lex_start(self, c): - Parser.__lex_start_actions.get( - c, Parser.__lex_start_error)(self, c) - return True - - __lex_alpha = {} - for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": - __lex_alpha[c] = True - - def __lex_finish_keyword(self): - if self.buffer == "false": - self.__parser_input(False) - elif self.buffer == "true": - self.__parser_input(True) - elif self.buffer == "null": - self.__parser_input(None) - else: - self.__error("invalid keyword '%s'" % self.buffer) - - def __lex_keyword(self, c): - if c in Parser.__lex_alpha: - self.buffer += c - return True - else: - self.__lex_finish_keyword() - return False - - __number_re = re.compile("(-)?(0|[1-9][0-9]*)" - "(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$") - - def __lex_finish_number(self): - s = self.buffer - m = Parser.__number_re.match(s) - if m: - sign, integer, fraction, exp = m.groups() - if (exp is not None and - (long(exp) > sys.maxint or long(exp) < -sys.maxint - 1)): - self.__error("exponent outside valid range") - return - - if fraction is 
not None and len(fraction.lstrip('0')) == 0: - fraction = None - - sig_string = integer - if fraction is not None: - sig_string += fraction - significand = int(sig_string) - - pow10 = 0 - if fraction is not None: - pow10 -= len(fraction) - if exp is not None: - pow10 += long(exp) - - if significand == 0: - self.__parser_input(0) - return - elif significand <= 2 ** 63: - while pow10 > 0 and significand <= 2 ** 63: - significand *= 10 - pow10 -= 1 - while pow10 < 0 and significand % 10 == 0: - significand /= 10 - pow10 += 1 - if (pow10 == 0 and - ((not sign and significand < 2 ** 63) or - (sign and significand <= 2 ** 63))): - if sign: - self.__parser_input(-significand) - else: - self.__parser_input(significand) - return - - value = float(s) - if value == float("inf") or value == float("-inf"): - self.__error("number outside valid range") - return - if value == 0: - # Suppress negative zero. - value = 0 - self.__parser_input(value) - elif re.match("-?0[0-9]", s): - self.__error("leading zeros not allowed") - elif re.match("-([^0-9]|$)", s): - self.__error("'-' must be followed by digit") - elif re.match("-?(0|[1-9][0-9]*)\.([^0-9]|$)", s): - self.__error("decimal point must be followed by digit") - elif re.search("e[-+]?([^0-9]|$)", s): - self.__error("exponent must contain at least one digit") - else: - self.__error("syntax error in number") - - def __lex_number(self, c): - if c in ".0123456789eE-+": - self.buffer += c - return True - else: - self.__lex_finish_number() - return False - - __4hex_re = re.compile("[0-9a-fA-F]{4}") - - def __lex_4hex(self, s): - if len(s) < 4: - self.__error("quoted string ends within \\u escape") - elif not Parser.__4hex_re.match(s): - self.__error("malformed \\u escape") - elif s == "0000": - self.__error("null bytes not supported in quoted strings") - else: - return int(s, 16) - - @staticmethod - def __is_leading_surrogate(c): - """Returns true if 'c' is a Unicode code point for a leading - surrogate.""" - return c >= 0xd800 and c 
<= 0xdbff - - @staticmethod - def __is_trailing_surrogate(c): - """Returns true if 'c' is a Unicode code point for a trailing - surrogate.""" - return c >= 0xdc00 and c <= 0xdfff - - @staticmethod - def __utf16_decode_surrogate_pair(leading, trailing): - """Returns the unicode code point corresponding to leading surrogate - 'leading' and trailing surrogate 'trailing'. The return value will not - make any sense if 'leading' or 'trailing' are not in the correct ranges - for leading or trailing surrogates.""" - # Leading surrogate: 110110wwwwxxxxxx - # Trailing surrogate: 110111xxxxxxxxxx - # Code point: 000uuuuuxxxxxxxxxxxxxxxx - w = (leading >> 6) & 0xf - u = w + 1 - x0 = leading & 0x3f - x1 = trailing & 0x3ff - return (u << 16) | (x0 << 10) | x1 - __unescape = {'"': u'"', - "\\": u"\\", - "/": u"/", - "b": u"\b", - "f": u"\f", - "n": u"\n", - "r": u"\r", - "t": u"\t"} - - def __lex_finish_string(self): - inp = self.buffer - out = u"" - while len(inp): - backslash = inp.find('\\') - if backslash == -1: - out += inp - break - out += inp[:backslash] - inp = inp[backslash + 1:] - if inp == "": - self.__error("quoted string may not end with backslash") - return - - replacement = Parser.__unescape.get(inp[0]) - if replacement is not None: - out += replacement - inp = inp[1:] - continue - elif inp[0] != u'u': - self.__error("bad escape \\%s" % inp[0]) - return - - c0 = self.__lex_4hex(inp[1:5]) - if c0 is None: - return - inp = inp[5:] - - if Parser.__is_leading_surrogate(c0): - if inp[:2] != u'\\u': - self.__error("malformed escaped surrogate pair") - return - c1 = self.__lex_4hex(inp[2:6]) - if c1 is None: - return - if not Parser.__is_trailing_surrogate(c1): - self.__error("second half of escaped surrogate pair is " - "not trailing surrogate") - return - code_point = Parser.__utf16_decode_surrogate_pair(c0, c1) - inp = inp[6:] - else: - code_point = c0 - out += unichr(code_point) - self.__parser_input('string', out) - - def __lex_string_escape(self, c): - self.buffer 
+= c - self.lex_state = Parser.__lex_string - return True - - def __lex_string(self, c): - if c == '\\': - self.buffer += c - self.lex_state = Parser.__lex_string_escape - elif c == '"': - self.__lex_finish_string() - elif ord(c) >= 0x20: - self.buffer += c - else: - self.__error("U+%04X must be escaped in quoted string" % ord(c)) - return True - - def __lex_input(self, c): - eat = self.lex_state(self, c) - assert eat is True or eat is False - return eat - - def __parse_start(self, token, unused_string): - if token == '{': - self.__push_object() - elif token == '[': - self.__push_array() - else: - self.__error("syntax error at beginning of input") - - def __parse_end(self, unused_token, unused_string): - self.__error("trailing garbage at end of input") - - def __parse_object_init(self, token, string): - if token == '}': - self.__parser_pop() - else: - self.__parse_object_name(token, string) - - def __parse_object_name(self, token, string): - if token == 'string': - self.member_name = string - self.parse_state = Parser.__parse_object_colon - else: - self.__error("syntax error parsing object expecting string") - - def __parse_object_colon(self, token, unused_string): - if token == ":": - self.parse_state = Parser.__parse_object_value - else: - self.__error("syntax error parsing object expecting ':'") - - def __parse_object_value(self, token, string): - self.__parse_value(token, string, Parser.__parse_object_next) - - def __parse_object_next(self, token, unused_string): - if token == ",": - self.parse_state = Parser.__parse_object_name - elif token == "}": - self.__parser_pop() - else: - self.__error("syntax error expecting '}' or ','") - - def __parse_array_init(self, token, string): - if token == ']': - self.__parser_pop() - else: - self.__parse_array_value(token, string) - - def __parse_array_value(self, token, string): - self.__parse_value(token, string, Parser.__parse_array_next) - - def __parse_array_next(self, token, unused_string): - if token == ",": - 
self.parse_state = Parser.__parse_array_value - elif token == "]": - self.__parser_pop() - else: - self.__error("syntax error expecting ']' or ','") - - def __parser_input(self, token, string=None): - self.lex_state = Parser.__lex_start - self.buffer = "" - self.parse_state(self, token, string) - - def __put_value(self, value): - top = self.stack[-1] - if type(top) == dict: - top[self.member_name] = value - else: - top.append(value) - - def __parser_push(self, new_json, next_state): - if len(self.stack) < Parser.MAX_HEIGHT: - if len(self.stack) > 0: - self.__put_value(new_json) - self.stack.append(new_json) - self.parse_state = next_state - else: - self.__error("input exceeds maximum nesting depth %d" % - Parser.MAX_HEIGHT) - - def __push_object(self): - self.__parser_push({}, Parser.__parse_object_init) - - def __push_array(self): - self.__parser_push([], Parser.__parse_array_init) - - def __parser_pop(self): - if len(self.stack) == 1: - self.parse_state = Parser.__parse_end - if not self.check_trailer: - self.done = True - else: - self.stack.pop() - top = self.stack[-1] - if type(top) == list: - self.parse_state = Parser.__parse_array_next - else: - self.parse_state = Parser.__parse_object_next - - def __parse_value(self, token, string, next_state): - if token in [False, None, True] or type(token) in [int, long, float]: - self.__put_value(token) - elif token == 'string': - self.__put_value(string) - else: - if token == '{': - self.__push_object() - elif token == '[': - self.__push_array() - else: - self.__error("syntax error expecting value") - return - self.parse_state = next_state - - def __error(self, message): - if self.error is None: - self.error = ("line %d, column %d, byte %d: %s" - % (self.line_number, self.column_number, - self.byte_number, message)) - self.done = True - - def feed(self, s): - i = 0 - while True: - if self.done or i >= len(s): - return i - - c = s[i] - if self.__lex_input(c): - self.byte_number += 1 - if c == '\n': - self.column_number = 
0 - self.line_number += 1 - else: - self.column_number += 1 - - i += 1 - - def is_done(self): - return self.done - - def finish(self): - if self.lex_state == Parser.__lex_start: - pass - elif self.lex_state in (Parser.__lex_string, - Parser.__lex_string_escape): - self.__error("unexpected end of input in quoted string") - else: - self.__lex_input(" ") - - if self.parse_state == Parser.__parse_start: - self.__error("empty input stream") - elif self.parse_state != Parser.__parse_end: - self.__error("unexpected end of input") - - if self.error == None: - assert len(self.stack) == 1 - return self.stack.pop() - else: - return self.error diff --git a/ryu/contrib/ovs/jsonrpc.py b/ryu/contrib/ovs/jsonrpc.py deleted file mode 100644 index c1540eb7..00000000 --- a/ryu/contrib/ovs/jsonrpc.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright (c) 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import errno -import os - -import ovs.json -import ovs.poller -import ovs.reconnect -import ovs.stream -import ovs.timeval -import ovs.util -import ovs.vlog - -EOF = ovs.util.EOF -vlog = ovs.vlog.Vlog("jsonrpc") - - -class Message(object): - T_REQUEST = 0 # Request. - T_NOTIFY = 1 # Notification. - T_REPLY = 2 # Successful reply. - T_ERROR = 3 # Error reply. 
- - __types = {T_REQUEST: "request", - T_NOTIFY: "notification", - T_REPLY: "reply", - T_ERROR: "error"} - - def __init__(self, type_, method, params, result, error, id): - self.type = type_ - self.method = method - self.params = params - self.result = result - self.error = error - self.id = id - - _next_id = 0 - - @staticmethod - def _create_id(): - this_id = Message._next_id - Message._next_id += 1 - return this_id - - @staticmethod - def create_request(method, params): - return Message(Message.T_REQUEST, method, params, None, None, - Message._create_id()) - - @staticmethod - def create_notify(method, params): - return Message(Message.T_NOTIFY, method, params, None, None, - None) - - @staticmethod - def create_reply(result, id): - return Message(Message.T_REPLY, None, None, result, None, id) - - @staticmethod - def create_error(error, id): - return Message(Message.T_ERROR, None, None, None, error, id) - - @staticmethod - def type_to_string(type_): - return Message.__types[type_] - - def __validate_arg(self, value, name, must_have): - if (value is not None) == (must_have != 0): - return None - else: - type_name = Message.type_to_string(self.type) - if must_have: - verb = "must" - else: - verb = "must not" - return "%s %s have \"%s\"" % (type_name, verb, name) - - def is_valid(self): - if self.params is not None and type(self.params) != list: - return "\"params\" must be JSON array" - - pattern = {Message.T_REQUEST: 0x11001, - Message.T_NOTIFY: 0x11000, - Message.T_REPLY: 0x00101, - Message.T_ERROR: 0x00011}.get(self.type) - if pattern is None: - return "invalid JSON-RPC message type %s" % self.type - - return ( - self.__validate_arg(self.method, "method", pattern & 0x10000) or - self.__validate_arg(self.params, "params", pattern & 0x1000) or - self.__validate_arg(self.result, "result", pattern & 0x100) or - self.__validate_arg(self.error, "error", pattern & 0x10) or - self.__validate_arg(self.id, "id", pattern & 0x1)) - - @staticmethod - def from_json(json): - if 
type(json) != dict: - return "message is not a JSON object" - - # Make a copy to avoid modifying the caller's dict. - json = dict(json) - - if "method" in json: - method = json.pop("method") - if type(method) not in [str, unicode]: - return "method is not a JSON string" - else: - method = None - - params = json.pop("params", None) - result = json.pop("result", None) - error = json.pop("error", None) - id_ = json.pop("id", None) - if len(json): - return "message has unexpected member \"%s\"" % json.popitem()[0] - - if result is not None: - msg_type = Message.T_REPLY - elif error is not None: - msg_type = Message.T_ERROR - elif id_ is not None: - msg_type = Message.T_REQUEST - else: - msg_type = Message.T_NOTIFY - - msg = Message(msg_type, method, params, result, error, id_) - validation_error = msg.is_valid() - if validation_error is not None: - return validation_error - else: - return msg - - def to_json(self): - json = {} - - if self.method is not None: - json["method"] = self.method - - if self.params is not None: - json["params"] = self.params - - if self.result is not None or self.type == Message.T_ERROR: - json["result"] = self.result - - if self.error is not None or self.type == Message.T_REPLY: - json["error"] = self.error - - if self.id is not None or self.type == Message.T_NOTIFY: - json["id"] = self.id - - return json - - def __str__(self): - s = [Message.type_to_string(self.type)] - if self.method is not None: - s.append("method=\"%s\"" % self.method) - if self.params is not None: - s.append("params=" + ovs.json.to_string(self.params)) - if self.result is not None: - s.append("result=" + ovs.json.to_string(self.result)) - if self.error is not None: - s.append("error=" + ovs.json.to_string(self.error)) - if self.id is not None: - s.append("id=" + ovs.json.to_string(self.id)) - return ", ".join(s) - - -class Connection(object): - def __init__(self, stream): - self.name = stream.name - self.stream = stream - self.status = 0 - self.input = "" - self.output = 
"" - self.parser = None - self.received_bytes = 0 - - def close(self): - self.stream.close() - self.stream = None - - def run(self): - if self.status: - return - - while len(self.output): - retval = self.stream.send(self.output) - if retval >= 0: - self.output = self.output[retval:] - else: - if retval != -errno.EAGAIN: - vlog.warn("%s: send error: %s" % - (self.name, os.strerror(-retval))) - self.error(-retval) - break - - def wait(self, poller): - if not self.status: - self.stream.run_wait(poller) - if len(self.output): - self.stream.send_wait(poller) - - def get_status(self): - return self.status - - def get_backlog(self): - if self.status != 0: - return 0 - else: - return len(self.output) - - def get_received_bytes(self): - return self.received_bytes - - def __log_msg(self, title, msg): - vlog.dbg("%s: %s %s" % (self.name, title, msg)) - - def send(self, msg): - if self.status: - return self.status - - self.__log_msg("send", msg) - - was_empty = len(self.output) == 0 - self.output += ovs.json.to_string(msg.to_json()) - if was_empty: - self.run() - return self.status - - def send_block(self, msg): - error = self.send(msg) - if error: - return error - - while True: - self.run() - if not self.get_backlog() or self.get_status(): - return self.status - - poller = ovs.poller.Poller() - self.wait(poller) - poller.block() - - def recv(self): - if self.status: - return self.status, None - - while True: - if not self.input: - error, data = self.stream.recv(4096) - if error: - if error == errno.EAGAIN: - return error, None - else: - # XXX rate-limit - vlog.warn("%s: receive error: %s" - % (self.name, os.strerror(error))) - self.error(error) - return self.status, None - elif not data: - self.error(EOF) - return EOF, None - else: - self.input += data - self.received_bytes += len(data) - else: - if self.parser is None: - self.parser = ovs.json.Parser() - self.input = self.input[self.parser.feed(self.input):] - if self.parser.is_done(): - msg = self.__process_msg() - if msg: 
- return 0, msg - else: - return self.status, None - - def recv_block(self): - while True: - error, msg = self.recv() - if error != errno.EAGAIN: - return error, msg - - self.run() - - poller = ovs.poller.Poller() - self.wait(poller) - self.recv_wait(poller) - poller.block() - - def transact_block(self, request): - id_ = request.id - - error = self.send(request) - reply = None - while not error: - error, reply = self.recv_block() - if (reply - and (reply.type == Message.T_REPLY - or reply.type == Message.T_ERROR) - and reply.id == id_): - break - return error, reply - - def __process_msg(self): - json = self.parser.finish() - self.parser = None - if type(json) in [str, unicode]: - # XXX rate-limit - vlog.warn("%s: error parsing stream: %s" % (self.name, json)) - self.error(errno.EPROTO) - return - - msg = Message.from_json(json) - if not isinstance(msg, Message): - # XXX rate-limit - vlog.warn("%s: received bad JSON-RPC message: %s" - % (self.name, msg)) - self.error(errno.EPROTO) - return - - self.__log_msg("received", msg) - return msg - - def recv_wait(self, poller): - if self.status or self.input: - poller.immediate_wake() - else: - self.stream.recv_wait(poller) - - def error(self, error): - if self.status == 0: - self.status = error - self.stream.close() - self.output = "" - - -class Session(object): - """A JSON-RPC session with reconnection.""" - - def __init__(self, reconnect, rpc): - self.reconnect = reconnect - self.rpc = rpc - self.stream = None - self.pstream = None - self.seqno = 0 - - @staticmethod - def open(name): - """Creates and returns a Session that maintains a JSON-RPC session to - 'name', which should be a string acceptable to ovs.stream.Stream or - ovs.stream.PassiveStream's initializer. - - If 'name' is an active connection method, e.g. "tcp:127.1.2.3", the new - session connects and reconnects, with back-off, to 'name'. - - If 'name' is a passive connection method, e.g. "ptcp:", the new session - listens for connections to 'name'. 
It maintains at most one connection - at any given time. Any new connection causes the previous one (if any) - to be dropped.""" - reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec()) - reconnect.set_name(name) - reconnect.enable(ovs.timeval.msec()) - - if ovs.stream.PassiveStream.is_valid_name(name): - reconnect.set_passive(True, ovs.timeval.msec()) - - if ovs.stream.stream_or_pstream_needs_probes(name): - reconnect.set_probe_interval(0) - - return Session(reconnect, None) - - @staticmethod - def open_unreliably(jsonrpc): - reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec()) - reconnect.set_quiet(True) - reconnect.set_name(jsonrpc.name) - reconnect.set_max_tries(0) - reconnect.connected(ovs.timeval.msec()) - return Session(reconnect, jsonrpc) - - def close(self): - if self.rpc is not None: - self.rpc.close() - self.rpc = None - if self.stream is not None: - self.stream.close() - self.stream = None - if self.pstream is not None: - self.pstream.close() - self.pstream = None - - def __disconnect(self): - if self.rpc is not None: - self.rpc.error(EOF) - self.rpc.close() - self.rpc = None - self.seqno += 1 - elif self.stream is not None: - self.stream.close() - self.stream = None - self.seqno += 1 - - def __connect(self): - self.__disconnect() - - name = self.reconnect.get_name() - if not self.reconnect.is_passive(): - error, self.stream = ovs.stream.Stream.open(name) - if not error: - self.reconnect.connecting(ovs.timeval.msec()) - else: - self.reconnect.connect_failed(ovs.timeval.msec(), error) - elif self.pstream is not None: - error, self.pstream = ovs.stream.PassiveStream.open(name) - if not error: - self.reconnect.listening(ovs.timeval.msec()) - else: - self.reconnect.connect_failed(ovs.timeval.msec(), error) - - self.seqno += 1 - - def run(self): - if self.pstream is not None: - error, stream = self.pstream.accept() - if error == 0: - if self.rpc or self.stream: - # XXX rate-limit - vlog.info("%s: new connection replacing active " - "connection" % 
self.reconnect.get_name()) - self.__disconnect() - self.reconnect.connected(ovs.timeval.msec()) - self.rpc = Connection(stream) - elif error != errno.EAGAIN: - self.reconnect.listen_error(ovs.timeval.msec(), error) - self.pstream.close() - self.pstream = None - - if self.rpc: - backlog = self.rpc.get_backlog() - self.rpc.run() - if self.rpc.get_backlog() < backlog: - # Data previously caught in a queue was successfully sent (or - # there's an error, which we'll catch below). - # - # We don't count data that is successfully sent immediately as - # activity, because there's a lot of queuing downstream from - # us, which means that we can push a lot of data into a - # connection that has stalled and won't ever recover. - self.reconnect.activity(ovs.timeval.msec()) - - error = self.rpc.get_status() - if error != 0: - self.reconnect.disconnected(ovs.timeval.msec(), error) - self.__disconnect() - elif self.stream is not None: - self.stream.run() - error = self.stream.connect() - if error == 0: - self.reconnect.connected(ovs.timeval.msec()) - self.rpc = Connection(self.stream) - self.stream = None - elif error != errno.EAGAIN: - self.reconnect.connect_failed(ovs.timeval.msec(), error) - self.stream.close() - self.stream = None - - action = self.reconnect.run(ovs.timeval.msec()) - if action == ovs.reconnect.CONNECT: - self.__connect() - elif action == ovs.reconnect.DISCONNECT: - self.reconnect.disconnected(ovs.timeval.msec(), 0) - self.__disconnect() - elif action == ovs.reconnect.PROBE: - if self.rpc: - request = Message.create_request("echo", []) - request.id = "echo" - self.rpc.send(request) - else: - assert action == None - - def wait(self, poller): - if self.rpc is not None: - self.rpc.wait(poller) - elif self.stream is not None: - self.stream.run_wait(poller) - self.stream.connect_wait(poller) - if self.pstream is not None: - self.pstream.wait(poller) - self.reconnect.wait(poller, ovs.timeval.msec()) - - def get_backlog(self): - if self.rpc is not None: - return 
self.rpc.get_backlog() - else: - return 0 - - def get_name(self): - return self.reconnect.get_name() - - def send(self, msg): - if self.rpc is not None: - return self.rpc.send(msg) - else: - return errno.ENOTCONN - - def recv(self): - if self.rpc is not None: - received_bytes = self.rpc.get_received_bytes() - error, msg = self.rpc.recv() - if received_bytes != self.rpc.get_received_bytes(): - # Data was successfully received. - # - # Previously we only counted receiving a full message as - # activity, but with large messages or a slow connection that - # policy could time out the session mid-message. - self.reconnect.activity(ovs.timeval.msec()) - - if not error: - if msg.type == Message.T_REQUEST and msg.method == "echo": - # Echo request. Send reply. - self.send(Message.create_reply(msg.params, msg.id)) - elif msg.type == Message.T_REPLY and msg.id == "echo": - # It's a reply to our echo request. Suppress it. - pass - else: - return msg - return None - - def recv_wait(self, poller): - if self.rpc is not None: - self.rpc.recv_wait(poller) - - def is_alive(self): - if self.rpc is not None or self.stream is not None: - return True - else: - max_tries = self.reconnect.get_max_tries() - return max_tries is None or max_tries > 0 - - def is_connected(self): - return self.rpc is not None - - def get_seqno(self): - return self.seqno - - def force_reconnect(self): - self.reconnect.force_reconnect(ovs.timeval.msec()) diff --git a/ryu/contrib/ovs/ovsuuid.py b/ryu/contrib/ovs/ovsuuid.py deleted file mode 100644 index 56fdad05..00000000 --- a/ryu/contrib/ovs/ovsuuid.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2009, 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import uuid - -from ovs.db import error -import ovs.db.parser - -uuidRE = re.compile("^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$" - .replace('x', '[0-9a-fA-F]')) - - -def zero(): - return uuid.UUID(int=0) - - -def is_valid_string(s): - return uuidRE.match(s) is not None - - -def from_string(s): - if not is_valid_string(s): - raise error.Error("%s is not a valid UUID" % s) - return uuid.UUID(s) - - -def from_json(json, symtab=None): - try: - s = ovs.db.parser.unwrap_json(json, "uuid", [str, unicode], "string") - if not uuidRE.match(s): - raise error.Error("\"%s\" is not a valid UUID" % s, json) - return uuid.UUID(s) - except error.Error, e: - if not symtab: - raise e - try: - name = ovs.db.parser.unwrap_json(json, "named-uuid", - [str, unicode], "string") - except error.Error: - raise e - - if name not in symtab: - symtab[name] = uuid.uuid4() - return symtab[name] - - -def to_json(uuid_): - return ["uuid", str(uuid_)] - - -def to_c_assignment(uuid_, var): - """Returns an array of strings, each of which contain a C statement. The - statements assign 'uuid_' to a "struct uuid" as defined in Open vSwitch - lib/uuid.h.""" - - hex_string = uuid_.hex - return ["%s.parts[%d] = 0x%s;" % (var, x, hex_string[x * 8:(x + 1) * 8]) - for x in range(4)] diff --git a/ryu/contrib/ovs/poller.py b/ryu/contrib/ovs/poller.py deleted file mode 100644 index ffd6a399..00000000 --- a/ryu/contrib/ovs/poller.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2010 Nicira, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import errno -import ovs.timeval -import ovs.vlog -import select -import socket - -try: - import eventlet.patcher - - def _using_eventlet_green_select(): - return eventlet.patcher.is_monkey_patched(select) -except: - def _using_eventlet_green_select(): - return False - -vlog = ovs.vlog.Vlog("poller") - -POLLIN = 0x001 -POLLOUT = 0x004 -POLLERR = 0x008 -POLLHUP = 0x010 -POLLNVAL = 0x020 - -# eventlet/gevent doesn't support select.poll. If select.poll is used, -# python interpreter is blocked as a whole instead of switching from the -# current thread that is about to block to other runnable thread. -# So emulate select.poll by select.select because using python means that -# performance isn't so important. -class _SelectSelect(object): - """ select.poll emulation by using select.select. - Only register and poll are needed at the moment. - """ - def __init__(self): - self.rlist = [] - self.wlist = [] - self.xlist = [] - - def register(self, fd, events): - if isinstance(fd, socket.socket): - fd = fd.fileno() - assert isinstance(fd, int) - if events & POLLIN: - self.rlist.append(fd) - events &= ~POLLIN - if events & POLLOUT: - self.wlist.append(fd) - events &= ~POLLOUT - if events: - self.xlist.append(fd) - - def poll(self, timeout): - if timeout == -1: - # epoll uses -1 for infinite timeout, select uses None. 
- timeout = None - else: - timeout = float(timeout) / 1000 - # XXX workaround a bug in eventlet - # see https://github.com/eventlet/eventlet/pull/25 - if timeout == 0 and _using_eventlet_green_select(): - timeout = 0.1 - - rlist, wlist, xlist = select.select(self.rlist, self.wlist, self.xlist, - timeout) - # collections.defaultdict is introduced by python 2.5 and - # XenServer uses python 2.4. We don't use it for XenServer. - # events_dict = collections.defaultdict(int) - # events_dict[fd] |= event - events_dict = {} - for fd in rlist: - events_dict[fd] = events_dict.get(fd, 0) | POLLIN - for fd in wlist: - events_dict[fd] = events_dict.get(fd, 0) | POLLOUT - for fd in xlist: - events_dict[fd] = events_dict.get(fd, 0) | (POLLERR | - POLLHUP | - POLLNVAL) - return events_dict.items() - - -SelectPoll = _SelectSelect -# If eventlet/gevent isn't used, we can use select.poll by replacing -# _SelectPoll with select.poll class -# _SelectPoll = select.poll - - -class Poller(object): - """High-level wrapper around the "poll" system call. - - Intended usage is for the program's main loop to go about its business - servicing whatever events it needs to. Then, when it runs out of immediate - tasks, it calls each subordinate module or object's "wait" function, which - in turn calls one (or more) of the functions Poller.fd_wait(), - Poller.immediate_wake(), and Poller.timer_wait() to register to be awakened - when the appropriate event occurs. Then the main loop calls - Poller.block(), which blocks until one of the registered events happens.""" - - def __init__(self): - self.__reset() - - def fd_wait(self, fd, events): - """Registers 'fd' as waiting for the specified 'events' (which should - be select.POLLIN or select.POLLOUT or their bitwise-OR). The following - call to self.block() will wake up when 'fd' becomes ready for one or - more of the requested events. - - The event registration is one-shot: only the following call to - self.block() is affected. 
The event will need to be re-registered - after self.block() is called if it is to persist. - - 'fd' may be an integer file descriptor or an object with a fileno() - method that returns an integer file descriptor.""" - self.poll.register(fd, events) - - def __timer_wait(self, msec): - if self.timeout < 0 or msec < self.timeout: - self.timeout = msec - - def timer_wait(self, msec): - """Causes the following call to self.block() to block for no more than - 'msec' milliseconds. If 'msec' is nonpositive, the following call to - self.block() will not block at all. - - The timer registration is one-shot: only the following call to - self.block() is affected. The timer will need to be re-registered - after self.block() is called if it is to persist.""" - if msec <= 0: - self.immediate_wake() - else: - self.__timer_wait(msec) - - def timer_wait_until(self, msec): - """Causes the following call to self.block() to wake up when the - current time, as returned by ovs.timeval.msec(), reaches 'msec' or - later. If 'msec' is earlier than the current time, the following call - to self.block() will not block at all. - - The timer registration is one-shot: only the following call to - self.block() is affected. 
The timer will need to be re-registered - after self.block() is called if it is to persist.""" - now = ovs.timeval.msec() - if msec <= now: - self.immediate_wake() - else: - self.__timer_wait(msec - now) - - def immediate_wake(self): - """Causes the following call to self.block() to wake up immediately, - without blocking.""" - self.timeout = 0 - - def block(self): - """Blocks until one or more of the events registered with - self.fd_wait() occurs, or until the minimum duration registered with - self.timer_wait() elapses, or not at all if self.immediate_wake() has - been called.""" - try: - try: - events = self.poll.poll(self.timeout) - self.__log_wakeup(events) - except select.error, e: - # XXX rate-limit - error, msg = e - if error != errno.EINTR: - vlog.err("poll: %s" % e[1]) - finally: - self.__reset() - - def __log_wakeup(self, events): - if not events: - vlog.dbg("%d-ms timeout" % self.timeout) - else: - for fd, revents in events: - if revents != 0: - s = "" - if revents & POLLIN: - s += "[POLLIN]" - if revents & POLLOUT: - s += "[POLLOUT]" - if revents & POLLERR: - s += "[POLLERR]" - if revents & POLLHUP: - s += "[POLLHUP]" - if revents & POLLNVAL: - s += "[POLLNVAL]" - vlog.dbg("%s on fd %d" % (s, fd)) - - def __reset(self): - self.poll = SelectPoll() - self.timeout = -1 diff --git a/ryu/contrib/ovs/process.py b/ryu/contrib/ovs/process.py deleted file mode 100644 index d7561310..00000000 --- a/ryu/contrib/ovs/process.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2010, 2011 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import signal - - -def _signal_status_msg(type_, signr): - s = "%s by signal %d" % (type_, signr) - for name in signal.__dict__: - if name.startswith("SIG") and getattr(signal, name) == signr: - return "%s (%s)" % (s, name) - return s - - -def status_msg(status): - """Given 'status', which is a process status in the form reported by - waitpid(2) and returned by process_status(), returns a string describing - how the process terminated.""" - if os.WIFEXITED(status): - s = "exit status %d" % os.WEXITSTATUS(status) - elif os.WIFSIGNALED(status): - s = _signal_status_msg("killed", os.WTERMSIG(status)) - elif os.WIFSTOPPED(status): - s = _signal_status_msg("stopped", os.WSTOPSIG(status)) - else: - s = "terminated abnormally (%x)" % status - if os.WCOREDUMP(status): - s += ", core dumped" - return s diff --git a/ryu/contrib/ovs/reconnect.py b/ryu/contrib/ovs/reconnect.py deleted file mode 100644 index 39dd556d..00000000 --- a/ryu/contrib/ovs/reconnect.py +++ /dev/null @@ -1,588 +0,0 @@ -# Copyright (c) 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import ovs.vlog -import ovs.util - -# Values returned by Reconnect.run() -CONNECT = 'connect' -DISCONNECT = 'disconnect' -PROBE = 'probe' - -EOF = ovs.util.EOF -vlog = ovs.vlog.Vlog("reconnect") - - -class Reconnect(object): - """A finite-state machine for connecting and reconnecting to a network - resource with exponential backoff. It also provides optional support for - detecting a connection on which the peer is no longer responding. - - The library does not implement anything networking related, only an FSM for - networking code to use. - - Many Reconnect methods take a "now" argument. This makes testing easier - since there is no hidden state. When not testing, just pass the return - value of ovs.time.msec(). (Perhaps this design should be revisited - later.)""" - - class Void(object): - name = "VOID" - is_connected = False - - @staticmethod - def deadline(fsm): - return None - - @staticmethod - def run(fsm, now): - return None - - class Listening(object): - name = "LISTENING" - is_connected = False - - @staticmethod - def deadline(fsm): - return None - - @staticmethod - def run(fsm, now): - return None - - class Backoff(object): - name = "BACKOFF" - is_connected = False - - @staticmethod - def deadline(fsm): - return fsm.state_entered + fsm.backoff - - @staticmethod - def run(fsm, now): - return CONNECT - - class ConnectInProgress(object): - name = "CONNECTING" - is_connected = False - - @staticmethod - def deadline(fsm): - return fsm.state_entered + max(1000, fsm.backoff) - - @staticmethod - def run(fsm, now): - return DISCONNECT - - class Active(object): - name = "ACTIVE" - is_connected = True - - @staticmethod - def deadline(fsm): - if fsm.probe_interval: - base = max(fsm.last_activity, fsm.state_entered) - return base + fsm.probe_interval - return None - - @staticmethod - def run(fsm, now): - vlog.dbg("%s: idle %d ms, sending inactivity probe" - % (fsm.name, - now - max(fsm.last_activity, fsm.state_entered))) - fsm._transition(now, 
Reconnect.Idle) - return PROBE - - class Idle(object): - name = "IDLE" - is_connected = True - - @staticmethod - def deadline(fsm): - if fsm.probe_interval: - return fsm.state_entered + fsm.probe_interval - return None - - @staticmethod - def run(fsm, now): - vlog.err("%s: no response to inactivity probe after %.3g " - "seconds, disconnecting" - % (fsm.name, (now - fsm.state_entered) / 1000.0)) - return DISCONNECT - - class Reconnect(object): - name = "RECONNECT" - is_connected = False - - @staticmethod - def deadline(fsm): - return fsm.state_entered - - @staticmethod - def run(fsm, now): - return DISCONNECT - - def __init__(self, now): - """Creates and returns a new reconnect FSM with default settings. The - FSM is initially disabled. The caller will likely want to call - self.enable() and self.set_name() on the returned object.""" - - self.name = "void" - self.min_backoff = 1000 - self.max_backoff = 8000 - self.probe_interval = 5000 - self.passive = False - self.info_level = vlog.info - - self.state = Reconnect.Void - self.state_entered = now - self.backoff = 0 - self.last_activity = now - self.last_connected = None - self.last_disconnected = None - self.max_tries = None - - self.creation_time = now - self.n_attempted_connections = 0 - self.n_successful_connections = 0 - self.total_connected_duration = 0 - self.seqno = 0 - - def set_quiet(self, quiet): - """If 'quiet' is true, this object will log informational messages at - debug level, by default keeping them out of log files. This is - appropriate if the connection is one that is expected to be - short-lived, so that the log messages are merely distracting. - - If 'quiet' is false, this object logs informational messages at info - level. This is the default. 
- - This setting has no effect on the log level of debugging, warning, or - error messages.""" - if quiet: - self.info_level = vlog.dbg - else: - self.info_level = vlog.info - - def get_name(self): - return self.name - - def set_name(self, name): - """Sets this object's name to 'name'. If 'name' is None, then "void" - is used instead. - - The name is used in log messages.""" - if name is None: - self.name = "void" - else: - self.name = name - - def get_min_backoff(self): - """Return the minimum number of milliseconds to back off between - consecutive connection attempts. The default is 1000 ms.""" - return self.min_backoff - - def get_max_backoff(self): - """Return the maximum number of milliseconds to back off between - consecutive connection attempts. The default is 8000 ms.""" - return self.max_backoff - - def get_probe_interval(self): - """Returns the "probe interval" in milliseconds. If this is zero, it - disables the connection keepalive feature. If it is nonzero, then if - the interval passes while the FSM is connected and without - self.activity() being called, self.run() returns ovs.reconnect.PROBE. - If the interval passes again without self.activity() being called, - self.run() returns ovs.reconnect.DISCONNECT.""" - return self.probe_interval - - def set_max_tries(self, max_tries): - """Limits the maximum number of times that this object will ask the - client to try to reconnect to 'max_tries'. None (the default) means an - unlimited number of tries. - - After the number of tries has expired, the FSM will disable itself - instead of backing off and retrying.""" - self.max_tries = max_tries - - def get_max_tries(self): - """Returns the current remaining number of connection attempts, - None if the number is unlimited.""" - return self.max_tries - - def set_backoff(self, min_backoff, max_backoff): - """Configures the backoff parameters for this FSM. 
'min_backoff' is - the minimum number of milliseconds, and 'max_backoff' is the maximum, - between connection attempts. - - 'min_backoff' must be at least 1000, and 'max_backoff' must be greater - than or equal to 'min_backoff'.""" - self.min_backoff = max(min_backoff, 1000) - if self.max_backoff: - self.max_backoff = max(max_backoff, 1000) - else: - self.max_backoff = 8000 - if self.min_backoff > self.max_backoff: - self.max_backoff = self.min_backoff - - if (self.state == Reconnect.Backoff and - self.backoff > self.max_backoff): - self.backoff = self.max_backoff - - def set_probe_interval(self, probe_interval): - """Sets the "probe interval" to 'probe_interval', in milliseconds. If - this is zero, it disables the connection keepalive feature. If it is - nonzero, then if the interval passes while this FSM is connected and - without self.activity() being called, self.run() returns - ovs.reconnect.PROBE. If the interval passes again without - self.activity() being called, self.run() returns - ovs.reconnect.DISCONNECT. - - If 'probe_interval' is nonzero, then it will be forced to a value of at - least 1000 ms.""" - if probe_interval: - self.probe_interval = max(1000, probe_interval) - else: - self.probe_interval = 0 - - def is_passive(self): - """Returns true if 'fsm' is in passive mode, false if 'fsm' is in - active mode (the default).""" - return self.passive - - def set_passive(self, passive, now): - """Configures this FSM for active or passive mode. In active mode (the - default), the FSM is attempting to connect to a remote host. 
In - passive mode, the FSM is listening for connections from a remote - host.""" - if self.passive != passive: - self.passive = passive - - if ((passive and self.state in (Reconnect.ConnectInProgress, - Reconnect.Reconnect)) or - (not passive and self.state == Reconnect.Listening - and self.__may_retry())): - self._transition(now, Reconnect.Backoff) - self.backoff = 0 - - def is_enabled(self): - """Returns true if this FSM has been enabled with self.enable(). - Calling another function that indicates a change in connection state, - such as self.disconnected() or self.force_reconnect(), will also enable - a reconnect FSM.""" - return self.state != Reconnect.Void - - def enable(self, now): - """If this FSM is disabled (the default for newly created FSMs), - enables it, so that the next call to reconnect_run() for 'fsm' will - return ovs.reconnect.CONNECT. - - If this FSM is not disabled, this function has no effect.""" - if self.state == Reconnect.Void and self.__may_retry(): - self._transition(now, Reconnect.Backoff) - self.backoff = 0 - - def disable(self, now): - """Disables this FSM. Until 'fsm' is enabled again, self.run() will - always return 0.""" - if self.state != Reconnect.Void: - self._transition(now, Reconnect.Void) - - def force_reconnect(self, now): - """If this FSM is enabled and currently connected (or attempting to - connect), forces self.run() to return ovs.reconnect.DISCONNECT the next - time it is called, which should cause the client to drop the connection - (or attempt), back off, and then reconnect.""" - if self.state in (Reconnect.ConnectInProgress, - Reconnect.Active, - Reconnect.Idle): - self._transition(now, Reconnect.Reconnect) - - def disconnected(self, now, error): - """Tell this FSM that the connection dropped or that a connection - attempt failed. 'error' specifies the reason: a positive value - represents an errno value, EOF indicates that the connection was closed - by the peer (e.g. 
read() returned 0), and 0 indicates no specific - error. - - The FSM will back off, then reconnect.""" - if self.state not in (Reconnect.Backoff, Reconnect.Void): - # Report what happened - if self.state in (Reconnect.Active, Reconnect.Idle): - if error > 0: - vlog.warn("%s: connection dropped (%s)" - % (self.name, os.strerror(error))) - elif error == EOF: - self.info_level("%s: connection closed by peer" - % self.name) - else: - self.info_level("%s: connection dropped" % self.name) - elif self.state == Reconnect.Listening: - if error > 0: - vlog.warn("%s: error listening for connections (%s)" - % (self.name, os.strerror(error))) - else: - self.info_level("%s: error listening for connections" - % self.name) - else: - if self.passive: - type_ = "listen" - else: - type_ = "connection" - if error > 0: - vlog.warn("%s: %s attempt failed (%s)" - % (self.name, type_, os.strerror(error))) - else: - self.info_level("%s: %s attempt timed out" - % (self.name, type_)) - - if (self.state in (Reconnect.Active, Reconnect.Idle)): - self.last_disconnected = now - - # Back off - if (self.state in (Reconnect.Active, Reconnect.Idle) and - (self.last_activity - self.last_connected >= self.backoff or - self.passive)): - if self.passive: - self.backoff = 0 - else: - self.backoff = self.min_backoff - else: - if self.backoff < self.min_backoff: - self.backoff = self.min_backoff - elif self.backoff >= self.max_backoff / 2: - self.backoff = self.max_backoff - else: - self.backoff *= 2 - - if self.passive: - self.info_level("%s: waiting %.3g seconds before trying " - "to listen again" - % (self.name, self.backoff / 1000.0)) - else: - self.info_level("%s: waiting %.3g seconds before reconnect" - % (self.name, self.backoff / 1000.0)) - - if self.__may_retry(): - self._transition(now, Reconnect.Backoff) - else: - self._transition(now, Reconnect.Void) - - def connecting(self, now): - """Tell this FSM that a connection or listening attempt is in progress. 
- - The FSM will start a timer, after which the connection or listening - attempt will be aborted (by returning ovs.reconnect.DISCONNECT from - self.run()).""" - if self.state != Reconnect.ConnectInProgress: - if self.passive: - self.info_level("%s: listening..." % self.name) - else: - self.info_level("%s: connecting..." % self.name) - self._transition(now, Reconnect.ConnectInProgress) - - def listening(self, now): - """Tell this FSM that the client is listening for connection attempts. - This state last indefinitely until the client reports some change. - - The natural progression from this state is for the client to report - that a connection has been accepted or is in progress of being - accepted, by calling self.connecting() or self.connected(). - - The client may also report that listening failed (e.g. accept() - returned an unexpected error such as ENOMEM) by calling - self.listen_error(), in which case the FSM will back off and eventually - return ovs.reconnect.CONNECT from self.run() to tell the client to try - listening again.""" - if self.state != Reconnect.Listening: - self.info_level("%s: listening..." % self.name) - self._transition(now, Reconnect.Listening) - - def listen_error(self, now, error): - """Tell this FSM that the client's attempt to accept a connection - failed (e.g. accept() returned an unexpected error such as ENOMEM). - - If the FSM is currently listening (self.listening() was called), it - will back off and eventually return ovs.reconnect.CONNECT from - self.run() to tell the client to try listening again. If there is an - active connection, this will be delayed until that connection drops.""" - if self.state == Reconnect.Listening: - self.disconnected(now, error) - - def connected(self, now): - """Tell this FSM that the connection was successful. - - The FSM will start the probe interval timer, which is reset by - self.activity(). If the timer expires, a probe will be sent (by - returning ovs.reconnect.PROBE from self.run(). 
If the timer expires - again without being reset, the connection will be aborted (by returning - ovs.reconnect.DISCONNECT from self.run().""" - if not self.state.is_connected: - self.connecting(now) - - self.info_level("%s: connected" % self.name) - self._transition(now, Reconnect.Active) - self.last_connected = now - - def connect_failed(self, now, error): - """Tell this FSM that the connection attempt failed. - - The FSM will back off and attempt to reconnect.""" - self.connecting(now) - self.disconnected(now, error) - - def activity(self, now): - """Tell this FSM that some activity occurred on the connection. This - resets the probe interval timer, so that the connection is known not to - be idle.""" - if self.state != Reconnect.Active: - self._transition(now, Reconnect.Active) - self.last_activity = now - - def _transition(self, now, state): - if self.state == Reconnect.ConnectInProgress: - self.n_attempted_connections += 1 - if state == Reconnect.Active: - self.n_successful_connections += 1 - - connected_before = self.state.is_connected - connected_now = state.is_connected - if connected_before != connected_now: - if connected_before: - self.total_connected_duration += now - self.last_connected - self.seqno += 1 - - vlog.dbg("%s: entering %s" % (self.name, state.name)) - self.state = state - self.state_entered = now - - def run(self, now): - """Assesses whether any action should be taken on this FSM. The return - value is one of: - - - None: The client need not take any action. - - - Active client, ovs.reconnect.CONNECT: The client should start a - connection attempt and indicate this by calling - self.connecting(). If the connection attempt has definitely - succeeded, it should call self.connected(). If the connection - attempt has definitely failed, it should call - self.connect_failed(). 
- - The FSM is smart enough to back off correctly after successful - connections that quickly abort, so it is OK to call - self.connected() after a low-level successful connection - (e.g. connect()) even if the connection might soon abort due to a - failure at a high-level (e.g. SSL negotiation failure). - - - Passive client, ovs.reconnect.CONNECT: The client should try to - listen for a connection, if it is not already listening. It - should call self.listening() if successful, otherwise - self.connecting() or reconnected_connect_failed() if the attempt - is in progress or definitely failed, respectively. - - A listening passive client should constantly attempt to accept a - new connection and report an accepted connection with - self.connected(). - - - ovs.reconnect.DISCONNECT: The client should abort the current - connection or connection attempt or listen attempt and call - self.disconnected() or self.connect_failed() to indicate it. - - - ovs.reconnect.PROBE: The client should send some kind of request - to the peer that will elicit a response, to ensure that the - connection is indeed in working order. 
(This will only be - returned if the "probe interval" is nonzero--see - self.set_probe_interval()).""" - - deadline = self.state.deadline(self) - if deadline is not None and now >= deadline: - return self.state.run(self, now) - else: - return None - - def wait(self, poller, now): - """Causes the next call to poller.block() to wake up when self.run() - should be called.""" - timeout = self.timeout(now) - if timeout >= 0: - poller.timer_wait(timeout) - - def timeout(self, now): - """Returns the number of milliseconds after which self.run() should be - called if nothing else notable happens in the meantime, or None if this - is currently unnecessary.""" - deadline = self.state.deadline(self) - if deadline is not None: - remaining = deadline - now - return max(0, remaining) - else: - return None - - def is_connected(self): - """Returns True if this FSM is currently believed to be connected, that - is, if self.connected() was called more recently than any call to - self.connect_failed() or self.disconnected() or self.disable(), and - False otherwise.""" - return self.state.is_connected - - def get_last_connect_elapsed(self, now): - """Returns the number of milliseconds since 'fsm' was last connected - to its peer. Returns None if never connected.""" - if self.last_connected: - return now - self.last_connected - else: - return None - - def get_last_disconnect_elapsed(self, now): - """Returns the number of milliseconds since 'fsm' was last disconnected - from its peer. 
Returns None if never disconnected.""" - if self.last_disconnected: - return now - self.last_disconnected - else: - return None - - def get_stats(self, now): - class Stats(object): - pass - stats = Stats() - stats.creation_time = self.creation_time - stats.last_connected = self.last_connected - stats.last_disconnected = self.last_disconnected - stats.last_activity = self.last_activity - stats.backoff = self.backoff - stats.seqno = self.seqno - stats.is_connected = self.is_connected() - stats.msec_since_connect = self.get_last_connect_elapsed(now) - stats.msec_since_disconnect = self.get_last_disconnect_elapsed(now) - stats.total_connected_duration = self.total_connected_duration - if self.is_connected(): - stats.total_connected_duration += ( - self.get_last_connect_elapsed(now)) - stats.n_attempted_connections = self.n_attempted_connections - stats.n_successful_connections = self.n_successful_connections - stats.state = self.state.name - stats.state_elapsed = now - self.state_entered - return stats - - def __may_retry(self): - if self.max_tries is None: - return True - elif self.max_tries > 0: - self.max_tries -= 1 - return True - else: - return False diff --git a/ryu/contrib/ovs/socket_util.py b/ryu/contrib/ovs/socket_util.py deleted file mode 100644 index 1fc80fd3..00000000 --- a/ryu/contrib/ovs/socket_util.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) 2010, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import errno -import os -import select -import socket -import sys - -import ovs.fatal_signal -import ovs.poller -import ovs.vlog - -vlog = ovs.vlog.Vlog("socket_util") - - -def make_unix_socket(style, nonblock, bind_path, connect_path): - """Creates a Unix domain socket in the given 'style' (either - socket.SOCK_DGRAM or socket.SOCK_STREAM) that is bound to 'bind_path' (if - 'bind_path' is not None) and connected to 'connect_path' (if 'connect_path' - is not None). If 'nonblock' is true, the socket is made non-blocking. - - Returns (error, socket): on success 'error' is 0 and 'socket' is a new - socket object, on failure 'error' is a positive errno value and 'socket' is - None.""" - - try: - sock = socket.socket(socket.AF_UNIX, style) - except socket.error, e: - return get_exception_errno(e), None - - try: - if nonblock: - set_nonblocking(sock) - if bind_path is not None: - # Delete bind_path but ignore ENOENT. - try: - os.unlink(bind_path) - except OSError, e: - if e.errno != errno.ENOENT: - return e.errno, None - - ovs.fatal_signal.add_file_to_unlink(bind_path) - sock.bind(bind_path) - - try: - if sys.hexversion >= 0x02060000: - os.fchmod(sock.fileno(), 0700) - else: - os.chmod("/dev/fd/%d" % sock.fileno(), 0700) - except OSError, e: - pass - if connect_path is not None: - try: - sock.connect(connect_path) - except socket.error, e: - if get_exception_errno(e) != errno.EINPROGRESS: - raise - return 0, sock - except socket.error, e: - sock.close() - if bind_path is not None: - ovs.fatal_signal.unlink_file_now(bind_path) - return get_exception_errno(e), None - - -def check_connection_completion(sock): - p = ovs.poller.SelectPoll() - p.register(sock, ovs.poller.POLLOUT) - if len(p.poll(0)) == 1: - return get_socket_error(sock) - else: - return errno.EAGAIN - - -def inet_parse_active(target, default_port): - address = target.split(":") - host_name = address[0] - if not host_name: - raise ValueError("%s: bad peer name format" % target) - if len(address) >= 2: - port 
= int(address[1]) - elif default_port: - port = default_port - else: - raise ValueError("%s: port number must be specified" % target) - return (host_name, port) - - -def inet_open_active(style, target, default_port, dscp): - address = inet_parse_active(target, default_port) - try: - sock = socket.socket(socket.AF_INET, style, 0) - except socket.error, e: - return get_exception_errno(e), None - - try: - set_nonblocking(sock) - set_dscp(sock, dscp) - try: - sock.connect(address) - except socket.error, e: - if get_exception_errno(e) != errno.EINPROGRESS: - raise - return 0, sock - except socket.error, e: - sock.close() - return get_exception_errno(e), None - - -def get_socket_error(sock): - """Returns the errno value associated with 'socket' (0 if no error) and - resets the socket's error status.""" - return sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - - -def get_exception_errno(e): - """A lot of methods on Python socket objects raise socket.error, but that - exception is documented as having two completely different forms of - arguments: either a string or a (errno, string) tuple. We only want the - errno.""" - if type(e.args) == tuple: - return e.args[0] - else: - return errno.EPROTO - - -null_fd = -1 - - -def get_null_fd(): - """Returns a readable and writable fd for /dev/null, if successful, - otherwise a negative errno value. The caller must not close the returned - fd (because the same fd will be handed out to subsequent callers).""" - global null_fd - if null_fd < 0: - try: - null_fd = os.open("/dev/null", os.O_RDWR) - except OSError, e: - vlog.err("could not open /dev/null: %s" % os.strerror(e.errno)) - return -e.errno - return null_fd - - -def write_fully(fd, buf): - """Returns an (error, bytes_written) tuple where 'error' is 0 on success, - otherwise a positive errno value, and 'bytes_written' is the number of - bytes that were written before the error occurred. 
'error' is 0 if and - only if 'bytes_written' is len(buf).""" - bytes_written = 0 - if len(buf) == 0: - return 0, 0 - while True: - try: - retval = os.write(fd, buf) - assert retval >= 0 - if retval == len(buf): - return 0, bytes_written + len(buf) - elif retval == 0: - vlog.warn("write returned 0") - return errno.EPROTO, bytes_written - else: - bytes_written += retval - buf = buf[:retval] - except OSError, e: - return e.errno, bytes_written - - -def set_nonblocking(sock): - try: - sock.setblocking(0) - except socket.error, e: - vlog.err("could not set nonblocking mode on socket: %s" - % os.strerror(get_socket_error(e))) - - -def set_dscp(sock, dscp): - if dscp > 63: - raise ValueError("Invalid dscp %d" % dscp) - val = dscp << 2 - sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, val) diff --git a/ryu/contrib/ovs/stream.py b/ryu/contrib/ovs/stream.py deleted file mode 100644 index c640ebf5..00000000 --- a/ryu/contrib/ovs/stream.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (c) 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import errno -import os -import socket - -import ovs.poller -import ovs.socket_util -import ovs.vlog - -vlog = ovs.vlog.Vlog("stream") - - -def stream_or_pstream_needs_probes(name): - """ 1 if the stream or pstream specified by 'name' needs periodic probes to - verify connectivity. For [p]streams which need probes, it can take a long - time to notice the connection was dropped. 
Returns 0 if probes aren't - needed, and -1 if 'name' is invalid""" - - if PassiveStream.is_valid_name(name) or Stream.is_valid_name(name): - # Only unix and punix are supported currently. - return 0 - else: - return -1 - - -class Stream(object): - """Bidirectional byte stream. Currently only Unix domain sockets - are implemented.""" - - # States. - __S_CONNECTING = 0 - __S_CONNECTED = 1 - __S_DISCONNECTED = 2 - - # Kinds of events that one might wait for. - W_CONNECT = 0 # Connect complete (success or failure). - W_RECV = 1 # Data received. - W_SEND = 2 # Send buffer room available. - - _SOCKET_METHODS = {} - - @staticmethod - def register_method(method, cls): - Stream._SOCKET_METHODS[method + ":"] = cls - - @staticmethod - def _find_method(name): - for method, cls in Stream._SOCKET_METHODS.items(): - if name.startswith(method): - return cls - return None - - @staticmethod - def is_valid_name(name): - """Returns True if 'name' is a stream name in the form "TYPE:ARGS" and - TYPE is a supported stream type (currently only "unix:" and "tcp:"), - otherwise False.""" - return bool(Stream._find_method(name)) - - def __init__(self, socket, name, status): - self.socket = socket - self.name = name - if status == errno.EAGAIN: - self.state = Stream.__S_CONNECTING - elif status == 0: - self.state = Stream.__S_CONNECTED - else: - self.state = Stream.__S_DISCONNECTED - - self.error = 0 - - # Default value of dscp bits for connection between controller and manager. - # Value of IPTOS_PREC_INTERNETCONTROL = 0xc0 which is defined - # in is used. - IPTOS_PREC_INTERNETCONTROL = 0xc0 - DSCP_DEFAULT = IPTOS_PREC_INTERNETCONTROL >> 2 - - @staticmethod - def open(name, dscp=DSCP_DEFAULT): - """Attempts to connect a stream to a remote peer. 'name' is a - connection name in the form "TYPE:ARGS", where TYPE is an active stream - class's name and ARGS are stream class-specific. Currently the only - supported TYPEs are "unix" and "tcp". 
- - Returns (error, stream): on success 'error' is 0 and 'stream' is the - new Stream, on failure 'error' is a positive errno value and 'stream' - is None. - - Never returns errno.EAGAIN or errno.EINPROGRESS. Instead, returns 0 - and a new Stream. The connect() method can be used to check for - successful connection completion.""" - cls = Stream._find_method(name) - if not cls: - return errno.EAFNOSUPPORT, None - - suffix = name.split(":", 1)[1] - error, sock = cls._open(suffix, dscp) - if error: - return error, None - else: - status = ovs.socket_util.check_connection_completion(sock) - return 0, Stream(sock, name, status) - - @staticmethod - def _open(suffix, dscp): - raise NotImplementedError("This method must be overrided by subclass") - - @staticmethod - def open_block((error, stream)): - """Blocks until a Stream completes its connection attempt, either - succeeding or failing. (error, stream) should be the tuple returned by - Stream.open(). Returns a tuple of the same form. - - Typical usage: - error, stream = Stream.open_block(Stream.open("unix:/tmp/socket"))""" - - if not error: - while True: - error = stream.connect() - if error != errno.EAGAIN: - break - stream.run() - poller = ovs.poller.Poller() - stream.run_wait(poller) - stream.connect_wait(poller) - poller.block() - assert error != errno.EINPROGRESS - - if error and stream: - stream.close() - stream = None - return error, stream - - def close(self): - self.socket.close() - - def __scs_connecting(self): - retval = ovs.socket_util.check_connection_completion(self.socket) - assert retval != errno.EINPROGRESS - if retval == 0: - self.state = Stream.__S_CONNECTED - elif retval != errno.EAGAIN: - self.state = Stream.__S_DISCONNECTED - self.error = retval - - def connect(self): - """Tries to complete the connection on this stream. If the connection - is complete, returns 0 if the connection was successful or a positive - errno value if it failed. 
If the connection is still in progress, - returns errno.EAGAIN.""" - - if self.state == Stream.__S_CONNECTING: - self.__scs_connecting() - - if self.state == Stream.__S_CONNECTING: - return errno.EAGAIN - elif self.state == Stream.__S_CONNECTED: - return 0 - else: - assert self.state == Stream.__S_DISCONNECTED - return self.error - - def recv(self, n): - """Tries to receive up to 'n' bytes from this stream. Returns a - (error, string) tuple: - - - If successful, 'error' is zero and 'string' contains between 1 - and 'n' bytes of data. - - - On error, 'error' is a positive errno value. - - - If the connection has been closed in the normal fashion or if 'n' - is 0, the tuple is (0, ""). - - The recv function will not block waiting for data to arrive. If no - data have been received, it returns (errno.EAGAIN, "") immediately.""" - - retval = self.connect() - if retval != 0: - return (retval, "") - elif n == 0: - return (0, "") - - try: - return (0, self.socket.recv(n)) - except socket.error, e: - return (ovs.socket_util.get_exception_errno(e), "") - - def send(self, buf): - """Tries to send 'buf' on this stream. - - If successful, returns the number of bytes sent, between 1 and - len(buf). 0 is only a valid return value if len(buf) is 0. - - On error, returns a negative errno value. - - Will not block. 
If no bytes can be immediately accepted for - transmission, returns -errno.EAGAIN immediately.""" - - retval = self.connect() - if retval != 0: - return -retval - elif len(buf) == 0: - return 0 - - try: - return self.socket.send(buf) - except socket.error, e: - return -ovs.socket_util.get_exception_errno(e) - - def run(self): - pass - - def run_wait(self, poller): - pass - - def wait(self, poller, wait): - assert wait in (Stream.W_CONNECT, Stream.W_RECV, Stream.W_SEND) - - if self.state == Stream.__S_DISCONNECTED: - poller.immediate_wake() - return - - if self.state == Stream.__S_CONNECTING: - wait = Stream.W_CONNECT - if wait == Stream.W_RECV: - poller.fd_wait(self.socket, ovs.poller.POLLIN) - else: - poller.fd_wait(self.socket, ovs.poller.POLLOUT) - - def connect_wait(self, poller): - self.wait(poller, Stream.W_CONNECT) - - def recv_wait(self, poller): - self.wait(poller, Stream.W_RECV) - - def send_wait(self, poller): - self.wait(poller, Stream.W_SEND) - - def __del__(self): - # Don't delete the file: we might have forked. - self.socket.close() - - -class PassiveStream(object): - @staticmethod - def is_valid_name(name): - """Returns True if 'name' is a passive stream name in the form - "TYPE:ARGS" and TYPE is a supported passive stream type (currently only - "punix:"), otherwise False.""" - return name.startswith("punix:") - - def __init__(self, sock, name, bind_path): - self.name = name - self.socket = sock - self.bind_path = bind_path - - @staticmethod - def open(name): - """Attempts to start listening for remote stream connections. 'name' - is a connection name in the form "TYPE:ARGS", where TYPE is an passive - stream class's name and ARGS are stream class-specific. Currently the - only supported TYPE is "punix". 
- - Returns (error, pstream): on success 'error' is 0 and 'pstream' is the - new PassiveStream, on failure 'error' is a positive errno value and - 'pstream' is None.""" - if not PassiveStream.is_valid_name(name): - return errno.EAFNOSUPPORT, None - - bind_path = name[6:] - error, sock = ovs.socket_util.make_unix_socket(socket.SOCK_STREAM, - True, bind_path, None) - if error: - return error, None - - try: - sock.listen(10) - except socket.error, e: - vlog.err("%s: listen: %s" % (name, os.strerror(e.error))) - sock.close() - return e.error, None - - return 0, PassiveStream(sock, name, bind_path) - - def close(self): - """Closes this PassiveStream.""" - self.socket.close() - if self.bind_path is not None: - ovs.fatal_signal.unlink_file_now(self.bind_path) - self.bind_path = None - - def accept(self): - """Tries to accept a new connection on this passive stream. Returns - (error, stream): if successful, 'error' is 0 and 'stream' is the new - Stream object, and on failure 'error' is a positive errno value and - 'stream' is None. - - Will not block waiting for a connection. If no connection is ready to - be accepted, returns (errno.EAGAIN, None) immediately.""" - - while True: - try: - sock, addr = self.socket.accept() - ovs.socket_util.set_nonblocking(sock) - return 0, Stream(sock, "unix:%s" % addr, 0) - except socket.error, e: - error = ovs.socket_util.get_exception_errno(e) - if error != errno.EAGAIN: - # XXX rate-limit - vlog.dbg("accept: %s" % os.strerror(error)) - return error, None - - def wait(self, poller): - poller.fd_wait(self.socket, ovs.poller.POLLIN) - - def __del__(self): - # Don't delete the file: we might have forked. 
- self.socket.close() - - -def usage(name): - return """ -Active %s connection methods: - unix:FILE Unix domain socket named FILE - tcp:IP:PORT TCP socket to IP with port no of PORT - -Passive %s connection methods: - punix:FILE Listen on Unix domain socket FILE""" % (name, name) - - -class UnixStream(Stream): - @staticmethod - def _open(suffix, dscp): - connect_path = suffix - return ovs.socket_util.make_unix_socket(socket.SOCK_STREAM, - True, None, connect_path) -Stream.register_method("unix", UnixStream) - - -class TCPStream(Stream): - @staticmethod - def _open(suffix, dscp): - error, sock = ovs.socket_util.inet_open_active(socket.SOCK_STREAM, - suffix, 0, dscp) - if not error: - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - return error, sock -Stream.register_method("tcp", TCPStream) diff --git a/ryu/contrib/ovs/unixctl/__init__.py b/ryu/contrib/ovs/unixctl/__init__.py deleted file mode 100644 index 715f2db5..00000000 --- a/ryu/contrib/ovs/unixctl/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import types - -import ovs.util - -commands = {} -strtypes = types.StringTypes - - -class _UnixctlCommand(object): - def __init__(self, usage, min_args, max_args, callback, aux): - self.usage = usage - self.min_args = min_args - self.max_args = max_args - self.callback = callback - self.aux = aux - - -def _unixctl_help(conn, unused_argv, unused_aux): - reply = "The available commands are:\n" - command_names = sorted(commands.keys()) - for name in command_names: - reply += " " - usage = commands[name].usage - if usage: - reply += "%-23s %s" % (name, usage) - else: - reply += name - reply += "\n" - conn.reply(reply) - - -def command_register(name, usage, min_args, max_args, callback, aux): - """ Registers a command with the given 'name' to be exposed by the - UnixctlServer. 'usage' describes the arguments to the command; it is used - only for presentation to the user in "help" output. - - 'callback' is called when the command is received. It is passed a - UnixctlConnection object, the list of arguments as unicode strings, and - 'aux'. Normally 'callback' should reply by calling - UnixctlConnection.reply() or UnixctlConnection.reply_error() before it - returns, but if the command cannot be handled immediately, then it can - defer the reply until later. 
A given connection can only process a single - request at a time, so a reply must be made eventually to avoid blocking - that connection.""" - - assert isinstance(name, strtypes) - assert isinstance(usage, strtypes) - assert isinstance(min_args, int) - assert isinstance(max_args, int) - assert isinstance(callback, types.FunctionType) - - if name not in commands: - commands[name] = _UnixctlCommand(usage, min_args, max_args, callback, - aux) - -def socket_name_from_target(target): - assert isinstance(target, strtypes) - - if target.startswith("/"): - return 0, target - - pidfile_name = "%s/%s.pid" % (ovs.dirs.RUNDIR, target) - pid = ovs.daemon.read_pidfile(pidfile_name) - if pid < 0: - return -pid, "cannot read pidfile \"%s\"" % pidfile_name - - return 0, "%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, target, pid) - -command_register("help", "", 0, 0, _unixctl_help, None) diff --git a/ryu/contrib/ovs/unixctl/client.py b/ryu/contrib/ovs/unixctl/client.py deleted file mode 100644 index 2176009a..00000000 --- a/ryu/contrib/ovs/unixctl/client.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import errno -import os -import types - -import ovs.jsonrpc -import ovs.stream -import ovs.util - - -vlog = ovs.vlog.Vlog("unixctl_client") -strtypes = types.StringTypes - - -class UnixctlClient(object): - def __init__(self, conn): - assert isinstance(conn, ovs.jsonrpc.Connection) - self._conn = conn - - def transact(self, command, argv): - assert isinstance(command, strtypes) - assert isinstance(argv, list) - for arg in argv: - assert isinstance(arg, strtypes) - - request = ovs.jsonrpc.Message.create_request(command, argv) - error, reply = self._conn.transact_block(request) - - if error: - vlog.warn("error communicating with %s: %s" - % (self._conn.name, os.strerror(error))) - return error, None, None - - if reply.error is not None: - return 0, str(reply.error), None - else: - assert reply.result is not None - return 0, None, str(reply.result) - - def close(self): - self._conn.close() - self.conn = None - - @staticmethod - def create(path): - assert isinstance(path, str) - - unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path) - error, stream = ovs.stream.Stream.open_block( - ovs.stream.Stream.open(unix)) - - if error: - vlog.warn("failed to connect to %s" % path) - return error, None - - return 0, UnixctlClient(ovs.jsonrpc.Connection(stream)) diff --git a/ryu/contrib/ovs/unixctl/server.py b/ryu/contrib/ovs/unixctl/server.py deleted file mode 100644 index 18e1cf20..00000000 --- a/ryu/contrib/ovs/unixctl/server.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (c) 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import errno -import os -import types - -import ovs.dirs -import ovs.jsonrpc -import ovs.stream -import ovs.unixctl -import ovs.util -import ovs.version -import ovs.vlog - -Message = ovs.jsonrpc.Message -vlog = ovs.vlog.Vlog("unixctl_server") -strtypes = types.StringTypes - - -class UnixctlConnection(object): - def __init__(self, rpc): - assert isinstance(rpc, ovs.jsonrpc.Connection) - self._rpc = rpc - self._request_id = None - - def run(self): - self._rpc.run() - error = self._rpc.get_status() - if error or self._rpc.get_backlog(): - return error - - for _ in range(10): - if error or self._request_id: - break - - error, msg = self._rpc.recv() - if msg: - if msg.type == Message.T_REQUEST: - self._process_command(msg) - else: - # XXX: rate-limit - vlog.warn("%s: received unexpected %s message" - % (self._rpc.name, - Message.type_to_string(msg.type))) - error = errno.EINVAL - - if not error: - error = self._rpc.get_status() - - return error - - def reply(self, body): - self._reply_impl(True, body) - - def reply_error(self, body): - self._reply_impl(False, body) - - # Called only by unixctl classes. 
- def _close(self): - self._rpc.close() - self._request_id = None - - def _wait(self, poller): - self._rpc.wait(poller) - if not self._rpc.get_backlog(): - self._rpc.recv_wait(poller) - - def _reply_impl(self, success, body): - assert isinstance(success, bool) - assert body is None or isinstance(body, strtypes) - - assert self._request_id is not None - - if body is None: - body = "" - - if body and not body.endswith("\n"): - body += "\n" - - if success: - reply = Message.create_reply(body, self._request_id) - else: - reply = Message.create_error(body, self._request_id) - - self._rpc.send(reply) - self._request_id = None - - def _process_command(self, request): - assert isinstance(request, ovs.jsonrpc.Message) - assert request.type == ovs.jsonrpc.Message.T_REQUEST - - self._request_id = request.id - - error = None - params = request.params - method = request.method - command = ovs.unixctl.commands.get(method) - if command is None: - error = '"%s" is not a valid command' % method - elif len(params) < command.min_args: - error = '"%s" command requires at least %d arguments' \ - % (method, command.min_args) - elif len(params) > command.max_args: - error = '"%s" command takes at most %d arguments' \ - % (method, command.max_args) - else: - for param in params: - if not isinstance(param, strtypes): - error = '"%s" command has non-string argument' % method - break - - if error is None: - unicode_params = [unicode(p) for p in params] - command.callback(self, unicode_params, command.aux) - - if error: - self.reply_error(error) - - -def _unixctl_version(conn, unused_argv, version): - assert isinstance(conn, UnixctlConnection) - version = "%s (Open vSwitch) %s" % (ovs.util.PROGRAM_NAME, version) - conn.reply(version) - -class UnixctlServer(object): - def __init__(self, listener): - assert isinstance(listener, ovs.stream.PassiveStream) - self._listener = listener - self._conns = [] - - def run(self): - for _ in range(10): - error, stream = self._listener.accept() - if not 
error: - rpc = ovs.jsonrpc.Connection(stream) - self._conns.append(UnixctlConnection(rpc)) - elif error == errno.EAGAIN: - break - else: - # XXX: rate-limit - vlog.warn("%s: accept failed: %s" % (self._listener.name, - os.strerror(error))) - - for conn in copy.copy(self._conns): - error = conn.run() - if error and error != errno.EAGAIN: - conn._close() - self._conns.remove(conn) - - def wait(self, poller): - self._listener.wait(poller) - for conn in self._conns: - conn._wait(poller) - - def close(self): - for conn in self._conns: - conn._close() - self._conns = None - - self._listener.close() - self._listener = None - - @staticmethod - def create(path, version=None): - """Creates a new UnixctlServer which listens on a unixctl socket - created at 'path'. If 'path' is None, the default path is chosen. - 'version' contains the version of the server as reported by the unixctl - version command. If None, ovs.version.VERSION is used.""" - - assert path is None or isinstance(path, strtypes) - - if path is not None: - path = "punix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path) - else: - path = "punix:%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, - ovs.util.PROGRAM_NAME, os.getpid()) - - if version is None: - version = ovs.version.VERSION - - error, listener = ovs.stream.PassiveStream.open(path) - if error: - ovs.util.ovs_error(error, "could not initialize control socket %s" - % path) - return error, None - - ovs.unixctl.command_register("version", "", 0, 0, _unixctl_version, - version) - - return 0, UnixctlServer(listener) - - -class UnixctlClient(object): - def __init__(self, conn): - assert isinstance(conn, ovs.jsonrpc.Connection) - self._conn = conn - - def transact(self, command, argv): - assert isinstance(command, strtypes) - assert isinstance(argv, list) - for arg in argv: - assert isinstance(arg, strtypes) - - request = Message.create_request(command, argv) - error, reply = self._conn.transact_block(request) - - if error: - vlog.warn("error communicating with %s: %s" - % 
(self._conn.name, os.strerror(error))) - return error, None, None - - if reply.error is not None: - return 0, str(reply.error), None - else: - assert reply.result is not None - return 0, None, str(reply.result) - - def close(self): - self._conn.close() - self.conn = None - - @staticmethod - def create(path): - assert isinstance(path, str) - - unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path) - error, stream = ovs.stream.Stream.open_block( - ovs.stream.Stream.open(unix)) - - if error: - vlog.warn("failed to connect to %s" % path) - return error, None - - return 0, UnixctlClient(ovs.jsonrpc.Connection(stream)) diff --git a/ryu/contrib/ovs/util.py b/ryu/contrib/ovs/util.py deleted file mode 100644 index cb0574bf..00000000 --- a/ryu/contrib/ovs/util.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2010, 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import os.path -import sys - -PROGRAM_NAME = os.path.basename(sys.argv[0]) -EOF = -1 - - -def abs_file_name(dir_, file_name): - """If 'file_name' starts with '/', returns a copy of 'file_name'. - Otherwise, returns an absolute path to 'file_name' considering it relative - to 'dir_', which itself must be absolute. 'dir_' may be None or the empty - string, in which case the current working directory is used. - - Returns None if 'dir_' is None and getcwd() fails. 
- - This differs from os.path.abspath() in that it will never change the - meaning of a file name.""" - if file_name.startswith('/'): - return file_name - else: - if dir_ is None or dir_ == "": - try: - dir_ = os.getcwd() - except OSError: - return None - - if dir_.endswith('/'): - return dir_ + file_name - else: - return "%s/%s" % (dir_, file_name) - - -def ovs_retval_to_string(retval): - """Many OVS functions return an int which is one of: - - 0: no error yet - - >0: errno value - - EOF: end of file (not necessarily an error; depends on the function - called) - - Returns the appropriate human-readable string.""" - - if not retval: - return "" - if retval > 0: - return os.strerror(retval) - if retval == EOF: - return "End of file" - return "***unknown return value: %s***" % retval - - -def ovs_error(err_no, message, vlog=None): - """Prints 'message' on stderr and emits an ERROR level log message to - 'vlog' if supplied. If 'err_no' is nonzero, then it is formatted with - ovs_retval_to_string() and appended to the message inside parentheses. - - 'message' should not end with a new-line, because this function will add - one itself.""" - - err_msg = "%s: %s" % (PROGRAM_NAME, message) - if err_no: - err_msg += " (%s)" % ovs_retval_to_string(err_no) - - sys.stderr.write("%s\n" % err_msg) - if vlog: - vlog.err(err_msg) - - -def ovs_fatal(*args, **kwargs): - """Prints 'message' on stderr and emits an ERROR level log message to - 'vlog' if supplied. If 'err_no' is nonzero, then it is formatted with - ovs_retval_to_string() and appended to the message inside parentheses. - Then, terminates with exit code 1 (indicating a failure). 
- - 'message' should not end with a new-line, because this function will add - one itself.""" - - ovs_error(*args, **kwargs) - sys.exit(1) diff --git a/ryu/contrib/ovs/version.py b/ryu/contrib/ovs/version.py deleted file mode 100644 index aa9c9eb3..00000000 --- a/ryu/contrib/ovs/version.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated automatically -- do not modify! -*- buffer-read-only: t -*- -VERSION = "1.7.90" diff --git a/ryu/contrib/ovs/vlog.py b/ryu/contrib/ovs/vlog.py deleted file mode 100644 index f7ace66f..00000000 --- a/ryu/contrib/ovs/vlog.py +++ /dev/null @@ -1,267 +0,0 @@ - -# Copyright (c) 2011, 2012 Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import logging -import logging.handlers -import re -import socket -import sys - -import ovs.dirs -import ovs.unixctl -import ovs.util - -FACILITIES = {"console": "info", "file": "info", "syslog": "info"} -LEVELS = { - "dbg": logging.DEBUG, - "info": logging.INFO, - "warn": logging.WARNING, - "err": logging.ERROR, - "emer": logging.CRITICAL, - "off": logging.CRITICAL -} - - -def get_level(level_str): - return LEVELS.get(level_str.lower()) - - -class Vlog: - __inited = False - __msg_num = 0 - __mfl = {} # Module -> facility -> level - __log_file = None - __file_handler = None - - def __init__(self, name): - """Creates a new Vlog object representing a module called 'name'. The - created Vlog object will do nothing until the Vlog.init() static method - is called. 
Once called, no more Vlog objects may be created.""" - - assert not Vlog.__inited - self.name = name.lower() - if name not in Vlog.__mfl: - Vlog.__mfl[self.name] = FACILITIES.copy() - - def __log(self, level, message, **kwargs): - if not Vlog.__inited: - return - - now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") - message = ("%s|%s|%s|%s|%s" - % (now, Vlog.__msg_num, self.name, level, message)) - - level = LEVELS.get(level.lower(), logging.DEBUG) - Vlog.__msg_num += 1 - - for f, f_level in Vlog.__mfl[self.name].iteritems(): - f_level = LEVELS.get(f_level, logging.CRITICAL) - if level >= f_level: - logging.getLogger(f).log(level, message, **kwargs) - - def emer(self, message, **kwargs): - self.__log("EMER", message, **kwargs) - - def err(self, message, **kwargs): - self.__log("ERR", message, **kwargs) - - def warn(self, message, **kwargs): - self.__log("WARN", message, **kwargs) - - def info(self, message, **kwargs): - self.__log("INFO", message, **kwargs) - - def dbg(self, message, **kwargs): - self.__log("DBG", message, **kwargs) - - def exception(self, message): - """Logs 'message' at ERR log level. Includes a backtrace when in - exception context.""" - self.err(message, exc_info=True) - - @staticmethod - def init(log_file=None): - """Intializes the Vlog module. Causes Vlog to write to 'log_file' if - not None. Should be called after all Vlog objects have been created. 
- No logging will occur until this function is called.""" - - if Vlog.__inited: - return - - Vlog.__inited = True - logging.raiseExceptions = False - Vlog.__log_file = log_file - for f in FACILITIES: - logger = logging.getLogger(f) - logger.setLevel(logging.DEBUG) - - try: - if f == "console": - logger.addHandler(logging.StreamHandler(sys.stderr)) - elif f == "syslog": - logger.addHandler(logging.handlers.SysLogHandler( - address="/dev/log", - facility=logging.handlers.SysLogHandler.LOG_DAEMON)) - elif f == "file" and Vlog.__log_file: - Vlog.__file_handler = logging.FileHandler(Vlog.__log_file) - logger.addHandler(Vlog.__file_handler) - except (IOError, socket.error): - logger.setLevel(logging.CRITICAL) - - ovs.unixctl.command_register("vlog/reopen", "", 0, 0, - Vlog._unixctl_vlog_reopen, None) - ovs.unixctl.command_register("vlog/set", "spec", 1, sys.maxint, - Vlog._unixctl_vlog_set, None) - ovs.unixctl.command_register("vlog/list", "", 0, 0, - Vlog._unixctl_vlog_list, None) - - @staticmethod - def set_level(module, facility, level): - """ Sets the log level of the 'module'-'facility' tuple to 'level'. - All three arguments are strings which are interpreted the same as - arguments to the --verbose flag. 
Should be called after all Vlog - objects have already been created.""" - - module = module.lower() - facility = facility.lower() - level = level.lower() - - if facility != "any" and facility not in FACILITIES: - return - - if module != "any" and module not in Vlog.__mfl: - return - - if level not in LEVELS: - return - - if module == "any": - modules = Vlog.__mfl.keys() - else: - modules = [module] - - if facility == "any": - facilities = FACILITIES.keys() - else: - facilities = [facility] - - for m in modules: - for f in facilities: - Vlog.__mfl[m][f] = level - - @staticmethod - def set_levels_from_string(s): - module = None - level = None - facility = None - - for word in [w.lower() for w in re.split('[ :]', s)]: - if word == "any": - pass - elif word in FACILITIES: - if facility: - return "cannot specify multiple facilities" - facility = word - elif word in LEVELS: - if level: - return "cannot specify multiple levels" - level = word - elif word in Vlog.__mfl: - if module: - return "cannot specify multiple modules" - module = word - else: - return "no facility, level, or module \"%s\"" % word - - Vlog.set_level(module or "any", facility or "any", level or "any") - - @staticmethod - def get_levels(): - lines = [" console syslog file\n", - " ------- ------ ------\n"] - lines.extend(sorted(["%-16s %4s %4s %4s\n" - % (m, - Vlog.__mfl[m]["console"], - Vlog.__mfl[m]["syslog"], - Vlog.__mfl[m]["file"]) for m in Vlog.__mfl])) - return ''.join(lines) - - @staticmethod - def reopen_log_file(): - """Closes and then attempts to re-open the current log file. 
(This is - useful just after log rotation, to ensure that the new log file starts - being used.)""" - - if Vlog.__log_file: - logger = logging.getLogger("file") - logger.removeHandler(Vlog.__file_handler) - Vlog.__file_handler = logging.FileHandler(Vlog.__log_file) - logger.addHandler(Vlog.__file_handler) - - @staticmethod - def _unixctl_vlog_reopen(conn, unused_argv, unused_aux): - if Vlog.__log_file: - Vlog.reopen_log_file() - conn.reply(None) - else: - conn.reply("Logging to file not configured") - - @staticmethod - def _unixctl_vlog_set(conn, argv, unused_aux): - for arg in argv: - msg = Vlog.set_levels_from_string(arg) - if msg: - conn.reply(msg) - return - conn.reply(None) - - @staticmethod - def _unixctl_vlog_list(conn, unused_argv, unused_aux): - conn.reply(Vlog.get_levels()) - -def add_args(parser): - """Adds vlog related options to 'parser', an ArgumentParser object. The - resulting arguments parsed by 'parser' should be passed to handle_args.""" - - group = parser.add_argument_group(title="Logging Options") - group.add_argument("--log-file", nargs="?", const="default", - help="Enables logging to a file. Default log file" - " is used if LOG_FILE is omitted.") - group.add_argument("-v", "--verbose", nargs="*", - help="Sets logging levels, see ovs-vswitchd(8)." - " Defaults to dbg.") - - -def handle_args(args): - """ Handles command line arguments ('args') parsed by an ArgumentParser. - The ArgumentParser should have been primed by add_args(). 
Also takes care - of initializing the Vlog module.""" - - log_file = args.log_file - if log_file == "default": - log_file = "%s/%s.log" % (ovs.dirs.LOGDIR, ovs.util.PROGRAM_NAME) - - if args.verbose is None: - args.verbose = [] - elif args.verbose == []: - args.verbose = ["any:any:dbg"] - - for verbose in args.verbose: - msg = Vlog.set_levels_from_string(verbose) - if msg: - ovs.util.ovs_fatal(0, "processing \"%s\": %s" % (verbose, msg)) - - Vlog.init(log_file) diff --git a/ryu/controller/controller.py b/ryu/controller/controller.py index 25b8776d..54fb1c95 100644 --- a/ryu/controller/controller.py +++ b/ryu/controller/controller.py @@ -30,7 +30,7 @@ from ryu.lib.hub import StreamServer import traceback import random import ssl -from socket import IPPROTO_TCP, TCP_NODELAY, timeout as SocketTimeout, error as SocketError +from socket import IPPROTO_TCP, TCP_NODELAY, SHUT_RDWR, timeout as SocketTimeout import warnings import ryu.base.app_manager @@ -41,8 +41,8 @@ from ryu.ofproto import ofproto_protocol from ryu.ofproto import ofproto_v1_0 from ryu.ofproto import nx_match -from ryu.controller import handler from ryu.controller import ofp_event +from ryu.controller.handler import HANDSHAKE_DISPATCHER, DEAD_DISPATCHER from ryu.lib.dpid import dpid_to_str @@ -51,31 +51,56 @@ LOG = logging.getLogger('ryu.controller.controller') CONF = cfg.CONF CONF.register_cli_opts([ cfg.StrOpt('ofp-listen-host', default='', help='openflow listen host'), - cfg.IntOpt('ofp-tcp-listen-port', default=ofproto_common.OFP_TCP_PORT, - help='openflow tcp listen port'), - cfg.IntOpt('ofp-ssl-listen-port', default=ofproto_common.OFP_SSL_PORT, - help='openflow ssl listen port'), + cfg.IntOpt('ofp-tcp-listen-port', default=None, + help='openflow tcp listen port ' + '(default: %d)' % ofproto_common.OFP_TCP_PORT), + cfg.IntOpt('ofp-ssl-listen-port', default=None, + help='openflow ssl listen port ' + '(default: %d)' % ofproto_common.OFP_SSL_PORT), cfg.StrOpt('ctl-privkey', default=None, 
help='controller private key'), cfg.StrOpt('ctl-cert', default=None, help='controller certificate'), - cfg.StrOpt('ca-certs', default=None, help='CA certificates'), - cfg.FloatOpt('socket-timeout', default=5.0, help='Time, in seconds, to await completion of socket operations.') + cfg.StrOpt('ca-certs', default=None, help='CA certificates') +]) +CONF.register_opts([ + cfg.FloatOpt('socket-timeout', + default=5.0, + help='Time, in seconds, to await completion of socket operations.'), + cfg.FloatOpt('echo-request-interval', + default=15.0, + help='Time, in seconds, between sending echo requests to a datapath.'), + cfg.IntOpt('maximum-unreplied-echo-requests', + default=0, + min=0, + help='Maximum number of unreplied echo requests before datapath is disconnected.') ]) class OpenFlowController(object): def __init__(self): super(OpenFlowController, self).__init__() + if not CONF.ofp_tcp_listen_port and not CONF.ofp_ssl_listen_port: + self.ofp_tcp_listen_port = ofproto_common.OFP_TCP_PORT + self.ofp_ssl_listen_port = ofproto_common.OFP_SSL_PORT + # For the backward compatibility, we spawn a server loop + # listening on the old OpenFlow listen port 6633. 
+ hub.spawn(self.server_loop, + ofproto_common.OFP_TCP_PORT_OLD, + ofproto_common.OFP_SSL_PORT_OLD) + else: + self.ofp_tcp_listen_port = CONF.ofp_tcp_listen_port + self.ofp_ssl_listen_port = CONF.ofp_ssl_listen_port # entry point def __call__(self): # LOG.debug('call') - self.server_loop() + self.server_loop(self.ofp_tcp_listen_port, + self.ofp_ssl_listen_port) - def server_loop(self): + def server_loop(self, ofp_tcp_listen_port, ofp_ssl_listen_port): if CONF.ctl_privkey is not None and CONF.ctl_cert is not None: if CONF.ca_certs is not None: server = StreamServer((CONF.ofp_listen_host, - CONF.ofp_ssl_listen_port), + ofp_ssl_listen_port), datapath_connection_factory, keyfile=CONF.ctl_privkey, certfile=CONF.ctl_cert, @@ -84,14 +109,14 @@ class OpenFlowController(object): ssl_version=ssl.PROTOCOL_TLSv1) else: server = StreamServer((CONF.ofp_listen_host, - CONF.ofp_ssl_listen_port), + ofp_ssl_listen_port), datapath_connection_factory, keyfile=CONF.ctl_privkey, certfile=CONF.ctl_cert, ssl_version=ssl.PROTOCOL_TLSv1) else: server = StreamServer((CONF.ofp_listen_host, - CONF.ofp_tcp_listen_port), + ofp_tcp_listen_port), datapath_connection_factory) # LOG.debug('loop') @@ -103,12 +128,67 @@ def _deactivate(method): try: method(self) finally: - self.send_active = False - self.set_state(handler.DEAD_DISPATCHER) + try: + self.socket.shutdown(SHUT_RDWR) + except (EOFError, IOError): + pass + + if not self.is_active: + self.socket.close() return deactivate class Datapath(ofproto_protocol.ProtocolDesc): + """ + A class to describe an OpenFlow switch connected to this controller. + + An instance has the following attributes. + + .. tabularcolumns:: |l|L| + + ==================================== ====================================== + Attribute Description + ==================================== ====================================== + id 64-bit OpenFlow Datapath ID. + Only available for + ryu.controller.handler.MAIN_DISPATCHER + phase. 
+ ofproto A module which exports OpenFlow + definitions, mainly constants appeared + in the specification, for the + negotiated OpenFlow version. For + example, ryu.ofproto.ofproto_v1_0 for + OpenFlow 1.0. + ofproto_parser A module which exports OpenFlow wire + message encoder and decoder for the + negotiated OpenFlow version. + For example, + ryu.ofproto.ofproto_v1_0_parser + for OpenFlow 1.0. + ofproto_parser.OFPxxxx(datapath,...) A callable to prepare an OpenFlow + message for the given switch. It can + be sent with Datapath.send_msg later. + xxxx is a name of the message. For + example OFPFlowMod for flow-mod + message. Arguemnts depend on the + message. + set_xid(self, msg) Generate an OpenFlow XID and put it + in msg.xid. + send_msg(self, msg) Queue an OpenFlow message to send to + the corresponding switch. If msg.xid + is None, set_xid is automatically + called on the message before queueing. + send_packet_out deprecated + send_flow_mod deprecated + send_flow_del deprecated + send_delete_all_flows deprecated + send_barrier Queue an OpenFlow barrier message to + send to the switch. + send_nxt_set_flow_format deprecated + is_reserved_port deprecated + ==================================== ====================================== + """ + def __init__(self, socket, address): super(Datapath, self).__init__() @@ -116,43 +196,28 @@ class Datapath(ofproto_protocol.ProtocolDesc): self.socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) self.socket.settimeout(CONF.socket_timeout) self.address = address - - self.send_active = True - self.close_requested = False + self.is_active = True # The limit is arbitrary. We need to limit queue size to - # prevent it from eating memory up + # prevent it from eating memory up. 
self.send_q = hub.Queue(16) + self._send_q_sem = hub.BoundedSemaphore(self.send_q.maxsize) + + self.echo_request_interval = CONF.echo_request_interval + self.max_unreplied_echo_requests = CONF.maximum_unreplied_echo_requests + self.unreplied_echo_requests = [] self.xid = random.randint(0, self.ofproto.MAX_XID) self.id = None # datapath_id is unknown yet self._ports = None self.flow_format = ofproto_v1_0.NXFF_OPENFLOW10 self.ofp_brick = ryu.base.app_manager.lookup_service_brick('ofp_event') - self.set_state(handler.HANDSHAKE_DISPATCHER) - - def _get_ports(self): - if (self.ofproto_parser is not None and - self.ofproto_parser.ofproto.OFP_VERSION >= 0x04): - message = ( - 'Datapath#ports is kept for compatibility with the previous ' - 'openflow versions (< 1.3). ' - 'This not be updated by EventOFPPortStatus message. ' - 'If you want to be updated, you can use ' - '\'ryu.controller.dpset\' or \'ryu.topology.switches\'.' - ) - warnings.warn(message, stacklevel=2) - return self._ports - - def _set_ports(self, ports): - self._ports = ports - - # To show warning when Datapath#ports is read - ports = property(_get_ports, _set_ports) + self.set_state(HANDSHAKE_DISPATCHER) @_deactivate def close(self): - self.close_requested = True + if self.state != DEAD_DISPATCHER: + self.set_state(DEAD_DISPATCHER) def set_state(self, state): self.state = state @@ -167,19 +232,19 @@ class Datapath(ofproto_protocol.ProtocolDesc): required_len = ofproto_common.OFP_HEADER_SIZE count = 0 - while True: - ret = "" - + while self.state != DEAD_DISPATCHER: try: ret = self.socket.recv(required_len) except SocketTimeout: - if not self.close_requested: - continue - except SocketError: - self.close_requested = True + continue + except ssl.SSLError: + # eventlet throws SSLError (which is a subclass of IOError) + # on SSL socket read timeout; re-try the loop in this case. 
+ continue + except (EOFError, IOError): + break - if (len(ret) == 0) or (self.close_requested): - self.socket.close() + if len(ret) == 0: break buf += ret @@ -215,30 +280,45 @@ class Datapath(ofproto_protocol.ProtocolDesc): count = 0 hub.sleep(0) - @_deactivate def _send_loop(self): try: - while self.send_active: + while self.state != DEAD_DISPATCHER: buf = self.send_q.get() + self._send_q_sem.release() self.socket.sendall(buf) + except SocketTimeout: + LOG.debug("Socket timed out while sending data to switch at address %s", + self.address) except IOError as ioe: - LOG.debug("Socket error while sending data to switch at address %s: [%d] %s", - self.address, ioe.errno, ioe.strerror) + # Convert ioe.errno to a string, just in case it was somehow set to None. + errno = "%s" % ioe.errno + LOG.debug("Socket error while sending data to switch at address %s: [%s] %s", + self.address, errno, ioe.strerror) finally: q = self.send_q - # first, clear self.send_q to prevent new references. + # First, clear self.send_q to prevent new references. self.send_q = None - # there might be threads currently blocking in send_q.put(). - # unblock them by draining the queue. + # Now, drain the send_q, releasing the associated semaphore for each entry. + # This should release all threads waiting to acquire the semaphore. try: while q.get(block=False): - pass + self._send_q_sem.release() except hub.QueueEmpty: pass + # Finally, ensure the _recv_loop terminates. 
+ self.close() def send(self, buf): + msg_enqueued = False + self._send_q_sem.acquire() if self.send_q: self.send_q.put(buf) + msg_enqueued = True + else: + self._send_q_sem.release() + if not msg_enqueued: + LOG.debug('Datapath in process of terminating; send() to %s discarded.', + self.address) def set_xid(self, msg): self.xid += 1 @@ -254,6 +334,23 @@ class Datapath(ofproto_protocol.ProtocolDesc): # LOG.debug('send_msg %s', msg) self.send(msg.buf) + def _echo_request_loop(self): + if not self.max_unreplied_echo_requests: + return + while (self.send_q and + (len(self.unreplied_echo_requests) <= self.max_unreplied_echo_requests)): + echo_req = self.ofproto_parser.OFPEchoRequest(self) + self.unreplied_echo_requests.append(self.set_xid(echo_req)) + self.send_msg(echo_req) + hub.sleep(self.echo_request_interval) + self.close() + + def acknowledge_echo_reply(self, xid): + try: + self.unreplied_echo_requests.remove(xid) + except: + pass + def serve(self): send_thr = hub.spawn(self._send_loop) @@ -261,11 +358,15 @@ class Datapath(ofproto_protocol.ProtocolDesc): hello = self.ofproto_parser.OFPHello(self) self.send_msg(hello) + echo_thr = hub.spawn(self._echo_request_loop) + try: self._recv_loop() finally: hub.kill(send_thr) - hub.joinall([send_thr]) + hub.kill(echo_thr) + hub.joinall([send_thr, echo_thr]) + self.is_active = False # # Utility methods for convenience diff --git a/ryu/controller/dpset.py b/ryu/controller/dpset.py index 1ec6a338..5af6835a 100644 --- a/ryu/controller/dpset.py +++ b/ryu/controller/dpset.py @@ -44,6 +44,23 @@ class EventDPBase(event.EventBase): class EventDP(EventDPBase): + """ + An event class to notify connect/disconnect of a switch. + + For OpenFlow switches, one can get the same notification by observing + ryu.controller.ofp_event.EventOFPStateChange. + An instance has at least the following attributes. 
+ + ========= ================================================================= + Attribute Description + ========= ================================================================= + dp A ryu.controller.controller.Datapath instance of the switch + enter True when the switch connected to our controller. False for + disconnect. + ports A list of port instances. + ========= ================================================================= + """ + def __init__(self, dp, enter_leave): # enter_leave # True: dp entered @@ -67,16 +84,64 @@ class EventPortBase(EventDPBase): class EventPortAdd(EventPortBase): + """ + An event class for switch port status "ADD" notification. + + This event is generated when a new port is added to a switch. + For OpenFlow switches, one can get the same notification by observing + ryu.controller.ofp_event.EventOFPPortStatus. + An instance has at least the following attributes. + + ========= ================================================================= + Attribute Description + ========= ================================================================= + dp A ryu.controller.controller.Datapath instance of the switch + port port number + ========= ================================================================= + """ + def __init__(self, dp, port): super(EventPortAdd, self).__init__(dp, port) class EventPortDelete(EventPortBase): + """ + An event class for switch port status "DELETE" notification. + + This event is generated when a port is removed from a switch. + For OpenFlow switches, one can get the same notification by observing + ryu.controller.ofp_event.EventOFPPortStatus. + An instance has at least the following attributes. 
+ + ========= ================================================================= + Attribute Description + ========= ================================================================= + dp A ryu.controller.controller.Datapath instance of the switch + port port number + ========= ================================================================= + """ + def __init__(self, dp, port): super(EventPortDelete, self).__init__(dp, port) class EventPortModify(EventPortBase): + """ + An event class for switch port status "MODIFY" notification. + + This event is generated when some attribute of a port is changed. + For OpenFlow switches, one can get the same notification by observing + ryu.controller.ofp_event.EventOFPPortStatus. + An instance has at least the following attributes. + + ========= ==================================================================== + Attribute Description + ========= ==================================================================== + dp A ryu.controller.controller.Datapath instance of the switch + port port number + ========= ==================================================================== + """ + def __init__(self, dp, new_port): super(EventPortModify, self).__init__(dp, new_port) diff --git a/ryu/controller/event.py b/ryu/controller/event.py index a90bf660..e9d49211 100644 --- a/ryu/controller/event.py +++ b/ryu/controller/event.py @@ -16,11 +16,20 @@ class EventBase(object): - # Nothing yet - pass + """ + The base of all event classes. + + A Ryu application can define its own event type by creating a subclass. + """ + + def __init__(self): + super(EventBase, self).__init__() class EventRequestBase(EventBase): + """ + The base class for synchronous request for RyuApp.send_request. + """ def __init__(self): super(EventRequestBase, self).__init__() self.dst = None # app.name of provide the event. 
@@ -30,6 +39,9 @@ class EventRequestBase(EventBase): class EventReplyBase(EventBase): + """ + The base class for synchronous request reply for RyuApp.send_reply. + """ def __init__(self, dst): super(EventReplyBase, self).__init__() self.dst = dst diff --git a/ryu/controller/handler.py b/ryu/controller/handler.py index a0782a12..f1fb7076 100644 --- a/ryu/controller/handler.py +++ b/ryu/controller/handler.py @@ -47,6 +47,33 @@ class _Caller(object): # should be named something like 'observe_event' def set_ev_cls(ev_cls, dispatchers=None): + """ + A decorator for Ryu application to declare an event handler. + + Decorated method will become an event handler. + ev_cls is an event class whose instances this RyuApp wants to receive. + dispatchers argument specifies one of the following negotiation phases + (or a list of them) for which events should be generated for this handler. + Note that, in case an event changes the phase, the phase before the change + is used to check the interest. + + .. tabularcolumns:: |l|L| + + =========================================== =============================== + Negotiation phase Description + =========================================== =============================== + ryu.controller.handler.HANDSHAKE_DISPATCHER Sending and waiting for hello + message + ryu.controller.handler.CONFIG_DISPATCHER Version negotiated and sent + features-request message + ryu.controller.handler.MAIN_DISPATCHER Switch-features message + received and sent set-config + message + ryu.controller.handler.DEAD_DISPATCHER Disconnect from the peer. Or + disconnecting due to some + unrecoverable errors. 
+ =========================================== =============================== + """ def _set_ev_cls_dec(handler): if 'callers' not in dir(handler): handler.callers = {} diff --git a/ryu/controller/mac_to_network.py b/ryu/controller/mac_to_network.py index dd2c4efe..2cae912f 100644 --- a/ryu/controller/mac_to_network.py +++ b/ryu/controller/mac_to_network.py @@ -49,8 +49,8 @@ class MacToNetwork(object): # VM-> tap-> ovs-> ext-port-> wire-> ext-port-> ovs-> tap-> VM return - LOG.warn('duplicated nw_id: mac %s nw old %s new %s', - haddr_to_str(mac), _nw_id, nw_id) + LOG.warning('duplicated nw_id: mac %s nw old %s new %s', + haddr_to_str(mac), _nw_id, nw_id) raise MacAddressDuplicated(mac=mac) diff --git a/ryu/controller/network.py b/ryu/controller/network.py index ac247ffd..83c91b4d 100644 --- a/ryu/controller/network.py +++ b/ryu/controller/network.py @@ -18,23 +18,55 @@ import collections from ryu.base import app_manager import ryu.exception as ryu_exc -from ryu.app.rest_nw_id import NW_ID_UNKNOWN from ryu.controller import event from ryu.exception import NetworkNotFound, NetworkAlreadyExist from ryu.exception import PortAlreadyExist, PortNotFound, PortUnknown +NW_ID_UNKNOWN = '__NW_ID_UNKNOWN__' + + class MacAddressAlreadyExist(ryu_exc.RyuException): message = 'port (%(dpid)s, %(port)s) has already mac %(mac_address)s' class EventNetworkDel(event.EventBase): + """ + An event class for network deletion. + + This event is generated when a network is deleted by the REST API. + An instance has at least the following attributes. 
+ + ========== =================================================================== + Attribute Description + ========== =================================================================== + network_id Network ID + ========== =================================================================== + """ + def __init__(self, network_id): super(EventNetworkDel, self).__init__() self.network_id = network_id class EventNetworkPort(event.EventBase): + """ + An event class for notification of port arrival and deperture. + + This event is generated when a port is introduced to or removed from a + network by the REST API. + An instance has at least the following attributes. + + ========== ================================================================ + Attribute Description + ========== ================================================================ + network_id Network ID + dpid OpenFlow Datapath ID of the switch to which the port belongs. + port_no OpenFlow port number of the port + add_del True for adding a port. False for removing a port. + ========== ================================================================ + """ + def __init__(self, network_id, dpid, port_no, add_del): super(EventNetworkPort, self).__init__() self.network_id = network_id @@ -44,6 +76,26 @@ class EventNetworkPort(event.EventBase): class EventMacAddress(event.EventBase): + """ + An event class for end-point MAC address registration. + + This event is generated when a end-point MAC address is updated + by the REST API. + An instance has at least the following attributes. + + =========== =============================================================== + Attribute Description + =========== =============================================================== + network_id Network ID + dpid OpenFlow Datapath ID of the switch to which the port belongs. + port_no OpenFlow port number of the port + mac_address The old MAC address of the port if add_del is False. Otherwise + the new MAC address. 
+ add_del False if this event is a result of a port removal. Otherwise + True. + =========== =============================================================== + """ + def __init__(self, dpid, port_no, network_id, mac_address, add_del): super(EventMacAddress, self).__init__() assert network_id is not None diff --git a/ryu/controller/ofp_event.py b/ryu/controller/ofp_event.py index 16eb493a..6b1c8b3e 100644 --- a/ryu/controller/ofp_event.py +++ b/ryu/controller/ofp_event.py @@ -27,6 +27,25 @@ from . import event class EventOFPMsgBase(event.EventBase): + """ + The base class of OpenFlow event class. + + OpenFlow event classes have at least the following attributes. + + .. tabularcolumns:: |l|L| + + ============ ============================================================== + Attribute Description + ============ ============================================================== + msg An object which describes the corresponding OpenFlow message. + msg.datapath A ryu.controller.controller.Datapath instance + which describes an OpenFlow switch from which we received + this OpenFlow message. + ============ ============================================================== + + The msg object has some more additional members whose values are extracted + from the original OpenFlow message. + """ def __init__(self, msg): super(EventOFPMsgBase, self).__init__() self.msg = msg @@ -81,9 +100,45 @@ for ofp_mods in ofproto.get_ofp_modules().values(): class EventOFPStateChange(event.EventBase): + """ + An event class for negotiation phase change notification. + + An instance of this class is sent to observer after changing + the negotiation phase. + An instance has at least the following attributes. 
+ + ========= ================================================================= + Attribute Description + ========= ================================================================= + datapath ryu.controller.controller.Datapath instance of the switch + ========= ================================================================= + """ def __init__(self, dp): super(EventOFPStateChange, self).__init__() self.datapath = dp +class EventOFPPortStateChange(event.EventBase): + """ + An event class to notify the port state changes of Dtatapath instance. + + This event performs like EventOFPPortStatus, but Ryu will + send this event after updating ``ports`` dict of Datapath instances. + An instance has at least the following attributes. + + ========= ================================================================= + Attribute Description + ========= ================================================================= + datapath ryu.controller.controller.Datapath instance of the switch + reason one of OFPPR_* + port_no Port number which state was changed + ========= ================================================================= + """ + def __init__(self, dp, reason, port_no): + super(EventOFPPortStateChange, self).__init__() + self.datapath = dp + self.reason = reason + self.port_no = port_no + + handler.register_service('ryu.controller.ofp_handler') diff --git a/ryu/controller/ofp_handler.py b/ryu/controller/ofp_handler.py index b3c63dfd..b524a285 100644 --- a/ryu/controller/ofp_handler.py +++ b/ryu/controller/ofp_handler.py @@ -238,27 +238,59 @@ class OFPHandler(ryu.base.app_manager.RyuApp): echo_reply.data = msg.data datapath.send_msg(echo_reply) + @set_ev_handler(ofp_event.EventOFPEchoReply, + [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER]) + def echo_reply_handler(self, ev): + msg = ev.msg + datapath = msg.datapath + datapath.acknowledge_echo_reply(msg.xid) + + @set_ev_handler(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) + def port_status_handler(self, ev): 
+ msg = ev.msg + datapath = msg.datapath + ofproto = datapath.ofproto + + if msg.reason in [ofproto.OFPPR_ADD, ofproto.OFPPR_MODIFY]: + datapath.ports[msg.desc.port_no] = msg.desc + elif msg.reason == ofproto.OFPPR_DELETE: + datapath.ports.pop(msg.desc.port_no, None) + else: + return + + self.send_event_to_observers( + ofp_event.EventOFPPortStateChange( + datapath, msg.reason, msg.desc.port_no), + datapath.state) + @set_ev_handler(ofp_event.EventOFPErrorMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER]) def error_msg_handler(self, ev): msg = ev.msg ofp = msg.datapath.ofproto - (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data) - self.logger.debug('EventOFPErrorMsg received.') self.logger.debug( - 'version=%s, msg_type=%s, msg_len=%s, xid=%s', hex(msg.version), - hex(msg.msg_type), hex(msg.msg_len), hex(msg.xid)) - self.logger.debug( - ' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg.msg_type)) - self.logger.debug( - "OFPErrorMsg(type=%s, code=%s, data=b'%s')", hex(msg.type), - hex(msg.code), utils.binary_str(msg.data)) - self.logger.debug( - ' |-- type: %s', ofp.ofp_error_type_to_str(msg.type)) - self.logger.debug( - ' |-- code: %s', ofp.ofp_error_code_to_str(msg.type, msg.code)) - self.logger.debug( - ' `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s', - hex(version), hex(msg_type), hex(msg_len), hex(xid)) - self.logger.debug( - ' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg_type)) + "EventOFPErrorMsg received.\n" + "version=%s, msg_type=%s, msg_len=%s, xid=%s\n" + " `-- msg_type: %s\n" + "OFPErrorMsg(type=%s, code=%s, data=b'%s')\n" + " |-- type: %s\n" + " |-- code: %s", + hex(msg.version), hex(msg.msg_type), hex(msg.msg_len), + hex(msg.xid), ofp.ofp_msg_type_to_str(msg.msg_type), + hex(msg.type), hex(msg.code), utils.binary_str(msg.data), + ofp.ofp_error_type_to_str(msg.type), + ofp.ofp_error_code_to_str(msg.type, msg.code)) + if len(msg.data) >= ofp.OFP_HEADER_SIZE: + (version, msg_type, msg_len, xid) = 
ofproto_parser.header(msg.data) + self.logger.debug( + " `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s\n" + " `-- msg_type: %s", + hex(version), hex(msg_type), hex(msg_len), hex(xid), + ofp.ofp_msg_type_to_str(msg_type)) + else: + self.logger.warning( + "The data field sent from the switch is too short: " + "len(msg.data) < OFP_HEADER_SIZE\n" + "The OpenFlow Spec says that the data field should contain " + "at least 64 bytes of the failed request.\n" + "Please check the settings or implementation of your switch.") diff --git a/ryu/controller/tunnels.py b/ryu/controller/tunnels.py index 0a946738..61f0c160 100644 --- a/ryu/controller/tunnels.py +++ b/ryu/controller/tunnels.py @@ -43,16 +43,61 @@ class EventTunnelKeyBase(event.EventBase): class EventTunnelKeyAdd(EventTunnelKeyBase): + """ + An event class for tunnel key registration. + + This event is generated when a tunnel key is registered or updated + by the REST API. + An instance has at least the following attributes. + + =========== =============================================================== + Attribute Description + =========== =============================================================== + network_id Network ID + tunnel_key Tunnel Key + =========== =============================================================== + """ + def __init__(self, network_id, tunnel_key): super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key) class EventTunnelKeyDel(EventTunnelKeyBase): + """ + An event class for tunnel key registration. + + This event is generated when a tunnel key is removed by the REST API. + An instance has at least the following attributes. 
+ + =========== =============================================================== + Attribute Description + =========== =============================================================== + network_id Network ID + tunnel_key Tunnel Key + =========== =============================================================== + """ + def __init__(self, network_id, tunnel_key): super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key) class EventTunnelPort(event.EventBase): + """ + An event class for tunnel port registration. + + This event is generated when a tunnel port is added or removed + by the REST API. + An instance has at least the following attributes. + + =========== =============================================================== + Attribute Description + =========== =============================================================== + dpid OpenFlow Datapath ID + port_no OpenFlow port number + remote_dpid OpenFlow port number of the tunnel peer + add_del True for adding a tunnel. False for removal. + =========== =============================================================== + """ def __init__(self, dpid, port_no, remote_dpid, add_del): super(EventTunnelPort, self).__init__() self.dpid = dpid diff --git a/ryu/hooks.py b/ryu/hooks.py index dcb5cc90..d7a2a8dd 100644 --- a/ryu/hooks.py +++ b/ryu/hooks.py @@ -41,8 +41,6 @@ def setup_hook(config): metadata = config['metadata'] if sys.platform == 'win32': requires = metadata.get('requires_dist', '').split('\n') - requires.append('pywin32') - requires.append('wmi') metadata['requires_dist'] = "\n".join(requires) config['metadata'] = metadata diff --git a/ryu/lib/hub.py b/ryu/lib/hub.py index 56211479..2ec8d691 100644 --- a/ryu/lib/hub.py +++ b/ryu/lib/hub.py @@ -18,8 +18,8 @@ import logging import os -# we don't bother to use cfg.py because monkey patch needs to be -# called very early. instead, we use an environment variable to +# We don't bother to use cfg.py because monkey patch needs to be +# called very early. 
Instead, we use an environment variable to # select the type of hub. HUB_TYPE = os.getenv('RYU_HUB_TYPE', 'eventlet') @@ -45,34 +45,42 @@ if HUB_TYPE == 'eventlet': connect = eventlet.connect def spawn(*args, **kwargs): + raise_error = kwargs.pop('raise_error', False) + def _launch(func, *args, **kwargs): - # mimic gevent's default raise_error=False behaviour - # by not propergating an exception to the joiner. + # Mimic gevent's default raise_error=False behaviour + # by not propagating an exception to the joiner. try: - func(*args, **kwargs) - except greenlet.GreenletExit: + return func(*args, **kwargs) + except TaskExit: pass except: - # log uncaught exception. - # note: this is an intentional divergence from gevent - # behaviour. gevent silently ignores such exceptions. + if raise_error: + raise + # Log uncaught exception. + # Note: this is an intentional divergence from gevent + # behaviour; gevent silently ignores such exceptions. LOG.error('hub: uncaught exception: %s', traceback.format_exc()) return eventlet.spawn(_launch, *args, **kwargs) def spawn_after(seconds, *args, **kwargs): + raise_error = kwargs.pop('raise_error', False) + def _launch(func, *args, **kwargs): - # mimic gevent's default raise_error=False behaviour - # by not propergating an exception to the joiner. + # Mimic gevent's default raise_error=False behaviour + # by not propagating an exception to the joiner. try: - func(*args, **kwargs) - except greenlet.GreenletExit: + return func(*args, **kwargs) + except TaskExit: pass except: - # log uncaught exception. - # note: this is an intentional divergence from gevent - # behaviour. gevent silently ignores such exceptions. + if raise_error: + raise + # Log uncaught exception. + # Note: this is an intentional divergence from gevent + # behaviour; gevent silently ignores such exceptions. 
LOG.error('hub: uncaught exception: %s', traceback.format_exc()) @@ -83,17 +91,18 @@ if HUB_TYPE == 'eventlet': def joinall(threads): for t in threads: - # this try-except is necessary when killing an inactive - # greenthread + # This try-except is necessary when killing an inactive + # greenthread. try: t.wait() - except greenlet.GreenletExit: + except TaskExit: pass - Queue = eventlet.queue.Queue + Queue = eventlet.queue.LightQueue QueueEmpty = eventlet.queue.Empty Semaphore = eventlet.semaphore.Semaphore BoundedSemaphore = eventlet.semaphore.BoundedSemaphore + TaskExit = greenlet.GreenletExit class StreamServer(object): def __init__(self, listen_info, handle=None, backlog=None, @@ -144,9 +153,9 @@ if HUB_TYPE == 'eventlet': def _broadcast(self): self._ev.send() - # because eventlet Event doesn't allow mutiple send() on an event, - # re-create the underlying event. - # note: _ev.reset() is obsolete. + # Since eventlet Event doesn't allow multiple send() operations + # on an event, re-create the underlying event. + # Note: _ev.reset() is obsolete. self._ev = eventlet.event.Event() def is_set(self): diff --git a/ryu/lib/ofctl_nicira_ext.py b/ryu/lib/ofctl_nicira_ext.py new file mode 100644 index 00000000..3a5c6be1 --- /dev/null +++ b/ryu/lib/ofctl_nicira_ext.py @@ -0,0 +1,156 @@ +# Copyright (C) 2016 Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import base64
+import logging
+
+from ryu.ofproto import nicira_ext
+
+
+LOG = logging.getLogger(__name__)
+
+
+def action_to_str(act, ofctl_action_to_str):
+    sub_type = act.subtype
+
+    if sub_type == nicira_ext.NXAST_RESUBMIT:
+        return 'NX_RESUBMIT: {port: %s, table: %s}' % (act.in_port,
+                                                       act.table_id)
+
+    elif sub_type == nicira_ext.NXAST_REG_MOVE:
+        src_start = act.src_ofs
+        dst_start = act.dst_ofs
+        src_end = src_start + act.n_bits
+        dst_end = dst_start + act.n_bits
+        return 'NX_MOVE: {%s[%s..%s]: %s[%s..%s]}' % (act.dst_field, dst_start,
+                                                      dst_end, act.src_field,
+                                                      src_start, src_end)
+
+    elif sub_type == nicira_ext.NXAST_REG_LOAD:
+        start = act.ofs
+        end = start + act.nbits
+        return 'NX_LOAD: {%s[%s..%s]: %x}' % (act.dst, start, end, act.value)
+
+    elif sub_type == nicira_ext.NXAST_LEARN:
+        specs = []
+        add_spec = specs.append
+
+        for spec in act.specs:
+            dst_type = spec._dst_type
+
+            if dst_type == 0:  # match
+                if isinstance(spec.src, (tuple, list)):
+                    src = spec.src[0]
+                    start = spec.src[1]
+                    end = start + spec.n_bits
+                    start_end = '%s..%s' % (start, end)
+
+                else:
+                    src = spec.src
+                    start_end = '[]'
+
+                add_spec('%s[%s]' % (src, start_end))
+
+            elif dst_type == 1:  # load
+                if isinstance(spec.src, (tuple, list)):
+                    src = spec.src[0]
+                    start = spec.src[1]
+                    end = start + spec.n_bits
+                    src_start_end = '[%s..%s]' % (start, end)
+
+                else:
+                    src = spec.src
+                    src_start_end = ''
+
+                if isinstance(spec.dst, (tuple, list)):
+                    dst = spec.dst[0]
+                    start = spec.dst[1]
+                    end = start + spec.n_bits
+                    dst_start_end = '[%s..%s]' % (start, end)
+
+                else:
+                    dst = spec.dst
+                    dst_start_end = '[]'
+
+                add_spec('NX_LOAD {%s%s: %s%s}' % (dst, dst_start_end,
+                                                   src, src_start_end))
+
+            elif dst_type == 2:  # output
+                if isinstance(spec.src, (tuple, list)):
+                    src = spec.src[0]
+                    start = spec.src[1]
+                    end = start + spec.n_bits
+                    start_end = '%s..%s' % (start, end)
+
+                else:
+                    src = spec.src
+                    start_end = '[]'
+
+                add_spec('output:%s%s' % (src, start_end))
+
+        return ('NX_LEARN: {idle_timeout: %s, '
+                'hard_timeouts: %s, '
+                'priority: %s, '
+                'cookie: %s, '
+                'flags: %s, '
+                'table_id: %s, '
+                'fin_idle_timeout: %s, '
+                'fin_hard_timeout: %s, '
+                'specs: %s}' % (act.idle_timeout, act.hard_timeout,
+                                act.priority, act.cookie, act.flags,
+                                act.table_id,
+                                act.fin_idle_timeout,
+                                act.fin_hard_timeout,
+                                specs))
+
+    elif sub_type == nicira_ext.NXAST_CONJUNCTION:
+        return ('NX_CONJUNCTION: {clause: %s, number_of_clauses: %s, id: %s}' %
+                (act.clause, act.n_clauses, act.id))
+
+    elif sub_type == nicira_ext.NXAST_CT:
+        if act.zone_ofs_nbits != 0:
+            start = act.zone_ofs_nbits
+            end = start + 16
+            zone = act.zone_src + ('[%s..%s]' % (start, end))
+
+        else:
+            zone = act.zone_src
+
+        actions = [ofctl_action_to_str(action) for action in act.actions]
+
+        return ('NX_CT: {flags: %s, '
+                'zone: %s, '
+                'table: %s, '
+                'alg: %s, '
+                'actions: %s}' % (act.flags, zone, act.recirc_table, act.alg,
+                                  actions))
+
+    elif sub_type == nicira_ext.NXAST_NAT:
+        return ('NX_NAT: {flags: %s, '
+                'range_ipv4_min: %s, '
+                'range_ipv4_max: %s, '
+                'range_ipv6_min: %s, '
+                'range_ipv6_max: %s, '
+                'range_proto_min: %s, '
+                'range_proto_max: %s}' % (act.flags,
+                                          act.range_ipv4_min,
+                                          act.range_ipv4_max,
+                                          act.range_ipv6_min,
+                                          act.range_ipv6_max,
+                                          act.range_proto_min,
+                                          act.range_proto_max))
+
+    data_str = base64.b64encode(act.data)
+    return 'NX_UNKNOWN: {subtype: %s, data: %s}' % (sub_type,
+                                                    data_str.decode('utf-8'))
diff --git a/ryu/lib/ofctl_utils.py b/ryu/lib/ofctl_utils.py
index 978d1f6e..89cd5c83 100644
--- a/ryu/lib/ofctl_utils.py
+++ b/ryu/lib/ofctl_utils.py
@@ -13,10 +13,250 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import base64 import logging +import netaddr +import six + +from ryu.lib import dpid +from ryu.lib import hub + LOG = logging.getLogger(__name__) +DEFAULT_TIMEOUT = 1.0 + +# NOTE(jkoelker) Constants for converting actions +OUTPUT = 'OUTPUT' +COPY_TTL_OUT = 'COPY_TTL_OUT' +COPY_TTL_IN = 'COPY_TTL_IN' +SET_MPLS_TTL = 'SET_MPLS_TTL' +DEC_MPLS_TTL = 'DEC_MPLS_TTL' +PUSH_VLAN = 'PUSH_VLAN' +POP_VLAN = 'POP_VLAN' +PUSH_MPLS = 'PUSH_MPLS' +POP_MPLS = 'POP_MPLS' +SET_QUEUE = 'SET_QUEUE' +GROUP = 'GROUP' +SET_NW_TTL = 'SET_NW_TTL' +DEC_NW_TTL = 'DEC_NW_TTL' +SET_FIELD = 'SET_FIELD' +PUSH_PBB = 'PUSH_PBB' # OpenFlow 1.3 or later +POP_PBB = 'POP_PBB' # OpenFlow 1.3 or later +COPY_FIELD = 'COPY_FIELD' # OpenFlow 1.5 or later +METER = 'METER' # OpenFlow 1.5 or later +EXPERIMENTER = 'EXPERIMENTER' + + +def get_logger(logger=None): + # NOTE(jkoelker) use the logger the calling code wants us to + if logger is not None: + return logger + + return LOG + + +def match_vid_to_str(value, mask, ofpvid_present): + if mask is not None: + return '0x%04x/0x%04x' % (value, mask) + + if value & ofpvid_present: + return str(value & ~ofpvid_present) + + return '0x%04x' % value + + +def to_action(dic, ofp, parser, action_type, util): + actions = {COPY_TTL_OUT: parser.OFPActionCopyTtlOut, + COPY_TTL_IN: parser.OFPActionCopyTtlIn, + DEC_MPLS_TTL: parser.OFPActionDecMplsTtl, + POP_VLAN: parser.OFPActionPopVlan, + DEC_NW_TTL: parser.OFPActionDecNwTtl, + POP_PBB: parser.OFPActionPopPbb} + + need_ethertype = {PUSH_VLAN: parser.OFPActionPushVlan, + PUSH_MPLS: parser.OFPActionPushMpls, + POP_MPLS: parser.OFPActionPopMpls, + PUSH_PBB: parser.OFPActionPushPbb} + + if action_type in actions: + return actions[action_type]() + + elif action_type in need_ethertype: + ethertype = int(dic.get('ethertype')) + return need_ethertype[action_type](ethertype) + + elif action_type == OUTPUT: + out_port = util.ofp_port_from_user(dic.get('port', ofp.OFPP_ANY)) + max_len = util.ofp_cml_from_user(dic.get('max_len', 
ofp.OFPCML_MAX)) + return parser.OFPActionOutput(out_port, max_len) + + elif action_type == SET_MPLS_TTL: + mpls_ttl = int(dic.get('mpls_ttl')) + return parser.OFPActionSetMplsTtl(mpls_ttl) + + elif action_type == SET_QUEUE: + queue_id = util.ofp_queue_from_user(dic.get('queue_id')) + return parser.OFPActionSetQueue(queue_id) + + elif action_type == GROUP: + group_id = util.ofp_group_from_user(dic.get('group_id')) + return parser.OFPActionGroup(group_id) + + elif action_type == SET_NW_TTL: + nw_ttl = int(dic.get('nw_ttl')) + return parser.OFPActionSetNwTtl(nw_ttl) + + elif action_type == SET_FIELD: + field = dic.get('field') + value = dic.get('value') + return parser.OFPActionSetField(**{field: value}) + + elif action_type == 'COPY_FIELD': + n_bits = int(dic.get('n_bits')) + src_offset = int(dic.get('src_offset')) + dst_offset = int(dic.get('dst_offset')) + oxm_ids = [parser.OFPOxmId(str(dic.get('src_oxm_id'))), + parser.OFPOxmId(str(dic.get('dst_oxm_id')))] + return parser.OFPActionCopyField( + n_bits, src_offset, dst_offset, oxm_ids) + + elif action_type == 'METER': + if hasattr(parser, 'OFPActionMeter'): + # OpenFlow 1.5 or later + meter_id = int(dic.get('meter_id')) + return parser.OFPActionMeter(meter_id) + else: + # OpenFlow 1.4 or earlier + return None + + elif action_type == EXPERIMENTER: + experimenter = int(dic.get('experimenter')) + data_type = dic.get('data_type', 'ascii') + + if data_type not in ('ascii', 'base64'): + LOG.error('Unknown data type: %s', data_type) + return None + + data = dic.get('data', '') + if data_type == 'base64': + data = base64.b64decode(data) + return parser.OFPActionExperimenterUnknown(experimenter, data) + + return None + + +def to_match_eth(value): + if '/' in value: + value = value.split('/') + return value[0], value[1] + + return value + + +def to_match_ip(value): + if '/' in value: + (ip_addr, ip_mask) = value.split('/') + + if ip_mask.isdigit(): + ip = netaddr.ip.IPNetwork(value) + ip_addr = str(ip.ip) + ip_mask = 
str(ip.netmask) + + return ip_addr, ip_mask + + return value + + +def to_match_vid(value, ofpvid_present): + # NOTE: If "vlan_id" field is described as decimal int value + # (and decimal string value), it is treated as values of + # VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically + # applied. OTOH, If it is described as hexadecimal string, + # treated as values of oxm_value (including OFPVID_PRESENT + # bit), and OFPVID_PRESENT bit is NOT automatically applied + if isinstance(value, six.integer_types): + # described as decimal int value + return value | ofpvid_present + + else: + if '/' in value: + val = value.split('/') + return int(val[0], 0), int(val[1], 0) + + else: + if value.isdigit(): + # described as decimal string value + return int(value, 10) | ofpvid_present + + return int(value, 0) + + +def to_match_masked_int(value): + if isinstance(value, str) and '/' in value: + value = value.split('/') + return (str_to_int(value[0]), str_to_int(value[1])) + + return str_to_int(value) + + +def to_match_packet_type(value): + if isinstance(value, (list, tuple)): + return str_to_int(value[0]) << 16 | str_to_int(value[1]) + else: + return str_to_int(value) + + +def send_experimenter(dp, exp, logger=None): + experimenter = exp.get('experimenter', 0) + exp_type = exp.get('exp_type', 0) + data_type = exp.get('data_type', 'ascii') + + data = exp.get('data', '') + if data_type == 'base64': + data = base64.b64decode(data) + elif data_type == 'ascii': + data = data.encode('ascii') + else: + get_logger(logger).error('Unknown data type: %s', data_type) + return + + expmsg = dp.ofproto_parser.OFPExperimenter( + dp, experimenter, exp_type, data) + send_msg(dp, expmsg, logger) + + +def send_msg(dp, msg, logger=None): + if msg.xid is None: + dp.set_xid(msg) + + log = get_logger(logger) + # NOTE(jkoelker) Prevent unnecessary string formating by including the + # format rules in the log_msg + log_msg = ('Sending message with xid(%x) to ' + 'datapath(' + dpid._DPID_FMT + '): 
%s') + log.debug(log_msg, msg.xid, dp.id, msg) + dp.send_msg(msg) + + +def send_stats_request(dp, stats, waiters, msgs, logger=None): + dp.set_xid(stats) + waiters_per_dp = waiters.setdefault(dp.id, {}) + lock = hub.Event() + previous_msg_len = len(msgs) + waiters_per_dp[stats.xid] = (lock, msgs) + send_msg(dp, stats, logger) + + lock.wait(timeout=DEFAULT_TIMEOUT) + current_msg_len = len(msgs) + + while current_msg_len > previous_msg_len: + previous_msg_len = current_msg_len + lock.wait(timeout=DEFAULT_TIMEOUT) + current_msg_len = len(msgs) + + if not lock.is_set(): + del waiters_per_dp[stats.xid] def str_to_int(str_num): @@ -27,22 +267,104 @@ class OFCtlUtil(object): def __init__(self, ofproto): self.ofproto = ofproto + self.deprecated_value = [ + 'OFPTFPT_EXPERIMENTER_SLAVE', + 'OFPTFPT_EXPERIMENTER_MASTER', + 'OFPQCFC_EPERM'] def _reserved_num_from_user(self, num, prefix): - if isinstance(num, int): - return num - else: - if num.startswith(prefix): - return getattr(self.ofproto, num) - else: - return getattr(self.ofproto, prefix + num.upper()) + try: + return str_to_int(num) + except ValueError: + try: + if num.startswith(prefix): + return getattr(self.ofproto, num.upper()) + else: + return getattr(self.ofproto, prefix + num.upper()) + except AttributeError: + LOG.warning( + "Cannot convert argument to reserved number: %s", num) + return num def _reserved_num_to_user(self, num, prefix): for k, v in self.ofproto.__dict__.items(): - if k.startswith(prefix) and v == num: - return k.replace(prefix, '') + if k not in self.deprecated_value and \ + k.startswith(prefix) and v == num: + return k.replace(prefix, '') return num + def ofp_port_features_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPPF_') + + def ofp_port_features_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPPF_') + + def ofp_port_mod_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPPMPT_') + + def ofp_port_mod_prop_type_to_user(self, 
act): + return self._reserved_num_to_user(act, 'OFPPMPT_') + + def ofp_port_desc_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPPDPT_') + + def ofp_port_desc_prop_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPPDPT_') + + def ofp_action_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPAT_') + + def ofp_action_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPAT_') + + def ofp_instruction_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPIT_') + + def ofp_instruction_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPIT_') + + def ofp_group_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPGT_') + + def ofp_group_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPGT_') + + def ofp_meter_band_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPMBT_') + + def ofp_meter_band_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPMBT_') + + def ofp_table_feature_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPTFPT_') + + def ofp_table_feature_prop_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPTFPT_') + + def ofp_port_stats_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPPSPT_') + + def ofp_port_stats_prop_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPPSPT_') + + def ofp_queue_desc_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPQDPT_') + + def ofp_queue_desc_prop_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPQDPT_') + + def ofp_queue_stats_prop_type_from_user(self, act): + return self._reserved_num_from_user(act, 'OFPQSPT_') + + def ofp_queue_stats_prop_type_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPQSPT_') + + def ofp_meter_flags_from_user(self, act): + return 
self._reserved_num_from_user(act, 'OFPMF_') + + def ofp_meter_flags_to_user(self, act): + return self._reserved_num_to_user(act, 'OFPMF_') + def ofp_port_from_user(self, port): return self._reserved_num_from_user(port, 'OFPP_') @@ -67,6 +389,18 @@ class OFCtlUtil(object): def ofp_group_to_user(self, group): return self._reserved_num_to_user(group, 'OFPG_') + def ofp_group_capabilities_from_user(self, group): + return self._reserved_num_from_user(group, 'OFPGFC_') + + def ofp_group_capabilities_to_user(self, group): + return self._reserved_num_to_user(group, 'OFPGFC_') + + def ofp_group_bucket_prop_type_from_user(self, group): + return self._reserved_num_from_user(group, 'OFPGBPT_') + + def ofp_group_bucket_prop_type_to_user(self, group): + return self._reserved_num_to_user(group, 'OFPGBPT_') + def ofp_buffer_from_user(self, buffer): if buffer in ['OFP_NO_BUFFER', 'NO_BUFFER']: return self.ofproto.OFP_NO_BUFFER diff --git a/ryu/lib/ofctl_v1_0.py b/ryu/lib/ofctl_v1_0.py index 97ffa7b2..b38cc12b 100644 --- a/ryu/lib/ofctl_v1_0.py +++ b/ryu/lib/ofctl_v1_0.py @@ -18,7 +18,6 @@ import socket import logging from ryu.ofproto import ofproto_v1_0 -from ryu.lib import hub from ryu.lib import ofctl_utils from ryu.lib.mac import haddr_to_bin, haddr_to_str @@ -258,8 +257,8 @@ def match_to_str(m): def nw_src_to_str(wildcards, addr): ip = socket.inet_ntoa(struct.pack('!I', addr)) - mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK) - >> ofproto_v1_0.OFPFW_NW_SRC_SHIFT) + mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK) >> + ofproto_v1_0.OFPFW_NW_SRC_SHIFT) if mask == 32: mask = 0 if mask: @@ -269,8 +268,8 @@ def nw_src_to_str(wildcards, addr): def nw_dst_to_str(wildcards, addr): ip = socket.inet_ntoa(struct.pack('!I', addr)) - mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK) - >> ofproto_v1_0.OFPFW_NW_DST_SHIFT) + mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK) >> + ofproto_v1_0.OFPFW_NW_DST_SHIFT) if mask == 32: mask = 0 if mask: @@ -278,30 
+277,11 @@ def nw_dst_to_str(wildcards, addr): return ip -def send_stats_request(dp, stats, waiters, msgs): - dp.set_xid(stats) - waiters_per_dp = waiters.setdefault(dp.id, {}) - lock = hub.Event() - previous_msg_len = len(msgs) - waiters_per_dp[stats.xid] = (lock, msgs) - dp.send_msg(stats) - - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - while current_msg_len > previous_msg_len: - previous_msg_len = current_msg_len - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - if not lock.is_set(): - del waiters_per_dp[stats.xid] - - def get_desc_stats(dp, waiters): stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + s = {} for msg in msgs: stats = msg.body @@ -314,11 +294,21 @@ def get_desc_stats(dp, waiters): return desc -def get_queue_stats(dp, waiters): - stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, dp.ofproto.OFPP_ALL, - dp.ofproto.OFPQ_ALL) +def get_queue_stats(dp, waiters, port=None, queue_id=None): + if port is None: + port = dp.ofproto.OFPP_ALL + else: + port = int(str(port), 0) + + if queue_id is None: + queue_id = dp.ofproto.OFPQ_ALL + else: + queue_id = int(str(queue_id), 0) + + stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port, + queue_id) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) s = [] for msg in msgs: @@ -345,7 +335,7 @@ def get_flow_stats(dp, waiters, flow=None): dp, 0, match, table_id, out_port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: @@ -381,7 +371,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None): dp, 0, match, table_id, out_port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: @@ 
-390,7 +380,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None): s = {'packet_count': st.packet_count, 'byte_count': st.byte_count, 'flow_count': st.flow_count} - flows.append(s) + flows.append(s) flows = {str(dp.id): flows} return flows @@ -400,7 +390,7 @@ def get_table_stats(dp, waiters): stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0) ofp = dp.ofproto msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) match_convert = {ofp.OFPFW_IN_PORT: 'IN_PORT', ofp.OFPFW_DL_VLAN: 'DL_VLAN', @@ -447,11 +437,16 @@ def get_table_stats(dp, waiters): return desc -def get_port_stats(dp, waiters): +def get_port_stats(dp, waiters, port=None): + if port is None: + port = dp.ofproto.OFPP_NONE + else: + port = int(str(port), 0) + stats = dp.ofproto_parser.OFPPortStatsRequest( - dp, 0, dp.ofproto.OFPP_NONE) + dp, 0, port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) ports = [] for msg in msgs: @@ -478,7 +473,7 @@ def get_port_desc(dp, waiters): stats = dp.ofproto_parser.OFPFeaturesRequest(dp) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) descs = [] @@ -521,7 +516,7 @@ def mod_flow_entry(dp, flow, cmd): flags=flags, actions=actions) - dp.send_msg(flow_mod) + ofctl_utils.send_msg(dp, flow_mod, LOG) def delete_flow_entry(dp): @@ -532,7 +527,7 @@ def delete_flow_entry(dp): datapath=dp, match=match, cookie=0, command=dp.ofproto.OFPFC_DELETE) - dp.send_msg(flow_mod) + ofctl_utils.send_msg(dp, flow_mod, LOG) def mod_port_behavior(dp, port_config): @@ -545,4 +540,4 @@ def mod_port_behavior(dp, port_config): port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) - dp.send_msg(port_mod) + ofctl_utils.send_msg(dp, port_mod, LOG) diff --git a/ryu/lib/ofctl_v1_2.py b/ryu/lib/ofctl_v1_2.py index 23ba30c2..3015b305 100644 --- 
a/ryu/lib/ofctl_v1_2.py +++ b/ryu/lib/ofctl_v1_2.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import base64 import logging import netaddr @@ -21,7 +20,6 @@ from ryu.ofproto import ether from ryu.ofproto import inet from ryu.ofproto import ofproto_v1_2 from ryu.ofproto import ofproto_v1_2_parser -from ryu.lib import hub from ryu.lib import ofctl_utils @@ -104,8 +102,9 @@ def to_actions(dp, acts): else: LOG.error('Unknown action type: %s', action_type) if write_actions: - inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, - write_actions)) + inst.append( + parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, + write_actions)) elif action_type == 'CLEAR_ACTIONS': inst.append(parser.OFPInstructionActions( ofp.OFPIT_CLEAR_ACTIONS, [])) @@ -395,30 +394,10 @@ def match_vid_to_str(value, mask): return value -def send_stats_request(dp, stats, waiters, msgs): - dp.set_xid(stats) - waiters_per_dp = waiters.setdefault(dp.id, {}) - lock = hub.Event() - previous_msg_len = len(msgs) - waiters_per_dp[stats.xid] = (lock, msgs) - dp.send_msg(stats) - - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - while current_msg_len > previous_msg_len: - previous_msg_len = current_msg_len - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - if not lock.is_set(): - del waiters_per_dp[stats.xid] - - def get_desc_stats(dp, waiters): stats = dp.ofproto_parser.OFPDescStatsRequest(dp) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) s = {} for msg in msgs: @@ -432,12 +411,23 @@ def get_desc_stats(dp, waiters): return desc -def get_queue_stats(dp, waiters): +def get_queue_stats(dp, waiters, port=None, queue_id=None): ofp = dp.ofproto - stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, ofp.OFPP_ANY, - ofp.OFPQ_ALL, 0) + + if port is None: + port = ofp.OFPP_ANY + else: + port = int(str(port), 0) + + 
if queue_id is None: + queue_id = ofp.OFPQ_ALL + else: + queue_id = int(str(queue_id), 0) + + stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, port, + queue_id, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) s = [] for msg in msgs: @@ -452,12 +442,15 @@ def get_queue_stats(dp, waiters): return desc -def get_queue_config(dp, port, waiters): +def get_queue_config(dp, waiters, port=None): ofp = dp.ofproto - port = UTIL.ofp_port_from_user(port) + if port is None: + port = ofp.OFPP_ANY + else: + port = UTIL.ofp_port_from_user(int(str(port), 0)) stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE', dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE', @@ -506,7 +499,7 @@ def get_flow_stats(dp, waiters, flow=None): dp, table_id, out_port, out_group, cookie, cookie_mask, match) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: @@ -547,7 +540,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None): dp, table_id, out_port, out_group, cookie, cookie_mask, match) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: @@ -565,7 +558,7 @@ def get_table_stats(dp, waiters): stats = dp.ofproto_parser.OFPTableStatsRequest(dp) ofp = dp.ofproto msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) oxm_type_convert = {ofp.OFPXMT_OFB_IN_PORT: 'IN_PORT', ofp.OFPXMT_OFB_IN_PHY_PORT: 'IN_PHY_PORT', @@ -686,11 +679,16 @@ def get_table_stats(dp, waiters): return desc -def get_port_stats(dp, waiters): +def get_port_stats(dp, waiters, port=None): + if port is None: + port = dp.ofproto.OFPP_ANY + 
else: + port = int(str(port), 0) + stats = dp.ofproto_parser.OFPPortStatsRequest( - dp, dp.ofproto.OFPP_ANY, 0) + dp, port, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) ports = [] for msg in msgs: @@ -713,11 +711,16 @@ def get_port_stats(dp, waiters): return ports -def get_group_stats(dp, waiters): +def get_group_stats(dp, waiters, group_id=None): + if group_id is None: + group_id = dp.ofproto.OFPG_ALL + else: + group_id = int(str(group_id), 0) + stats = dp.ofproto_parser.OFPGroupStatsRequest( - dp, dp.ofproto.OFPG_ALL, 0) + dp, group_id, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) groups = [] for msg in msgs: @@ -766,7 +769,7 @@ def get_group_features(dp, waiters): stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) features = [] for msg in msgs: @@ -807,7 +810,7 @@ def get_group_desc(dp, waiters): stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) descs = [] for msg in msgs: @@ -834,7 +837,7 @@ def get_port_desc(dp, waiters): stats = dp.ofproto_parser.OFPFeaturesRequest(dp) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) descs = [] @@ -879,7 +882,7 @@ def mod_flow_entry(dp, flow, cmd): hard_timeout, priority, buffer_id, out_port, out_group, flags, match, inst) - dp.send_msg(flow_mod) + ofctl_utils.send_msg(dp, flow_mod, LOG) def mod_group_entry(dp, group, cmd): @@ -911,7 +914,7 @@ def mod_group_entry(dp, group, cmd): group_mod = dp.ofproto_parser.OFPGroupMod( dp, cmd, type_, group_id, buckets) - dp.send_msg(group_mod) + ofctl_utils.send_msg(dp, group_mod, LOG) def 
mod_port_behavior(dp, port_config): @@ -924,20 +927,8 @@ def mod_port_behavior(dp, port_config): port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) - dp.send_msg(port_mod) + ofctl_utils.send_msg(dp, port_mod, LOG) -def send_experimenter(dp, exp): - experimenter = exp.get('experimenter', 0) - exp_type = exp.get('exp_type', 0) - data_type = exp.get('data_type', 'ascii') - if data_type != 'ascii' and data_type != 'base64': - LOG.error('Unknown data type: %s', data_type) - data = exp.get('data', '') - if data_type == 'base64': - data = base64.b64decode(data) - - expmsg = dp.ofproto_parser.OFPExperimenter( - dp, experimenter, exp_type, data) - - dp.send_msg(expmsg) +# NOTE(jkoelker) Alias common funcitons +send_experimenter = ofctl_utils.send_experimenter diff --git a/ryu/lib/ofctl_v1_3.py b/ryu/lib/ofctl_v1_3.py index 2de9e37f..3d768ce2 100644 --- a/ryu/lib/ofctl_v1_3.py +++ b/ryu/lib/ofctl_v1_3.py @@ -15,13 +15,13 @@ import base64 import logging -import netaddr from ryu.ofproto import ether from ryu.ofproto import inet +from ryu.ofproto import ofproto_common from ryu.ofproto import ofproto_v1_3 from ryu.ofproto import ofproto_v1_3_parser -from ryu.lib import hub +from ryu.lib import ofctl_nicira_ext from ryu.lib import ofctl_utils @@ -35,56 +35,8 @@ UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_3) def to_action(dp, dic): ofp = dp.ofproto parser = dp.ofproto_parser - action_type = dic.get('type') - if action_type == 'OUTPUT': - out_port = UTIL.ofp_port_from_user(dic.get('port', ofp.OFPP_ANY)) - max_len = UTIL.ofp_cml_from_user(dic.get('max_len', ofp.OFPCML_MAX)) - result = parser.OFPActionOutput(out_port, max_len) - elif action_type == 'COPY_TTL_OUT': - result = parser.OFPActionCopyTtlOut() - elif action_type == 'COPY_TTL_IN': - result = parser.OFPActionCopyTtlIn() - elif action_type == 'SET_MPLS_TTL': - mpls_ttl = int(dic.get('mpls_ttl')) - result = parser.OFPActionSetMplsTtl(mpls_ttl) - elif action_type == 'DEC_MPLS_TTL': - result = 
parser.OFPActionDecMplsTtl() - elif action_type == 'PUSH_VLAN': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPushVlan(ethertype) - elif action_type == 'POP_VLAN': - result = parser.OFPActionPopVlan() - elif action_type == 'PUSH_MPLS': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPushMpls(ethertype) - elif action_type == 'POP_MPLS': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPopMpls(ethertype) - elif action_type == 'SET_QUEUE': - queue_id = UTIL.ofp_queue_from_user(dic.get('queue_id')) - result = parser.OFPActionSetQueue(queue_id) - elif action_type == 'GROUP': - group_id = UTIL.ofp_group_from_user(dic.get('group_id')) - result = parser.OFPActionGroup(group_id) - elif action_type == 'SET_NW_TTL': - nw_ttl = int(dic.get('nw_ttl')) - result = parser.OFPActionSetNwTtl(nw_ttl) - elif action_type == 'DEC_NW_TTL': - result = parser.OFPActionDecNwTtl() - elif action_type == 'SET_FIELD': - field = dic.get('field') - value = dic.get('value') - result = parser.OFPActionSetField(**{field: value}) - elif action_type == 'PUSH_PBB': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPushPbb(ethertype) - elif action_type == 'POP_PBB': - result = parser.OFPActionPopPbb() - else: - result = None - - return result + return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL) def to_actions(dp, acts): @@ -110,8 +62,9 @@ def to_actions(dp, acts): else: LOG.error('Unknown action type: %s', action_type) if write_actions: - inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, - write_actions)) + inst.append( + parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, + write_actions)) elif action_type == 'CLEAR_ACTIONS': inst.append(parser.OFPInstructionActions( ofp.OFPIT_CLEAR_ACTIONS, [])) @@ -176,6 +129,17 @@ def action_to_str(act): buf = 'PUSH_PBB:' + str(act.ethertype) elif action_type == ofproto_v1_3.OFPAT_POP_PBB: buf = 'POP_PBB' + elif action_type == ofproto_v1_3.OFPAT_EXPERIMENTER: 
+ if act.experimenter == ofproto_common.NX_EXPERIMENTER_ID: + try: + return ofctl_nicira_ext.action_to_str(act, action_to_str) + except: + LOG.debug('Error parsing NX_ACTION(%s)', + act.__class__.__name__, exc_info=True) + + data_str = base64.b64encode(act.data) + buf = 'EXPERIMENTER: {experimenter:%s, data:%s}' % \ + (act.experimenter, data_str.decode('utf-8')) else: buf = 'UNKNOWN' return buf @@ -229,11 +193,11 @@ def actions_to_str(instructions): def to_match(dp, attrs): convert = {'in_port': UTIL.ofp_port_from_user, 'in_phy_port': int, - 'metadata': to_match_masked_int, - 'dl_dst': to_match_eth, - 'dl_src': to_match_eth, - 'eth_dst': to_match_eth, - 'eth_src': to_match_eth, + 'metadata': ofctl_utils.to_match_masked_int, + 'dl_dst': ofctl_utils.to_match_eth, + 'dl_src': ofctl_utils.to_match_eth, + 'eth_dst': ofctl_utils.to_match_eth, + 'eth_src': ofctl_utils.to_match_eth, 'dl_type': int, 'eth_type': int, 'dl_vlan': to_match_vid, @@ -243,10 +207,10 @@ def to_match(dp, attrs): 'ip_ecn': int, 'nw_proto': int, 'ip_proto': int, - 'nw_src': to_match_ip, - 'nw_dst': to_match_ip, - 'ipv4_src': to_match_ip, - 'ipv4_dst': to_match_ip, + 'nw_src': ofctl_utils.to_match_ip, + 'nw_dst': ofctl_utils.to_match_ip, + 'ipv4_src': ofctl_utils.to_match_ip, + 'ipv4_dst': ofctl_utils.to_match_ip, 'tp_src': int, 'tp_dst': int, 'tcp_src': int, @@ -258,24 +222,24 @@ def to_match(dp, attrs): 'icmpv4_type': int, 'icmpv4_code': int, 'arp_op': int, - 'arp_spa': to_match_ip, - 'arp_tpa': to_match_ip, - 'arp_sha': to_match_eth, - 'arp_tha': to_match_eth, - 'ipv6_src': to_match_ip, - 'ipv6_dst': to_match_ip, + 'arp_spa': ofctl_utils.to_match_ip, + 'arp_tpa': ofctl_utils.to_match_ip, + 'arp_sha': ofctl_utils.to_match_eth, + 'arp_tha': ofctl_utils.to_match_eth, + 'ipv6_src': ofctl_utils.to_match_ip, + 'ipv6_dst': ofctl_utils.to_match_ip, 'ipv6_flabel': int, 'icmpv6_type': int, 'icmpv6_code': int, - 'ipv6_nd_target': to_match_ip, - 'ipv6_nd_sll': to_match_eth, - 'ipv6_nd_tll': to_match_eth, + 
'ipv6_nd_target': ofctl_utils.to_match_ip, + 'ipv6_nd_sll': ofctl_utils.to_match_eth, + 'ipv6_nd_tll': ofctl_utils.to_match_eth, 'mpls_label': int, 'mpls_tc': int, 'mpls_bos': int, - 'pbb_isid': to_match_masked_int, - 'tunnel_id': to_match_masked_int, - 'ipv6_exthdr': to_match_masked_int} + 'pbb_isid': ofctl_utils.to_match_masked_int, + 'tunnel_id': ofctl_utils.to_match_masked_int, + 'ipv6_exthdr': ofctl_utils.to_match_masked_int} keys = {'dl_dst': 'eth_dst', 'dl_src': 'eth_src', @@ -319,55 +283,8 @@ def to_match(dp, attrs): return dp.ofproto_parser.OFPMatch(**kwargs) -def to_match_eth(value): - if '/' in value: - value = value.split('/') - return value[0], value[1] - else: - return value - - -def to_match_ip(value): - if '/' in value: - (ip_addr, ip_mask) = value.split('/') - if ip_mask.isdigit(): - ip = netaddr.ip.IPNetwork(value) - ip_addr = str(ip.ip) - ip_mask = str(ip.netmask) - return ip_addr, ip_mask - else: - return value - - def to_match_vid(value): - # NOTE: If "vlan_id/dl_vlan" field is described as decimal int value - # (and decimal string value), it is treated as values of - # VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically - # applied. OTOH, If it is described as hexadecimal string, - # treated as values of oxm_value (including OFPVID_PRESENT - # bit), and OFPVID_PRESENT bit is NOT automatically applied. 
- if isinstance(value, int): - # described as decimal int value - return value | ofproto_v1_3.OFPVID_PRESENT - else: - if '/' in value: - val = value.split('/') - return int(val[0], 0), int(val[1], 0) - else: - if value.isdigit(): - # described as decimal string value - return int(value, 10) | ofproto_v1_3.OFPVID_PRESENT - else: - return int(value, 0) - - -def to_match_masked_int(value): - if isinstance(value, str) and '/' in value: - value = value.split('/') - return (ofctl_utils.str_to_int(value[0]), - ofctl_utils.str_to_int(value[1])) - else: - return ofctl_utils.str_to_int(value) + return ofctl_utils.to_match_vid(value, ofproto_v1_3.OFPVID_PRESENT) def match_to_str(ofmatch): @@ -397,7 +314,8 @@ def match_to_str(ofmatch): mask = match_field['OXMTlv']['mask'] value = match_field['OXMTlv']['value'] if key == 'dl_vlan': - value = match_vid_to_str(value, mask) + value = ofctl_utils.match_vid_to_str(value, mask, + ofproto_v1_3.OFPVID_PRESENT) elif key == 'in_port': value = UTIL.ofp_port_to_user(value) else: @@ -408,41 +326,17 @@ def match_to_str(ofmatch): return match -def match_vid_to_str(value, mask): - if mask is not None: - value = '0x%04x/0x%04x' % (value, mask) - else: - if value & ofproto_v1_3.OFPVID_PRESENT: - value = str(value & ~ofproto_v1_3.OFPVID_PRESENT) - else: - value = '0x%04x' % value - return value +def wrap_dpid_dict(dp, value, to_user=True): + if to_user: + return {str(dp.id): value} + + return {dp.id: value} -def send_stats_request(dp, stats, waiters, msgs): - dp.set_xid(stats) - waiters_per_dp = waiters.setdefault(dp.id, {}) - lock = hub.Event() - previous_msg_len = len(msgs) - waiters_per_dp[stats.xid] = (lock, msgs) - dp.send_msg(stats) - - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - while current_msg_len > previous_msg_len: - previous_msg_len = current_msg_len - lock.wait(timeout=DEFAULT_TIMEOUT) - current_msg_len = len(msgs) - - if not lock.is_set(): - del waiters_per_dp[stats.xid] - - -def get_desc_stats(dp, 
waiters): +def get_desc_stats(dp, waiters, to_user=True): stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) s = {} for msg in msgs: @@ -452,16 +346,27 @@ def get_desc_stats(dp, waiters): 'sw_desc': stats.sw_desc, 'serial_num': stats.serial_num, 'dp_desc': stats.dp_desc} - desc = {str(dp.id): s} - return desc + + return wrap_dpid_dict(dp, s, to_user) -def get_queue_stats(dp, waiters): +def get_queue_stats(dp, waiters, port=None, queue_id=None, to_user=True): ofp = dp.ofproto - stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY, - ofp.OFPQ_ALL) + + if port is None: + port = ofp.OFPP_ANY + else: + port = int(str(port), 0) + + if queue_id is None: + queue_id = ofp.OFPQ_ALL + else: + queue_id = int(str(queue_id), 0) + + stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port, + queue_id) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) s = [] for msg in msgs: @@ -474,16 +379,19 @@ def get_queue_stats(dp, waiters): 'tx_bytes': stat.tx_bytes, 'tx_errors': stat.tx_errors, 'tx_packets': stat.tx_packets}) - desc = {str(dp.id): s} - return desc + + return wrap_dpid_dict(dp, s, to_user) -def get_queue_config(dp, port, waiters): +def get_queue_config(dp, waiters, port=None, to_user=True): ofp = dp.ofproto - port = UTIL.ofp_port_from_user(port) + if port is None: + port = ofp.OFPP_ANY + else: + port = UTIL.ofp_port_from_user(int(str(port), 0)) stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE', dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE', @@ -504,19 +412,33 @@ def get_queue_config(dp, port, waiters): p['experimenter'] = prop.experimenter p['data'] = prop.data prop_list.append(p) - q = {'port': 
UTIL.ofp_port_to_user(queue.port), - 'properties': prop_list, - 'queue_id': UTIL.ofp_queue_to_user(queue.queue_id)} + + q = {'properties': prop_list} + + if to_user: + q['port'] = UTIL.ofp_port_to_user(queue.port) + q['queue_id'] = UTIL.ofp_queue_to_user(queue.queue_id) + + else: + q['port'] = queue.port + q['queue_id'] = queue.queue_id + queue_list.append(q) - c = {'port': UTIL.ofp_port_to_user(config.port), - 'queues': queue_list} + + c = {'queues': queue_list} + + if to_user: + c['port'] = UTIL.ofp_port_to_user(config.port) + + else: + c['port'] = config.port + configs.append(c) - configs = {str(dp.id): configs} - return configs + return wrap_dpid_dict(dp, configs, to_user) -def get_flow_stats(dp, waiters, flow=None): +def get_flow_stats(dp, waiters, flow=None, to_user=True): flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) @@ -534,34 +456,39 @@ def get_flow_stats(dp, waiters, flow=None): match) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: for stats in msg.body: - actions = actions_to_str(stats.instructions) - match = match_to_str(stats.match) - s = {'priority': stats.priority, 'cookie': stats.cookie, 'idle_timeout': stats.idle_timeout, 'hard_timeout': stats.hard_timeout, - 'actions': actions, - 'match': match, 'byte_count': stats.byte_count, 'duration_sec': stats.duration_sec, 'duration_nsec': stats.duration_nsec, 'packet_count': stats.packet_count, - 'table_id': UTIL.ofp_table_to_user(stats.table_id), 'length': stats.length, 'flags': stats.flags} + + if to_user: + s['actions'] = actions_to_str(stats.instructions) + s['match'] = match_to_str(stats.match) + s['table_id'] = UTIL.ofp_table_to_user(stats.table_id) + + else: + s['actions'] = stats.instructions + s['instructions'] = stats.instructions + s['match'] = stats.match + s['table_id'] = stats.table_id + flows.append(s) - flows = {str(dp.id): 
flows} - return flows + return wrap_dpid_dict(dp, flows, to_user) -def get_aggregate_flow_stats(dp, waiters, flow=None): +def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True): flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) @@ -579,7 +506,7 @@ def get_aggregate_flow_stats(dp, waiters, flow=None): match) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) flows = [] for msg in msgs: @@ -588,35 +515,39 @@ def get_aggregate_flow_stats(dp, waiters, flow=None): 'byte_count': stats.byte_count, 'flow_count': stats.flow_count} flows.append(s) - flows = {str(dp.id): flows} - return flows + return wrap_dpid_dict(dp, flows, to_user) -def get_table_stats(dp, waiters): +def get_table_stats(dp, waiters, to_user=True): stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) tables = [] for msg in msgs: stats = msg.body for stat in stats: - s = {'table_id': UTIL.ofp_table_to_user(stat.table_id), - 'active_count': stat.active_count, + s = {'active_count': stat.active_count, 'lookup_count': stat.lookup_count, 'matched_count': stat.matched_count} + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + else: + s['table_id'] = stat.table_id + tables.append(s) - desc = {str(dp.id): tables} - return desc + return wrap_dpid_dict(dp, tables, to_user) -def get_table_features(dp, waiters): +def get_table_features(dp, waiters, to_user=True): stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, []) msgs = [] ofproto = dp.ofproto - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) prop_type = {ofproto.OFPTFPT_INSTRUCTIONS: 'INSTRUCTIONS', ofproto.OFPTFPT_INSTRUCTIONS_MISS: 'INSTRUCTIONS_MISS', @@ -636,6 +567,9 @@ def get_table_features(dp, waiters): 
ofproto.OFPTFPT_EXPERIMENTER_MISS: 'EXPERIMENTER_MISS' } + if not to_user: + prop_type = dict((k, k) for k in prop_type.keys()) + p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS, ofproto.OFPTFPT_INSTRUCTIONS_MISS] @@ -694,31 +628,40 @@ def get_table_features(dp, waiters): elif prop.type in p_type_experimenter: pass properties.append(p) - s = {'table_id': UTIL.ofp_table_to_user(stat.table_id), - 'name': stat.name.decode('utf-8'), + s = {'name': stat.name.decode('utf-8'), 'metadata_match': stat.metadata_match, 'metadata_write': stat.metadata_write, 'config': stat.config, 'max_entries': stat.max_entries, 'properties': properties, } + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + else: + s['table_id'] = stat.table_id + tables.append(s) - desc = {str(dp.id): tables} - return desc + return wrap_dpid_dict(dp, tables, to_user) -def get_port_stats(dp, waiters): +def get_port_stats(dp, waiters, port=None, to_user=True): + if port is None: + port = dp.ofproto.OFPP_ANY + else: + port = int(str(port), 0) + stats = dp.ofproto_parser.OFPPortStatsRequest( - dp, 0, dp.ofproto.OFPP_ANY) + dp, 0, port) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) ports = [] for msg in msgs: for stats in msg.body: - s = {'port_no': UTIL.ofp_port_to_user(stats.port_no), - 'rx_packets': stats.rx_packets, + s = {'rx_packets': stats.rx_packets, 'tx_packets': stats.tx_packets, 'rx_bytes': stats.rx_bytes, 'tx_bytes': stats.tx_bytes, @@ -732,16 +675,28 @@ def get_port_stats(dp, waiters): 'collisions': stats.collisions, 'duration_sec': stats.duration_sec, 'duration_nsec': stats.duration_nsec} + + if to_user: + s['port_no'] = UTIL.ofp_port_to_user(stats.port_no) + + else: + s['port_no'] = stats.port_no + ports.append(s) - ports = {str(dp.id): ports} - return ports + + return wrap_dpid_dict(dp, ports, to_user) -def get_meter_stats(dp, waiters): +def get_meter_stats(dp, waiters, meter_id=None, 
to_user=True): + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = int(str(meter_id), 0) + stats = dp.ofproto_parser.OFPMeterStatsRequest( - dp, 0, dp.ofproto.OFPM_ALL) + dp, 0, meter_id) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) meters = [] for msg in msgs: @@ -751,20 +706,26 @@ def get_meter_stats(dp, waiters): b = {'packet_band_count': band.packet_band_count, 'byte_band_count': band.byte_band_count} bands.append(b) - s = {'meter_id': UTIL.ofp_meter_to_user(stats.meter_id), - 'len': stats.len, + s = {'len': stats.len, 'flow_count': stats.flow_count, 'packet_in_count': stats.packet_in_count, 'byte_in_count': stats.byte_in_count, 'duration_sec': stats.duration_sec, 'duration_nsec': stats.duration_nsec, 'band_stats': bands} + + if to_user: + s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id) + + else: + s['meter_id'] = stats.meter_id + meters.append(s) - meters = {str(dp.id): meters} - return meters + + return wrap_dpid_dict(dp, meters, to_user) -def get_meter_features(dp, waiters): +def get_meter_features(dp, waiters, to_user=True): ofp = dp.ofproto type_convert = {ofp.OFPMBT_DROP: 'DROP', @@ -777,7 +738,7 @@ def get_meter_features(dp, waiters): stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) features = [] for msg in msgs: @@ -785,22 +746,34 @@ def get_meter_features(dp, waiters): band_types = [] for k, v in type_convert.items(): if (1 << k) & feature.band_types: - band_types.append(v) + + if to_user: + band_types.append(v) + + else: + band_types.append(k) + capabilities = [] - for k, v in capa_convert.items(): + for k, v in sorted(capa_convert.items()): if k & feature.capabilities: - capabilities.append(v) + + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + f = {'max_meter': feature.max_meter, 
'band_types': band_types, 'capabilities': capabilities, 'max_bands': feature.max_bands, 'max_color': feature.max_color} features.append(f) - features = {str(dp.id): features} - return features + + return wrap_dpid_dict(dp, features, to_user) -def get_meter_config(dp, waiters): +def get_meter_config(dp, waiters, meter_id=None, to_user=True): flags = {dp.ofproto.OFPMF_KBPS: 'KBPS', dp.ofproto.OFPMF_PKTPS: 'PKTPS', dp.ofproto.OFPMF_BURST: 'BURST', @@ -810,41 +783,68 @@ def get_meter_config(dp, waiters): dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK', dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'} + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = int(str(meter_id), 0) + stats = dp.ofproto_parser.OFPMeterConfigStatsRequest( - dp, 0, dp.ofproto.OFPM_ALL) + dp, 0, meter_id) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) configs = [] for msg in msgs: for config in msg.body: bands = [] for band in config.bands: - b = {'type': band_type.get(band.type, ''), - 'rate': band.rate, + b = {'rate': band.rate, 'burst_size': band.burst_size} + + if to_user: + b['type'] = band_type.get(band.type, '') + + else: + b['type'] = band.type + if band.type == dp.ofproto.OFPMBT_DSCP_REMARK: b['prec_level'] = band.prec_level elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER: b['experimenter'] = band.experimenter bands.append(b) c_flags = [] - for k, v in flags.items(): + for k, v in sorted(flags.items()): if k & config.flags: - c_flags.append(v) + if to_user: + c_flags.append(v) + + else: + c_flags.append(k) + c = {'flags': c_flags, - 'meter_id': UTIL.ofp_meter_to_user(config.meter_id), 'bands': bands} + + if to_user: + c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id) + + else: + c['meter_id'] = config.meter_id + configs.append(c) - configs = {str(dp.id): configs} - return configs + + return wrap_dpid_dict(dp, configs, to_user) -def get_group_stats(dp, waiters): +def get_group_stats(dp, 
waiters, group_id=None, to_user=True): + if group_id is None: + group_id = dp.ofproto.OFPG_ALL + else: + group_id = int(str(group_id), 0) + stats = dp.ofproto_parser.OFPGroupStatsRequest( - dp, 0, dp.ofproto.OFPG_ALL) + dp, 0, group_id) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) groups = [] for msg in msgs: @@ -855,19 +855,25 @@ def get_group_stats(dp, waiters): 'byte_count': bucket_stat.byte_count} bucket_stats.append(c) g = {'length': stats.length, - 'group_id': UTIL.ofp_group_to_user(stats.group_id), 'ref_count': stats.ref_count, 'packet_count': stats.packet_count, 'byte_count': stats.byte_count, 'duration_sec': stats.duration_sec, 'duration_nsec': stats.duration_nsec, 'bucket_stats': bucket_stats} + + if to_user: + g['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + + else: + g['group_id'] = stats.group_id + groups.append(g) - groups = {str(dp.id): groups} - return groups + + return wrap_dpid_dict(dp, groups, to_user) -def get_group_features(dp, waiters): +def get_group_features(dp, waiters, to_user=True): ofp = dp.ofproto type_convert = {ofp.OFPGT_ALL: 'ALL', @@ -897,7 +903,7 @@ def get_group_features(dp, waiters): stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) features = [] for msg in msgs: @@ -905,31 +911,56 @@ def get_group_features(dp, waiters): types = [] for k, v in type_convert.items(): if (1 << k) & feature.types: - types.append(v) + if to_user: + types.append(v) + + else: + types.append(k) + capabilities = [] for k, v in cap_convert.items(): if k & feature.capabilities: - capabilities.append(v) - max_groups = [] - for k, v in type_convert.items(): - max_groups.append({v: feature.max_groups[k]}) + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + + if to_user: + max_groups = [] + for k, v in type_convert.items(): 
+ max_groups.append({v: feature.max_groups[k]}) + + else: + max_groups = feature.max_groups + actions = [] for k1, v1 in type_convert.items(): acts = [] for k2, v2 in act_convert.items(): if (1 << k2) & feature.actions[k1]: - acts.append(v2) - actions.append({v1: acts}) + if to_user: + acts.append(v2) + + else: + acts.append(k2) + + if to_user: + actions.append({v1: acts}) + + else: + actions.append({k1: acts}) + f = {'types': types, 'capabilities': capabilities, 'max_groups': max_groups, 'actions': actions} features.append(f) - features = {str(dp.id): features} - return features + + return wrap_dpid_dict(dp, features, to_user) -def get_group_desc(dp, waiters): +def get_group_desc(dp, waiters, to_user=True): type_convert = {dp.ofproto.OFPGT_ALL: 'ALL', dp.ofproto.OFPGT_SELECT: 'SELECT', @@ -938,7 +969,7 @@ def get_group_desc(dp, waiters): stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) descs = [] for msg in msgs: @@ -947,33 +978,44 @@ def get_group_desc(dp, waiters): for bucket in stats.buckets: actions = [] for action in bucket.actions: - actions.append(action_to_str(action)) + if to_user: + actions.append(action_to_str(action)) + + else: + actions.append(action) + b = {'weight': bucket.weight, 'watch_port': bucket.watch_port, 'watch_group': bucket.watch_group, 'actions': actions} buckets.append(b) - d = {'type': type_convert.get(stats.type), - 'group_id': UTIL.ofp_group_to_user(stats.group_id), - 'buckets': buckets} + + d = {'buckets': buckets} + if to_user: + d['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + d['type'] = type_convert.get(stats.type) + + else: + d['group_id'] = stats.group_id + d['type'] = stats.type + descs.append(d) - descs = {str(dp.id): descs} - return descs + + return wrap_dpid_dict(dp, descs, to_user) -def get_port_desc(dp, waiters): +def get_port_desc(dp, waiters, to_user=True): stats = 
dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0) msgs = [] - send_stats_request(dp, stats, waiters, msgs) + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) descs = [] for msg in msgs: stats = msg.body for stat in stats: - d = {'port_no': UTIL.ofp_port_to_user(stat.port_no), - 'hw_addr': stat.hw_addr, + d = {'hw_addr': stat.hw_addr, 'name': stat.name.decode('utf-8'), 'config': stat.config, 'state': stat.state, @@ -983,9 +1025,16 @@ def get_port_desc(dp, waiters): 'peer': stat.peer, 'curr_speed': stat.curr_speed, 'max_speed': stat.max_speed} + + if to_user: + d['port_no'] = UTIL.ofp_port_to_user(stat.port_no) + + else: + d['port_no'] = stat.port_no + descs.append(d) - descs = {str(dp.id): descs} - return descs + + return wrap_dpid_dict(dp, descs, to_user) def mod_flow_entry(dp, flow, cmd): @@ -1010,7 +1059,7 @@ def mod_flow_entry(dp, flow, cmd): hard_timeout, priority, buffer_id, out_port, out_group, flags, match, inst) - dp.send_msg(flow_mod) + ofctl_utils.send_msg(dp, flow_mod, LOG) def mod_meter_entry(dp, meter, cmd): @@ -1057,7 +1106,7 @@ def mod_meter_entry(dp, meter, cmd): meter_mod = dp.ofproto_parser.OFPMeterMod( dp, cmd, flags, meter_id, bands) - dp.send_msg(meter_mod) + ofctl_utils.send_msg(dp, meter_mod, LOG) def mod_group_entry(dp, group, cmd): @@ -1089,7 +1138,7 @@ def mod_group_entry(dp, group, cmd): group_mod = dp.ofproto_parser.OFPGroupMod( dp, cmd, type_, group_id, buckets) - dp.send_msg(group_mod) + ofctl_utils.send_msg(dp, group_mod, LOG) def mod_port_behavior(dp, port_config): @@ -1102,20 +1151,8 @@ def mod_port_behavior(dp, port_config): port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) - dp.send_msg(port_mod) + ofctl_utils.send_msg(dp, port_mod, LOG) -def send_experimenter(dp, exp): - experimenter = exp.get('experimenter', 0) - exp_type = exp.get('exp_type', 0) - data_type = exp.get('data_type', 'ascii') - if data_type != 'ascii' and data_type != 'base64': - LOG.error('Unknown data type: %s', 
data_type) - data = exp.get('data', '') - if data_type == 'base64': - data = base64.b64decode(data) - - expmsg = dp.ofproto_parser.OFPExperimenter( - dp, experimenter, exp_type, data) - - dp.send_msg(expmsg) +# NOTE(jkoelker) Alias common functions +send_experimenter = ofctl_utils.send_experimenter diff --git a/ryu/lib/ofctl_v1_4.py b/ryu/lib/ofctl_v1_4.py new file mode 100644 index 00000000..98ac94d8 --- /dev/null +++ b/ryu/lib/ofctl_v1_4.py @@ -0,0 +1,945 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import logging + +from ryu.ofproto import ether +from ryu.ofproto import ofproto_v1_4 +from ryu.ofproto import ofproto_v1_4_parser +from ryu.lib import ofctl_utils + +LOG = logging.getLogger(__name__) + +DEFAULT_TIMEOUT = 1.0 + +UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4) + + +def to_action(dp, dic): + ofp = dp.ofproto + parser = dp.ofproto_parser + action_type = dic.get('type') + return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL) + + +def _get_actions(dp, dics): + actions = [] + for d in dics: + action = to_action(dp, d) + if action is not None: + actions.append(action) + else: + LOG.error('Unknown action type: %s', d) + return actions + + +def to_instructions(dp, insts): + instructions = [] + ofp = dp.ofproto + parser = dp.ofproto_parser + + for i in insts: + inst_type = i.get('type') + if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']: + dics = i.get('actions', []) + actions = _get_actions(dp, dics) + if actions: + if inst_type == 'APPLY_ACTIONS': + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, + actions)) + else: + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, + actions)) + elif inst_type == 'CLEAR_ACTIONS': + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, [])) + elif inst_type == 'GOTO_TABLE': + table_id = int(i.get('table_id')) + instructions.append(parser.OFPInstructionGotoTable(table_id)) + elif inst_type == 'WRITE_METADATA': + metadata = ofctl_utils.str_to_int(i.get('metadata')) + metadata_mask = (ofctl_utils.str_to_int(i['metadata_mask']) + if 'metadata_mask' in i + else parser.UINT64_MAX) + instructions.append( + parser.OFPInstructionWriteMetadata( + metadata, metadata_mask)) + elif inst_type == 'METER': + meter_id = int(i.get('meter_id')) + instructions.append(parser.OFPInstructionMeter(meter_id)) + else: + LOG.error('Unknown instruction type: %s', inst_type) + + return instructions + + +def action_to_str(act): + s = 
act.to_jsondict()[act.__class__.__name__] + t = UTIL.ofp_action_type_to_user(s['type']) + s['type'] = t if t != s['type'] else 'UNKNOWN' + + if 'field' in s: + field = s.pop('field') + s['field'] = field['OXMTlv']['field'] + s['mask'] = field['OXMTlv']['mask'] + s['value'] = field['OXMTlv']['value'] + + return s + + +def instructions_to_str(instructions): + + s = [] + + for i in instructions: + v = i.to_jsondict()[i.__class__.__name__] + t = UTIL.ofp_instruction_type_to_user(v['type']) + inst_type = t if t != v['type'] else 'UNKNOWN' + # apply/write/clear-action instruction + if isinstance(i, ofproto_v1_4_parser.OFPInstructionActions): + acts = [] + for a in i.actions: + acts.append(action_to_str(a)) + v['type'] = inst_type + v['actions'] = acts + s.append(v) + # others + else: + v['type'] = inst_type + s.append(v) + + return s + + +def to_match(dp, attrs): + convert = {'in_port': UTIL.ofp_port_from_user, + 'in_phy_port': int, + 'metadata': ofctl_utils.to_match_masked_int, + 'eth_dst': ofctl_utils.to_match_eth, + 'eth_src': ofctl_utils.to_match_eth, + 'eth_type': int, + 'vlan_vid': to_match_vid, + 'vlan_pcp': int, + 'ip_dscp': int, + 'ip_ecn': int, + 'ip_proto': int, + 'ipv4_src': ofctl_utils.to_match_ip, + 'ipv4_dst': ofctl_utils.to_match_ip, + 'tcp_src': int, + 'tcp_dst': int, + 'udp_src': int, + 'udp_dst': int, + 'sctp_src': int, + 'sctp_dst': int, + 'icmpv4_type': int, + 'icmpv4_code': int, + 'arp_op': int, + 'arp_spa': ofctl_utils.to_match_ip, + 'arp_tpa': ofctl_utils.to_match_ip, + 'arp_sha': ofctl_utils.to_match_eth, + 'arp_tha': ofctl_utils.to_match_eth, + 'ipv6_src': ofctl_utils.to_match_ip, + 'ipv6_dst': ofctl_utils.to_match_ip, + 'ipv6_flabel': int, + 'icmpv6_type': int, + 'icmpv6_code': int, + 'ipv6_nd_target': ofctl_utils.to_match_ip, + 'ipv6_nd_sll': ofctl_utils.to_match_eth, + 'ipv6_nd_tll': ofctl_utils.to_match_eth, + 'mpls_label': int, + 'mpls_tc': int, + 'mpls_bos': int, + 'pbb_isid': ofctl_utils.to_match_masked_int, + 'tunnel_id': 
ofctl_utils.to_match_masked_int, + 'ipv6_exthdr': ofctl_utils.to_match_masked_int, + 'pbb_uca': int, + } + + keys = {'dl_dst': 'eth_dst', + 'dl_src': 'eth_src', + 'dl_type': 'eth_type', + 'dl_vlan': 'vlan_vid', + 'nw_src': 'ipv4_src', + 'nw_dst': 'ipv4_dst', + 'nw_proto': 'ip_proto'} + + if attrs.get('eth_type') == ether.ETH_TYPE_ARP: + if 'ipv4_src' in attrs and 'arp_spa' not in attrs: + attrs['arp_spa'] = attrs['ipv4_src'] + del attrs['ipv4_src'] + if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs: + attrs['arp_tpa'] = attrs['ipv4_dst'] + del attrs['ipv4_dst'] + + kwargs = {} + for key, value in attrs.items(): + if key in keys: + # For old field name + key = keys[key] + if key in convert: + value = convert[key](value) + kwargs[key] = value + else: + LOG.error('Unknown match field: %s', key) + + return dp.ofproto_parser.OFPMatch(**kwargs) + + +def to_match_vid(value): + return ofctl_utils.to_match_vid(value, ofproto_v1_4.OFPVID_PRESENT) + + +def match_to_str(ofmatch): + match = {} + + ofmatch = ofmatch.to_jsondict()['OFPMatch'] + ofmatch = ofmatch['oxm_fields'] + + for match_field in ofmatch: + key = match_field['OXMTlv']['field'] + mask = match_field['OXMTlv']['mask'] + value = match_field['OXMTlv']['value'] + if key == 'vlan_vid': + value = ofctl_utils.match_vid_to_str(value, mask, + ofproto_v1_4.OFPVID_PRESENT) + elif key == 'in_port': + value = UTIL.ofp_port_to_user(value) + else: + if mask is not None: + value = str(value) + '/' + str(mask) + match.setdefault(key, value) + + return match + + +def wrap_dpid_dict(dp, value, to_user=True): + if to_user: + return {str(dp.id): value} + + return {dp.id: value} + + +def get_desc_stats(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + s = {} + + for msg in msgs: + stats = msg.body + s = stats.to_jsondict()[stats.__class__.__name__] + + return wrap_dpid_dict(dp, s, to_user) + + +def get_queue_stats(dp, 
waiters, port_no=None, queue_id=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + if queue_id is None: + queue_id = dp.ofproto.OFPQ_ALL + else: + queue_id = UTIL.ofp_queue_from_user(queue_id) + + stats = dp.ofproto_parser.OFPQueueStatsRequest( + dp, 0, port_no, queue_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + desc = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + if to_user: + t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type) + p['type'] = t if t != p['type'] else 'UNKNOWN' + properties.append(p) + s['properties'] = properties + desc.append(s) + + return wrap_dpid_dict(dp, desc, to_user) + + +def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + if queue_id is None: + queue_id = dp.ofproto.OFPQ_ALL + else: + queue_id = UTIL.ofp_queue_from_user(queue_id) + + stats = dp.ofproto_parser.OFPQueueDescStatsRequest( + dp, 0, port_no, queue_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + configs = [] + for msg in msgs: + for queue in msg.body: + q = queue.to_jsondict()[queue.__class__.__name__] + prop_list = [] + for prop in queue.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + if to_user: + t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + prop_list.append(p) + q['properties'] = prop_list + configs.append(q) + + return wrap_dpid_dict(dp, configs, to_user) + + +def get_flow_stats(dp, waiters, flow=None, to_user=True): + flow = flow if flow else {} + table_id = UTIL.ofp_table_from_user( + flow.get('table_id', dp.ofproto.OFPTT_ALL)) + flags = 
int(flow.get('flags', 0)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + match = to_match(dp, flow.get('match', {})) + + stats = dp.ofproto_parser.OFPFlowStatsRequest( + dp, flags, table_id, out_port, out_group, cookie, cookie_mask, + match) + + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + flows = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + s['instructions'] = instructions_to_str(stats.instructions) + s['match'] = match_to_str(stats.match) + flows.append(s) + + return wrap_dpid_dict(dp, flows, to_user) + + +def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True): + flow = flow if flow else {} + table_id = UTIL.ofp_table_from_user( + flow.get('table_id', dp.ofproto.OFPTT_ALL)) + flags = int(flow.get('flags', 0)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + match = to_match(dp, flow.get('match', {})) + + stats = dp.ofproto_parser.OFPAggregateStatsRequest( + dp, flags, table_id, out_port, out_group, cookie, cookie_mask, + match) + + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + flows = [] + for msg in msgs: + stats = msg.body + s = stats.to_jsondict()[stats.__class__.__name__] + flows.append(s) + + return wrap_dpid_dict(dp, flows, to_user) + + +def get_table_stats(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + tables = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = 
stat.to_jsondict()[stat.__class__.__name__] + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + tables.append(s) + + return wrap_dpid_dict(dp, tables, to_user) + + +def get_table_features(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, []) + msgs = [] + ofproto = dp.ofproto + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS, + ofproto.OFPTFPT_INSTRUCTIONS_MISS] + + p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES, + ofproto.OFPTFPT_NEXT_TABLES_MISS, + ofproto.OFPTFPT_TABLE_SYNC_FROM] + + p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS, + ofproto.OFPTFPT_WRITE_ACTIONS_MISS, + ofproto.OFPTFPT_APPLY_ACTIONS, + ofproto.OFPTFPT_APPLY_ACTIONS_MISS] + + p_type_oxms = [ofproto.OFPTFPT_MATCH, + ofproto.OFPTFPT_WILDCARDS, + ofproto.OFPTFPT_WRITE_SETFIELD, + ofproto.OFPTFPT_WRITE_SETFIELD_MISS, + ofproto.OFPTFPT_APPLY_SETFIELD, + ofproto.OFPTFPT_APPLY_SETFIELD_MISS] + + p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER, + ofproto.OFPTFPT_EXPERIMENTER_MISS] + + tables = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = {} + t = UTIL.ofp_table_feature_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + if prop.type in p_type_instructions: + instruction_ids = [] + for id in prop.instruction_ids: + i = {'len': id.len, + 'type': id.type} + instruction_ids.append(i) + p['instruction_ids'] = instruction_ids + elif prop.type in p_type_next_tables: + table_ids = [] + for id in prop.table_ids: + table_ids.append(id) + p['table_ids'] = table_ids + elif prop.type in p_type_actions: + action_ids = [] + for id in prop.action_ids: + i = id.to_jsondict()[id.__class__.__name__] + action_ids.append(i) + p['action_ids'] = action_ids + elif prop.type in p_type_oxms: + oxm_ids = [] + for id in prop.oxm_ids: + i = 
id.to_jsondict()[id.__class__.__name__] + oxm_ids.append(i) + p['oxm_ids'] = oxm_ids + elif prop.type in p_type_experimenter: + pass + properties.append(p) + s['name'] = stat.name.decode('utf-8') + s['properties'] = properties + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + tables.append(s) + + return wrap_dpid_dict(dp, tables, to_user) + + +def get_port_stats(dp, waiters, port_no=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + + stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + ports = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + properties = [] + for prop in stats.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + t = UTIL.ofp_port_stats_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + properties.append(p) + s['properties'] = properties + + if to_user: + s['port_no'] = UTIL.ofp_port_to_user(stats.port_no) + + ports.append(s) + + return wrap_dpid_dict(dp, ports, to_user) + + +def get_meter_stats(dp, waiters, meter_id=None, to_user=True): + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = UTIL.ofp_meter_from_user(meter_id) + + stats = dp.ofproto_parser.OFPMeterStatsRequest( + dp, 0, meter_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + meters = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + bands = [] + for band in stats.band_stats: + b = band.to_jsondict()[band.__class__.__name__] + bands.append(b) + s['band_stats'] = bands + + if to_user: + s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id) + + meters.append(s) + + return wrap_dpid_dict(dp, meters, to_user) + + +def get_meter_features(dp, waiters, to_user=True): + ofp = dp.ofproto + type_convert = 
{ofp.OFPMBT_DROP: 'DROP', + ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'} + + capa_convert = {ofp.OFPMF_KBPS: 'KBPS', + ofp.OFPMF_PKTPS: 'PKTPS', + ofp.OFPMF_BURST: 'BURST', + ofp.OFPMF_STATS: 'STATS'} + + stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + features = [] + for msg in msgs: + for feature in msg.body: + band_types = [] + for k, v in type_convert.items(): + if (1 << k) & feature.band_types: + + if to_user: + band_types.append(v) + + else: + band_types.append(k) + + capabilities = [] + for k, v in sorted(capa_convert.items()): + if k & feature.capabilities: + + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + + f = {'max_meter': feature.max_meter, + 'band_types': band_types, + 'capabilities': capabilities, + 'max_bands': feature.max_bands, + 'max_color': feature.max_color} + features.append(f) + + return wrap_dpid_dict(dp, features, to_user) + + +def get_meter_config(dp, waiters, meter_id=None, to_user=True): + flags = {dp.ofproto.OFPMF_KBPS: 'KBPS', + dp.ofproto.OFPMF_PKTPS: 'PKTPS', + dp.ofproto.OFPMF_BURST: 'BURST', + dp.ofproto.OFPMF_STATS: 'STATS'} + + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = UTIL.ofp_meter_from_user(meter_id) + + stats = dp.ofproto_parser.OFPMeterConfigStatsRequest( + dp, 0, meter_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + configs = [] + for msg in msgs: + for config in msg.body: + c = config.to_jsondict()[config.__class__.__name__] + bands = [] + for band in config.bands: + b = band.to_jsondict()[band.__class__.__name__] + + if to_user: + t = UTIL.ofp_meter_band_type_to_user(band.type) + b['type'] = t if t != band.type else 'UNKNOWN' + + bands.append(b) + c_flags = [] + for k, v in sorted(flags.items()): + if k & config.flags: + if to_user: + c_flags.append(v) + + else: + c_flags.append(k) + + c['flags'] = c_flags + c['bands'] = bands + + if 
to_user: + c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id) + + configs.append(c) + + return wrap_dpid_dict(dp, configs, to_user) + + +def get_group_stats(dp, waiters, group_id=None, to_user=True): + if group_id is None: + group_id = dp.ofproto.OFPG_ALL + else: + group_id = UTIL.ofp_group_from_user(group_id) + + stats = dp.ofproto_parser.OFPGroupStatsRequest( + dp, 0, group_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + groups = [] + for msg in msgs: + for stats in msg.body: + g = stats.to_jsondict()[stats.__class__.__name__] + bucket_stats = [] + for bucket_stat in stats.bucket_stats: + c = bucket_stat.to_jsondict()[bucket_stat.__class__.__name__] + bucket_stats.append(c) + g['bucket_stats'] = bucket_stats + + if to_user: + g['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + + groups.append(g) + + return wrap_dpid_dict(dp, groups, to_user) + + +def get_group_features(dp, waiters, to_user=True): + + ofp = dp.ofproto + type_convert = {ofp.OFPGT_ALL: 'ALL', + ofp.OFPGT_SELECT: 'SELECT', + ofp.OFPGT_INDIRECT: 'INDIRECT', + ofp.OFPGT_FF: 'FF'} + cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT', + ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS', + ofp.OFPGFC_CHAINING: 'CHAINING', + ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'} + act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT', + ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT', + ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN', + ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL', + ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL', + ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN', + ofp.OFPAT_POP_VLAN: 'POP_VLAN', + ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS', + ofp.OFPAT_POP_MPLS: 'POP_MPLS', + ofp.OFPAT_SET_QUEUE: 'SET_QUEUE', + ofp.OFPAT_GROUP: 'GROUP', + ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL', + ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL', + ofp.OFPAT_SET_FIELD: 'SET_FIELD', + ofp.OFPAT_PUSH_PBB: 'PUSH_PBB', + ofp.OFPAT_POP_PBB: 'POP_PBB', + ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER', + } + + stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 
0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + features = [] + for msg in msgs: + feature = msg.body + types = [] + for k, v in type_convert.items(): + if (1 << k) & feature.types: + if to_user: + types.append(v) + + else: + types.append(k) + + capabilities = [] + for k, v in cap_convert.items(): + if k & feature.capabilities: + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + + if to_user: + max_groups = [] + for k, v in type_convert.items(): + max_groups.append({v: feature.max_groups[k]}) + + else: + max_groups = feature.max_groups + + actions = [] + for k1, v1 in type_convert.items(): + acts = [] + for k2, v2 in act_convert.items(): + if (1 << k2) & feature.actions[k1]: + if to_user: + acts.append(v2) + + else: + acts.append(k2) + + if to_user: + actions.append({v1: acts}) + + else: + actions.append({k1: acts}) + + f = {'types': types, + 'capabilities': capabilities, + 'max_groups': max_groups, + 'actions': actions} + features.append(f) + + return wrap_dpid_dict(dp, features, to_user) + + +def get_group_desc(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + descs = [] + for msg in msgs: + for stats in msg.body: + d = stats.to_jsondict()[stats.__class__.__name__] + buckets = [] + for bucket in stats.buckets: + b = bucket.to_jsondict()[bucket.__class__.__name__] + actions = [] + for action in bucket.actions: + if to_user: + actions.append(action_to_str(action)) + + else: + actions.append(action) + b['actions'] = actions + buckets.append(b) + + d['buckets'] = buckets + if to_user: + d['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + t = UTIL.ofp_group_type_to_user(stats.type) + d['type'] = t if t != stats.type else 'UNKNOWN' + + descs.append(d) + + return wrap_dpid_dict(dp, descs, to_user) + + +def get_port_desc(dp, waiters, port_no=None, to_user=True): + if port_no is None: + 
port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + + stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + descs = [] + + for msg in msgs: + stats = msg.body + for stat in stats: + d = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + + if to_user: + t = UTIL.ofp_port_desc_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + + properties.append(p) + d['name'] = stat.name.decode('utf-8') + d['properties'] = properties + + if to_user: + d['port_no'] = UTIL.ofp_port_to_user(stat.port_no) + + descs.append(d) + + return wrap_dpid_dict(dp, descs, to_user) + + +def mod_flow_entry(dp, flow, cmd): + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) + idle_timeout = int(flow.get('idle_timeout', 0)) + hard_timeout = int(flow.get('hard_timeout', 0)) + priority = int(flow.get('priority', 0)) + buffer_id = UTIL.ofp_buffer_from_user( + flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + importance = int(flow.get('importance', 0)) + flags = int(flow.get('flags', 0)) + match = to_match(dp, flow.get('match', {})) + inst = to_instructions(dp, flow.get('instructions', [])) + + flow_mod = dp.ofproto_parser.OFPFlowMod( + dp, cookie, cookie_mask, table_id, cmd, idle_timeout, + hard_timeout, priority, buffer_id, out_port, out_group, + importance, flags, match, inst) + + ofctl_utils.send_msg(dp, flow_mod, LOG) + + +def mod_meter_entry(dp, meter, cmd): + flags = 0 + if 'flags' in meter: + meter_flags = meter['flags'] + if not isinstance(meter_flags, list): + meter_flags = [meter_flags] + 
for flag in meter_flags: + t = UTIL.ofp_meter_flags_from_user(flag) + f = t if t != flag else None + if f is None: + LOG.error('Unknown meter flag: %s', flag) + continue + flags |= f + + meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0)) + + bands = [] + for band in meter.get('bands', []): + band_type = band.get('type') + rate = int(band.get('rate', 0)) + burst_size = int(band.get('burst_size', 0)) + if band_type == 'DROP': + bands.append( + dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size)) + elif band_type == 'DSCP_REMARK': + prec_level = int(band.get('prec_level', 0)) + bands.append( + dp.ofproto_parser.OFPMeterBandDscpRemark( + rate, burst_size, prec_level)) + elif band_type == 'EXPERIMENTER': + experimenter = int(band.get('experimenter', 0)) + bands.append( + dp.ofproto_parser.OFPMeterBandExperimenter( + rate, burst_size, experimenter)) + else: + LOG.error('Unknown band type: %s', band_type) + + meter_mod = dp.ofproto_parser.OFPMeterMod( + dp, cmd, flags, meter_id, bands) + + ofctl_utils.send_msg(dp, meter_mod, LOG) + + +def mod_group_entry(dp, group, cmd): + group_type = str(group.get('type', 'ALL')) + t = UTIL.ofp_group_type_from_user(group_type) + group_type = t if t != group_type else None + if group_type is None: + LOG.error('Unknown group type: %s', group.get('type')) + + group_id = UTIL.ofp_group_from_user(group.get('group_id', 0)) + + buckets = [] + for bucket in group.get('buckets', []): + weight = int(bucket.get('weight', 0)) + watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY)) + watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY)) + actions = [] + for dic in bucket.get('actions', []): + action = to_action(dp, dic) + if action is not None: + actions.append(action) + buckets.append(dp.ofproto_parser.OFPBucket( + weight, watch_port, watch_group, actions)) + + group_mod = dp.ofproto_parser.OFPGroupMod( + dp, cmd, group_type, group_id, buckets) + + ofctl_utils.send_msg(dp, group_mod, LOG) + + +def 
mod_port_behavior(dp, port_config): + ofp = dp.ofproto + parser = dp.ofproto_parser + port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) + hw_addr = str(port_config.get('hw_addr')) + config = int(port_config.get('config', 0)) + mask = int(port_config.get('mask', 0)) + properties = port_config.get('properties') + + prop = [] + for p in properties: + type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type']) + length = None + if type_ == ofp.OFPPDPT_ETHERNET: + advertise = UTIL.ofp_port_features_from_user(p['advertise']) + prop.append( + parser.OFPPortModPropEthernet(type_, length, advertise)) + elif type_ == ofp.OFPPDPT_OPTICAL: + prop.append( + parser.OFPPortModPropOptical( + type_, length, p['configure'], p['freq_lmda'], + p['fl_offset'], p['grid_span'], p['tx_pwr'])) + elif type_ == ofp.OFPPDPT_EXPERIMENTER: + prop.append( + parser.OFPPortModPropExperimenter( + type_, length, p['experimenter'], p['exp_type'], + p['data'])) + else: + LOG.error('Unknown port desc prop type: %s', type_) + + port_mod = dp.ofproto_parser.OFPPortMod( + dp, port_no, hw_addr, config, mask, prop) + + ofctl_utils.send_msg(dp, port_mod, LOG) + + +# NOTE(jkoelker) Alias common functions +send_experimenter = ofctl_utils.send_experimenter diff --git a/ryu/lib/ofctl_v1_5.py b/ryu/lib/ofctl_v1_5.py new file mode 100644 index 00000000..52c2de88 --- /dev/null +++ b/ryu/lib/ofctl_v1_5.py @@ -0,0 +1,1090 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import logging + +from ryu.ofproto import ether +from ryu.ofproto import ofproto_v1_5 +from ryu.ofproto import ofproto_v1_5_parser +from ryu.lib import ofctl_utils + +LOG = logging.getLogger(__name__) + +DEFAULT_TIMEOUT = 1.0 + +UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_5) + + +def to_action(dp, dic): + ofp = dp.ofproto + parser = dp.ofproto_parser + action_type = dic.get('type') + return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL) + + +def _get_actions(dp, dics): + actions = [] + for d in dics: + action = to_action(dp, d) + if action is not None: + actions.append(action) + else: + LOG.error('Unknown action type: %s', d) + return actions + + +def to_instructions(dp, insts): + instructions = [] + ofp = dp.ofproto + parser = dp.ofproto_parser + + for i in insts: + inst_type = i.get('type') + if inst_type in ['APPLY_ACTIONS', 'WRITE_ACTIONS']: + dics = i.get('actions', []) + actions = _get_actions(dp, dics) + if actions: + if inst_type == 'APPLY_ACTIONS': + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, + actions)) + else: + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, + actions)) + elif inst_type == 'CLEAR_ACTIONS': + instructions.append( + parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, [])) + elif inst_type == 'GOTO_TABLE': + table_id = int(i.get('table_id')) + instructions.append(parser.OFPInstructionGotoTable(table_id)) + elif inst_type == 'WRITE_METADATA': + metadata = ofctl_utils.str_to_int(i.get('metadata')) + metadata_mask = (ofctl_utils.str_to_int(i['metadata_mask']) + if 'metadata_mask' in i + else parser.UINT64_MAX) + instructions.append( + parser.OFPInstructionWriteMetadata( + metadata, metadata_mask)) + else: + LOG.error('Unknown instruction type: %s', inst_type) + + return instructions + + +def action_to_str(act): + s = 
act.to_jsondict()[act.__class__.__name__] + t = UTIL.ofp_action_type_to_user(s['type']) + s['type'] = t if t != s['type'] else 'UNKNOWN' + + if t == 'SET_FIELD': + field = s.pop('field') + s['field'] = field['OXMTlv']['field'] + s['mask'] = field['OXMTlv']['mask'] + s['value'] = field['OXMTlv']['value'] + elif t == 'COPY_FIELD': + oxm_ids = s.pop('oxm_ids') + s['src_oxm_id'] = oxm_ids[0]['OFPOxmId']['type'] + s['dst_oxm_id'] = oxm_ids[1]['OFPOxmId']['type'] + + return s + + +def instructions_to_str(instructions): + + s = [] + + for i in instructions: + v = i.to_jsondict()[i.__class__.__name__] + t = UTIL.ofp_instruction_type_to_user(v['type']) + inst_type = t if t != v['type'] else 'UNKNOWN' + # apply/write/clear-action instruction + if isinstance(i, ofproto_v1_5_parser.OFPInstructionActions): + acts = [] + for a in i.actions: + acts.append(action_to_str(a)) + v['type'] = inst_type + v['actions'] = acts + s.append(v) + # others + else: + v['type'] = inst_type + s.append(v) + + return s + + +def to_match(dp, attrs): + convert = {'in_port': UTIL.ofp_port_from_user, + 'in_phy_port': int, + 'metadata': ofctl_utils.to_match_masked_int, + 'eth_dst': ofctl_utils.to_match_eth, + 'eth_src': ofctl_utils.to_match_eth, + 'eth_type': int, + 'vlan_vid': to_match_vid, + 'vlan_pcp': int, + 'ip_dscp': int, + 'ip_ecn': int, + 'ip_proto': int, + 'ipv4_src': ofctl_utils.to_match_ip, + 'ipv4_dst': ofctl_utils.to_match_ip, + 'tcp_src': int, + 'tcp_dst': int, + 'udp_src': int, + 'udp_dst': int, + 'sctp_src': int, + 'sctp_dst': int, + 'icmpv4_type': int, + 'icmpv4_code': int, + 'arp_op': int, + 'arp_spa': ofctl_utils.to_match_ip, + 'arp_tpa': ofctl_utils.to_match_ip, + 'arp_sha': ofctl_utils.to_match_eth, + 'arp_tha': ofctl_utils.to_match_eth, + 'ipv6_src': ofctl_utils.to_match_ip, + 'ipv6_dst': ofctl_utils.to_match_ip, + 'ipv6_flabel': int, + 'icmpv6_type': int, + 'icmpv6_code': int, + 'ipv6_nd_target': ofctl_utils.to_match_ip, + 'ipv6_nd_sll': ofctl_utils.to_match_eth, + 'ipv6_nd_tll': 
ofctl_utils.to_match_eth, + 'mpls_label': int, + 'mpls_tc': int, + 'mpls_bos': int, + 'pbb_isid': ofctl_utils.to_match_masked_int, + 'tunnel_id': ofctl_utils.to_match_masked_int, + 'ipv6_exthdr': ofctl_utils.to_match_masked_int, + 'pbb_uca': int, + 'tcp_flags': int, + 'actset_output': int, + 'packet_type': ofctl_utils.to_match_packet_type, + } + + keys = {'dl_dst': 'eth_dst', + 'dl_src': 'eth_src', + 'dl_type': 'eth_type', + 'dl_vlan': 'vlan_vid', + 'nw_src': 'ipv4_src', + 'nw_dst': 'ipv4_dst', + 'nw_proto': 'ip_proto'} + + if attrs.get('eth_type') == ether.ETH_TYPE_ARP: + if 'ipv4_src' in attrs and 'arp_spa' not in attrs: + attrs['arp_spa'] = attrs['ipv4_src'] + del attrs['ipv4_src'] + if 'ipv4_dst' in attrs and 'arp_tpa' not in attrs: + attrs['arp_tpa'] = attrs['ipv4_dst'] + del attrs['ipv4_dst'] + + kwargs = {} + for key, value in attrs.items(): + if key in keys: + # For old field name + key = keys[key] + if key in convert: + value = convert[key](value) + kwargs[key] = value + else: + LOG.error('Unknown match field: %s', key) + + return dp.ofproto_parser.OFPMatch(**kwargs) + + +def to_match_vid(value): + return ofctl_utils.to_match_vid(value, ofproto_v1_5.OFPVID_PRESENT) + + +def match_to_str(ofmatch): + match = {} + + ofmatch = ofmatch.to_jsondict()['OFPMatch'] + ofmatch = ofmatch['oxm_fields'] + + for match_field in ofmatch: + key = match_field['OXMTlv']['field'] + mask = match_field['OXMTlv']['mask'] + value = match_field['OXMTlv']['value'] + if key == 'vlan_vid': + value = ofctl_utils.match_vid_to_str(value, mask, + ofproto_v1_5.OFPVID_PRESENT) + elif key == 'in_port': + value = UTIL.ofp_port_to_user(value) + elif key == 'packet_type': + value = [value >> 16, value & 0xffff] + else: + if mask is not None: + value = str(value) + '/' + str(mask) + match.setdefault(key, value) + + return match + + +def wrap_dpid_dict(dp, value, to_user=True): + if to_user: + return {str(dp.id): value} + + return {dp.id: value} + + +def stats_to_str(ofstats): + + stats = {} + 
ofstats = ofstats.to_jsondict()['OFPStats'] + ofstats = ofstats['oxs_fields'] + + for s in ofstats: + key = s['OXSTlv']['field'] + if key == 'duration': + value = { + 'duration_sec': s['OXSTlv']['value'][0], + 'duration_nsec': s['OXSTlv']['value'][1], + } + elif key == 'idle_time': + value = { + 'idle_time_sec': s['OXSTlv']['value'][0], + 'idle_time_nsec': s['OXSTlv']['value'][1], + } + else: + value = s['OXSTlv']['value'] + stats.setdefault(key, value) + + return stats + + +def get_desc_stats(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + s = {} + + for msg in msgs: + stats = msg.body + s = stats.to_jsondict()[stats.__class__.__name__] + + return wrap_dpid_dict(dp, s, to_user) + + +def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + if queue_id is None: + queue_id = dp.ofproto.OFPQ_ALL + else: + queue_id = UTIL.ofp_queue_from_user(queue_id) + + stats = dp.ofproto_parser.OFPQueueStatsRequest( + dp, 0, port_no, queue_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + desc = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + if to_user: + t = UTIL.ofp_queue_stats_prop_type_to_user(prop.type) + p['type'] = t if t != p['type'] else 'UNKNOWN' + properties.append(p) + s['properties'] = properties + desc.append(s) + + return wrap_dpid_dict(dp, desc, to_user) + + +def get_queue_desc(dp, waiters, port_no=None, queue_id=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + if queue_id is None: + queue_id = dp.ofproto.OFPQ_ALL + else: + queue_id = 
UTIL.ofp_queue_from_user(queue_id) + + stats = dp.ofproto_parser.OFPQueueDescStatsRequest( + dp, 0, port_no, queue_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + configs = [] + for msg in msgs: + for queue in msg.body: + q = queue.to_jsondict()[queue.__class__.__name__] + prop_list = [] + for prop in queue.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + if to_user: + t = UTIL.ofp_queue_desc_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + prop_list.append(p) + q['properties'] = prop_list + configs.append(q) + + return wrap_dpid_dict(dp, configs, to_user) + + +def get_flow_desc_stats(dp, waiters, flow=None, to_user=True): + flow = flow if flow else {} + table_id = UTIL.ofp_table_from_user( + flow.get('table_id', dp.ofproto.OFPTT_ALL)) + flags = int(flow.get('flags', 0)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + match = to_match(dp, flow.get('match', {})) + + stats = dp.ofproto_parser.OFPFlowDescStatsRequest( + dp, flags, table_id, out_port, out_group, cookie, cookie_mask, + match) + + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + flows = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + s['instructions'] = instructions_to_str(stats.instructions) + s['stats'] = stats_to_str(stats.stats) + s['match'] = match_to_str(stats.match) + flows.append(s) + + return wrap_dpid_dict(dp, flows, to_user) + + +def get_flow_stats(dp, waiters, flow=None, to_user=True): + flow = flow if flow else {} + table_id = UTIL.ofp_table_from_user( + flow.get('table_id', dp.ofproto.OFPTT_ALL)) + flags = int(flow.get('flags', 0)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group 
= UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + match = to_match(dp, flow.get('match', {})) + + stats = dp.ofproto_parser.OFPFlowStatsRequest( + dp, flags, table_id, out_port, out_group, cookie, cookie_mask, + match) + + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + flows = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + s['stats'] = stats_to_str(stats.stats) + s['match'] = match_to_str(stats.match) + flows.append(s) + + return wrap_dpid_dict(dp, flows, to_user) + + +def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True): + flow = flow if flow else {} + table_id = UTIL.ofp_table_from_user( + flow.get('table_id', dp.ofproto.OFPTT_ALL)) + flags = int(flow.get('flags', 0)) + out_port = UTIL.ofp_port_from_user( + flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + match = to_match(dp, flow.get('match', {})) + + stats = dp.ofproto_parser.OFPAggregateStatsRequest( + dp, flags, table_id, out_port, out_group, cookie, cookie_mask, + match) + + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + flows = [] + for msg in msgs: + stats = msg.body + s = stats.to_jsondict()[stats.__class__.__name__] + s['stats'] = stats_to_str(stats.stats) + flows.append(s) + + return wrap_dpid_dict(dp, flows, to_user) + + +def get_table_stats(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + tables = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = stat.to_jsondict()[stat.__class__.__name__] + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + 
tables.append(s) + + return wrap_dpid_dict(dp, tables, to_user) + + +def get_table_features(dp, waiters, to_user=True): + stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, []) + msgs = [] + ofproto = dp.ofproto + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS, + ofproto.OFPTFPT_INSTRUCTIONS_MISS] + + p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES, + ofproto.OFPTFPT_NEXT_TABLES_MISS, + ofproto.OFPTFPT_TABLE_SYNC_FROM] + + p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS, + ofproto.OFPTFPT_WRITE_ACTIONS_MISS, + ofproto.OFPTFPT_APPLY_ACTIONS, + ofproto.OFPTFPT_APPLY_ACTIONS_MISS] + + p_type_packet = ofproto.OFPTFPT_PACKET_TYPES + + p_type_oxms = [ofproto.OFPTFPT_MATCH, + ofproto.OFPTFPT_WILDCARDS, + ofproto.OFPTFPT_WRITE_SETFIELD, + ofproto.OFPTFPT_WRITE_SETFIELD_MISS, + ofproto.OFPTFPT_APPLY_SETFIELD, + ofproto.OFPTFPT_APPLY_SETFIELD_MISS, + ofproto.OFPTFPT_WRITE_COPYFIELD, + ofproto.OFPTFPT_WRITE_COPYFIELD_MISS, + ofproto.OFPTFPT_APPLY_COPYFIELD, + ofproto.OFPTFPT_APPLY_COPYFIELD_MISS] + + p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER, + ofproto.OFPTFPT_EXPERIMENTER_MISS] + + tables = [] + for msg in msgs: + stats = msg.body + for stat in stats: + s = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = {} + t = UTIL.ofp_table_feature_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + if prop.type in p_type_instructions: + instruction_ids = [] + for id in prop.instruction_ids: + i = {'len': id.len, + 'type': id.type} + instruction_ids.append(i) + p['instruction_ids'] = instruction_ids + elif prop.type in p_type_next_tables: + table_ids = [] + for id in prop.table_ids: + table_ids.append(id) + p['table_ids'] = table_ids + elif prop.type in p_type_actions: + action_ids = [] + for id in prop.action_ids: + i = id.to_jsondict()[id.__class__.__name__] + action_ids.append(i) + p['action_ids'] = action_ids + 
elif prop.type in p_type_oxms: + oxm_ids = [] + for id in prop.oxm_ids: + i = id.to_jsondict()[id.__class__.__name__] + oxm_ids.append(i) + p['oxm_ids'] = oxm_ids + elif prop.type == p_type_packet: + oxm_values = [] + for val in prop.oxm_values: + i = {val[0]: val[1]} + oxm_values.append(i) + p['oxm_values'] = oxm_values + elif prop.type in p_type_experimenter: + pass + properties.append(p) + s['name'] = stat.name.decode('utf-8') + s['properties'] = properties + + if to_user: + s['table_id'] = UTIL.ofp_table_to_user(stat.table_id) + + tables.append(s) + + return wrap_dpid_dict(dp, tables, to_user) + + +def get_port_stats(dp, waiters, port_no=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + + stats = dp.ofproto_parser.OFPPortStatsRequest(dp, 0, port_no) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + ports = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + properties = [] + for prop in stats.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + t = UTIL.ofp_port_stats_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + properties.append(p) + s['properties'] = properties + + if to_user: + s['port_no'] = UTIL.ofp_port_to_user(stats.port_no) + + ports.append(s) + + return wrap_dpid_dict(dp, ports, to_user) + + +def get_meter_stats(dp, waiters, meter_id=None, to_user=True): + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = UTIL.ofp_meter_from_user(meter_id) + + stats = dp.ofproto_parser.OFPMeterStatsRequest( + dp, 0, meter_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + meters = [] + for msg in msgs: + for stats in msg.body: + s = stats.to_jsondict()[stats.__class__.__name__] + bands = [] + for band in stats.band_stats: + b = band.to_jsondict()[band.__class__.__name__] + bands.append(b) + s['band_stats'] = 
bands + + if to_user: + s['meter_id'] = UTIL.ofp_meter_to_user(stats.meter_id) + + meters.append(s) + + return wrap_dpid_dict(dp, meters, to_user) + + +def get_meter_features(dp, waiters, to_user=True): + ofp = dp.ofproto + type_convert = {ofp.OFPMBT_DROP: 'DROP', + ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'} + + capa_convert = {ofp.OFPMF_KBPS: 'KBPS', + ofp.OFPMF_PKTPS: 'PKTPS', + ofp.OFPMF_BURST: 'BURST', + ofp.OFPMF_STATS: 'STATS'} + + stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + features = [] + for msg in msgs: + for feature in msg.body: + band_types = [] + for k, v in type_convert.items(): + if (1 << k) & feature.band_types: + + if to_user: + band_types.append(v) + + else: + band_types.append(k) + + capabilities = [] + for k, v in sorted(capa_convert.items()): + if k & feature.capabilities: + + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + + f = {'max_meter': feature.max_meter, + 'band_types': band_types, + 'capabilities': capabilities, + 'max_bands': feature.max_bands, + 'max_color': feature.max_color} + features.append(f) + + return wrap_dpid_dict(dp, features, to_user) + + +def get_meter_desc(dp, waiters, meter_id=None, to_user=True): + flags = {dp.ofproto.OFPMF_KBPS: 'KBPS', + dp.ofproto.OFPMF_PKTPS: 'PKTPS', + dp.ofproto.OFPMF_BURST: 'BURST', + dp.ofproto.OFPMF_STATS: 'STATS'} + + if meter_id is None: + meter_id = dp.ofproto.OFPM_ALL + else: + meter_id = UTIL.ofp_meter_from_user(meter_id) + + stats = dp.ofproto_parser.OFPMeterDescStatsRequest( + dp, 0, meter_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + configs = [] + for msg in msgs: + for config in msg.body: + c = config.to_jsondict()[config.__class__.__name__] + bands = [] + for band in config.bands: + b = band.to_jsondict()[band.__class__.__name__] + + if to_user: + t = UTIL.ofp_meter_band_type_to_user(band.type) + b['type'] = t if t != band.type 
else 'UNKNOWN' + + bands.append(b) + c_flags = [] + for k, v in sorted(flags.items()): + if k & config.flags: + if to_user: + c_flags.append(v) + + else: + c_flags.append(k) + + c['flags'] = c_flags + c['bands'] = bands + + if to_user: + c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id) + + configs.append(c) + + return wrap_dpid_dict(dp, configs, to_user) + + +def get_group_stats(dp, waiters, group_id=None, to_user=True): + if group_id is None: + group_id = dp.ofproto.OFPG_ALL + else: + group_id = UTIL.ofp_group_from_user(group_id) + + stats = dp.ofproto_parser.OFPGroupStatsRequest( + dp, 0, group_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + groups = [] + for msg in msgs: + for stats in msg.body: + g = stats.to_jsondict()[stats.__class__.__name__] + bucket_stats = [] + for bucket_stat in stats.bucket_stats: + c = bucket_stat.to_jsondict()[bucket_stat.__class__.__name__] + bucket_stats.append(c) + g['bucket_stats'] = bucket_stats + + if to_user: + g['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + + groups.append(g) + + return wrap_dpid_dict(dp, groups, to_user) + + +def get_group_features(dp, waiters, to_user=True): + + ofp = dp.ofproto + type_convert = {ofp.OFPGT_ALL: 'ALL', + ofp.OFPGT_SELECT: 'SELECT', + ofp.OFPGT_INDIRECT: 'INDIRECT', + ofp.OFPGT_FF: 'FF'} + cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT', + ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS', + ofp.OFPGFC_CHAINING: 'CHAINING', + ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'} + act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT', + ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT', + ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN', + ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL', + ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL', + ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN', + ofp.OFPAT_POP_VLAN: 'POP_VLAN', + ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS', + ofp.OFPAT_POP_MPLS: 'POP_MPLS', + ofp.OFPAT_SET_QUEUE: 'SET_QUEUE', + ofp.OFPAT_GROUP: 'GROUP', + ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL', + 
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL', + ofp.OFPAT_SET_FIELD: 'SET_FIELD', + ofp.OFPAT_PUSH_PBB: 'PUSH_PBB', + ofp.OFPAT_POP_PBB: 'POP_PBB', + ofp.OFPAT_COPY_FIELD: 'COPY_FIELD', + ofp.OFPAT_METER: 'METER', + ofp.OFPAT_EXPERIMENTER: 'EXPERIMENTER', + } + + stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + features = [] + for msg in msgs: + feature = msg.body + types = [] + for k, v in type_convert.items(): + if (1 << k) & feature.types: + if to_user: + types.append(v) + + else: + types.append(k) + + capabilities = [] + for k, v in cap_convert.items(): + if k & feature.capabilities: + if to_user: + capabilities.append(v) + + else: + capabilities.append(k) + + if to_user: + max_groups = [] + for k, v in type_convert.items(): + max_groups.append({v: feature.max_groups[k]}) + + else: + max_groups = feature.max_groups + + actions = [] + for k1, v1 in type_convert.items(): + acts = [] + for k2, v2 in act_convert.items(): + if (1 << k2) & feature.actions[k1]: + if to_user: + acts.append(v2) + + else: + acts.append(k2) + + if to_user: + actions.append({v1: acts}) + + else: + actions.append({k1: acts}) + + f = {'types': types, + 'capabilities': capabilities, + 'max_groups': max_groups, + 'actions': actions} + features.append(f) + + return wrap_dpid_dict(dp, features, to_user) + + +def get_group_desc(dp, waiters, group_id=None, to_user=True): + if group_id is None: + group_id = dp.ofproto.OFPG_ALL + else: + group_id = UTIL.ofp_group_from_user(group_id) + + stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0, group_id) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + descs = [] + for msg in msgs: + for stats in msg.body: + d = stats.to_jsondict()[stats.__class__.__name__] + buckets = [] + for bucket in stats.buckets: + b = bucket.to_jsondict()[bucket.__class__.__name__] + actions = [] + for action in bucket.actions: + if to_user: + 
actions.append(action_to_str(action)) + + else: + actions.append(action) + properties = [] + for prop in bucket.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + t = UTIL.ofp_group_bucket_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + properties.append(p) + b['actions'] = actions + b['properties'] = properties + buckets.append(b) + + d['buckets'] = buckets + if to_user: + d['group_id'] = UTIL.ofp_group_to_user(stats.group_id) + t = UTIL.ofp_group_type_to_user(stats.type) + d['type'] = t if t != stats.type else 'UNKNOWN' + + descs.append(d) + + return wrap_dpid_dict(dp, descs, to_user) + + +def get_port_desc(dp, waiters, port_no=None, to_user=True): + if port_no is None: + port_no = dp.ofproto.OFPP_ANY + else: + port_no = UTIL.ofp_port_from_user(port_no) + + stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0, port_no) + msgs = [] + ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) + + descs = [] + + for msg in msgs: + stats = msg.body + for stat in stats: + d = stat.to_jsondict()[stat.__class__.__name__] + properties = [] + for prop in stat.properties: + p = prop.to_jsondict()[prop.__class__.__name__] + + if to_user: + t = UTIL.ofp_port_desc_prop_type_to_user(prop.type) + p['type'] = t if t != prop.type else 'UNKNOWN' + + properties.append(p) + d['name'] = stat.name.decode('utf-8') + d['properties'] = properties + + if to_user: + d['port_no'] = UTIL.ofp_port_to_user(stat.port_no) + + descs.append(d) + + return wrap_dpid_dict(dp, descs, to_user) + + +def mod_flow_entry(dp, flow, cmd): + cookie = int(flow.get('cookie', 0)) + cookie_mask = int(flow.get('cookie_mask', 0)) + table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) + idle_timeout = int(flow.get('idle_timeout', 0)) + hard_timeout = int(flow.get('hard_timeout', 0)) + priority = int(flow.get('priority', 0)) + buffer_id = UTIL.ofp_buffer_from_user( + flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) + out_port = UTIL.ofp_port_from_user( + 
flow.get('out_port', dp.ofproto.OFPP_ANY)) + out_group = UTIL.ofp_group_from_user( + flow.get('out_group', dp.ofproto.OFPG_ANY)) + importance = int(flow.get('importance', 0)) + flags = int(flow.get('flags', 0)) + match = to_match(dp, flow.get('match', {})) + inst = to_instructions(dp, flow.get('instructions', [])) + + flow_mod = dp.ofproto_parser.OFPFlowMod( + dp, cookie, cookie_mask, table_id, cmd, idle_timeout, + hard_timeout, priority, buffer_id, out_port, out_group, + importance, flags, match, inst) + + ofctl_utils.send_msg(dp, flow_mod, LOG) + + +def mod_meter_entry(dp, meter, cmd): + flags = 0 + if 'flags' in meter: + meter_flags = meter['flags'] + if not isinstance(meter_flags, list): + meter_flags = [meter_flags] + for flag in meter_flags: + t = UTIL.ofp_meter_flags_from_user(flag) + f = t if t != flag else None + if f is None: + LOG.error('Unknown meter flag: %s', flag) + continue + flags |= f + + meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0)) + + bands = [] + for band in meter.get('bands', []): + band_type = band.get('type') + rate = int(band.get('rate', 0)) + burst_size = int(band.get('burst_size', 0)) + if band_type == 'DROP': + bands.append( + dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size)) + elif band_type == 'DSCP_REMARK': + prec_level = int(band.get('prec_level', 0)) + bands.append( + dp.ofproto_parser.OFPMeterBandDscpRemark( + rate, burst_size, prec_level)) + elif band_type == 'EXPERIMENTER': + experimenter = int(band.get('experimenter', 0)) + bands.append( + dp.ofproto_parser.OFPMeterBandExperimenter( + rate, burst_size, experimenter)) + else: + LOG.error('Unknown band type: %s', band_type) + + meter_mod = dp.ofproto_parser.OFPMeterMod( + dp, cmd, flags, meter_id, bands) + + ofctl_utils.send_msg(dp, meter_mod, LOG) + + +def mod_group_entry(dp, group, cmd): + ofp = dp.ofproto + parser = dp.ofproto_parser + + group_type = str(group.get('type', 'ALL')) + t = UTIL.ofp_group_type_from_user(group_type) + group_type = t if t != 
group_type else None + if group_type is None: + LOG.error('Unknown group type: %s', group.get('type')) + + group_id = UTIL.ofp_group_from_user(group.get('group_id', 0)) + command_bucket_id = int(group.get('command_bucket_id', 0)) + + # Note: + # The list of group property types that are currently defined + # are only OFPGPT_EXPERIMENTER(Experimenter defined). + properties = [] + + buckets = [] + for bucket in group.get('buckets', []): + + # get bucket_id in buckets + bucket_id = int(bucket.get('bucket_id', 0)) + + # get actions in buckets + bucket_actions = [] + for dic in bucket.get('actions', []): + action = to_action(dp, dic) + if action is not None: + bucket_actions.append(action) + + # get properties in buckets + bucket_properties = [] + for p in bucket.get('properties', []): + group_bp_type = str(p.get('type', 'WEIGHT')) + t = UTIL.ofp_group_bucket_prop_type_from_user(group_bp_type) + group_bp_type = t if t != group_bp_type else ofp.OFPGBPT_WEIGHT + + if group_bp_type == ofp.OFPGBPT_WEIGHT: + weight = int(p.get('weight', 0)) + bucket_properties.append( + parser.OFPGroupBucketPropWeight( + type_=group_bp_type, weight=weight)) + elif group_bp_type == ofp.OFPGBPT_WATCH_PORT: + watch_port = int(p.get('watch', dp.ofproto.OFPP_ANY)) + bucket_properties.append( + parser.OFPGroupBucketPropWatch( + type_=group_bp_type, watch=watch_port)) + elif group_bp_type == ofp.OFPGBPT_WATCH_GROUP: + watch_group = int(p.get('watch', dp.ofproto.OFPG_ANY)) + bucket_properties.append( + parser.OFPGroupBucketPropWatch( + type_=group_bp_type, watch=watch_group)) + elif group_bp_type == ofp.OFPGBPT_EXPERIMENTER: + experimenter = p.get('experimenter', 0) + exp_type = p.get('exp_type', 0) + data_type = p.get('data_type', 'ascii') + if data_type not in ['ascii', 'base64']: + LOG.error('Unknown data type: %s', data_type) + data = p.get('data', '') + if data_type == 'base64': + data = base64.b64decode(data) + bucket_properties.append( + parser.OFPGroupBucketPropExperimenter( + 
type_=group_bp_type, experimenter=experimenter, + exp_type=exp_type, data=data)) + else: + LOG.error('Unknown group bucket prop type: %s', p['type']) + + # create bucket + bucket = parser.OFPBucket(bucket_id=bucket_id, + actions=bucket_actions, + properties=bucket_properties) + buckets.append(bucket) + + group_mod = parser.OFPGroupMod(dp, cmd, group_type, group_id, + command_bucket_id, buckets, + properties) + + ofctl_utils.send_msg(dp, group_mod, LOG) + + +def mod_port_behavior(dp, port_config): + ofp = dp.ofproto + parser = dp.ofproto_parser + port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) + hw_addr = str(port_config.get('hw_addr')) + config = int(port_config.get('config', 0)) + mask = int(port_config.get('mask', 0)) + properties = port_config.get('properties') + + prop = [] + for p in properties: + type_ = UTIL.ofp_port_mod_prop_type_from_user(p['type']) + length = None + if type_ == ofp.OFPPDPT_ETHERNET: + advertise = UTIL.ofp_port_features_from_user(p['advertise']) + prop.append( + parser.OFPPortModPropEthernet(type_, length, advertise)) + elif type_ == ofp.OFPPDPT_OPTICAL: + prop.append( + parser.OFPPortModPropOptical( + type_, length, p['configure'], p['freq_lmda'], + p['fl_offset'], p['grid_span'], p['tx_pwr'])) + elif type_ == ofp.OFPPDPT_EXPERIMENTER: + prop.append( + parser.OFPPortModPropExperimenter( + type_, length, p['experimenter'], p['exp_type'], + p['data'])) + else: + LOG.error('Unknown port desc prop type: %s', type_) + + port_mod = dp.ofproto_parser.OFPPortMod( + dp, port_no, hw_addr, config, mask, prop) + + ofctl_utils.send_msg(dp, port_mod, LOG) + + +# NOTE(jkoelker) Alias common funcitons +send_experimenter = ofctl_utils.send_experimenter diff --git a/ryu/lib/ovs/vsctl.py b/ryu/lib/ovs/vsctl.py index 6881b78e..b3457743 100644 --- a/ryu/lib/ovs/vsctl.py +++ b/ryu/lib/ovs/vsctl.py @@ -325,7 +325,8 @@ class VSCtlContext(object): for ovsrec_bridge in ovsrec_bridges.rows.values(): name = ovsrec_bridge.name if name in bridges: - 
LOG.warn('%s: database contains duplicate bridge name', name) + LOG.warning('%s: database contains duplicate bridge name', + name) bridges.add(name) vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name, None, 0) @@ -356,10 +357,10 @@ class VSCtlContext(object): vsctl_port = self.ports.get(port_name) if vsctl_port: if ovsrec_port == vsctl_port.port_cfg: - LOG.warn('%s: vsctl_port is in multiple bridges ' - '(%s and %s)', - port_name, vsctl_bridge.name, - vsctl_port.br.name) + LOG.warning('%s: vsctl_port is in multiple bridges ' + '(%s and %s)', + port_name, vsctl_bridge.name, + vsctl_port.br.name) else: LOG.error('%s: database contains duplicate ' 'vsctl_port name', @@ -378,7 +379,7 @@ class VSCtlContext(object): iface = self.ifaces.get(ovsrec_iface.name) if iface: if ovsrec_iface == iface.iface_cfg: - LOG.warn( + LOG.warning( '%s: interface is in multiple ports ' '(%s and %s)', ovsrec_iface.name, diff --git a/ryu/lib/packet/bgp.py b/ryu/lib/packet/bgp.py index 27277b83..7eae7127 100644 --- a/ryu/lib/packet/bgp.py +++ b/ryu/lib/packet/bgp.py @@ -23,18 +23,13 @@ RFC 4271 BGP-4 # - RFC 4364 BGP/MPLS IP Virtual Private Networks (VPNs) import abc -import six -import struct import copy -import netaddr +import functools import numbers +import socket +import struct -try: - # Python 3 - from functools import reduce -except ImportError: - # Python 2 - pass +import six from ryu.lib.stringify import StringifyMixin from ryu.lib.packet import afi as addr_family @@ -44,6 +39,8 @@ from ryu.lib.packet import stream_parser from ryu.lib import addrconv from ryu.lib.pack_utils import msg_pack_into +reduce = six.moves.reduce + BGP_MSG_OPEN = 1 BGP_MSG_UPDATE = 2 BGP_MSG_NOTIFICATION = 3 @@ -162,14 +159,14 @@ class _Value(object): _VALUE_FIELDS = ['value'] @staticmethod - def do_init(cls, self, kwargs, **extra_kwargs): + def do_init(cls_type, self, kwargs, **extra_kwargs): ourfields = {} - for f in cls._VALUE_FIELDS: + for f in cls_type._VALUE_FIELDS: v = kwargs[f] del kwargs[f] 
ourfields[f] = v kwargs.update(extra_kwargs) - super(cls, self).__init__(**kwargs) + super(cls_type, self).__init__(**kwargs) self.__dict__.update(ourfields) @classmethod @@ -236,6 +233,7 @@ class BgpExc(Exception): """Flag if set indicates Notification message should be sent to peer.""" def __init__(self, data=''): + super(BgpExc, self).__init__() self.data = data def __str__(self): @@ -260,6 +258,7 @@ class BadLen(BgpExc): SUB_CODE = BGP_ERROR_SUB_BAD_MESSAGE_LENGTH def __init__(self, msg_type_code, message_length): + super(BadLen, self).__init__() self.msg_type_code = msg_type_code self.length = message_length self.data = struct.pack('!H', self.length) @@ -279,6 +278,7 @@ class BadMsg(BgpExc): SUB_CODE = BGP_ERROR_SUB_BAD_MESSAGE_TYPE def __init__(self, msg_type): + super(BadMsg, self).__init__() self.msg_type = msg_type self.data = struct.pack('B', msg_type) @@ -317,6 +317,7 @@ class UnsupportedVersion(BgpExc): SUB_CODE = BGP_ERROR_SUB_UNSUPPORTED_VERSION_NUMBER def __init__(self, locally_support_version): + super(UnsupportedVersion, self).__init__() self.data = struct.pack('H', locally_support_version) @@ -403,6 +404,7 @@ class MissingWellKnown(BgpExc): SUB_CODE = BGP_ERROR_SUB_MISSING_WELL_KNOWN_ATTRIBUTE def __init__(self, pattr_type_code): + super(MissingWellKnown, self).__init__() self.pattr_type_code = pattr_type_code self.data = struct.pack('B', pattr_type_code) @@ -571,13 +573,20 @@ class OutOfResource(BgpExc): SUB_CODE = BGP_ERROR_SUB_OUT_OF_RESOURCES +@functools.total_ordering class RouteFamily(StringifyMixin): def __init__(self, afi, safi): self.afi = afi self.safi = safi - def __cmp__(self, other): - return cmp((other.afi, other.safi), (self.afi, self.safi)) + def __lt__(self, other): + return (self.afi, self.safi) < (other.afi, other.safi) + + def __eq__(self, other): + return (self.afi, self.safi) == (other.afi, other.safi) + + def __hash__(self): + return hash((self.afi, self.safi)) # Route Family Singleton RF_IPv4_UC = 
RouteFamily(addr_family.IP, subaddr_family.UNICAST) @@ -587,7 +596,7 @@ RF_IPv6_VPN = RouteFamily(addr_family.IP6, subaddr_family.MPLS_VPN) RF_IPv4_MPLS = RouteFamily(addr_family.IP, subaddr_family.MPLS_LABEL) RF_IPv6_MPLS = RouteFamily(addr_family.IP6, subaddr_family.MPLS_LABEL) RF_RTC_UC = RouteFamily(addr_family.IP, - subaddr_family.ROUTE_TARGET_CONSTRTAINS) + subaddr_family.ROUTE_TARGET_CONSTRAINTS) _rf_map = { (addr_family.IP, subaddr_family.UNICAST): RF_IPv4_UC, @@ -596,7 +605,7 @@ _rf_map = { (addr_family.IP6, subaddr_family.MPLS_VPN): RF_IPv6_VPN, (addr_family.IP, subaddr_family.MPLS_LABEL): RF_IPv4_MPLS, (addr_family.IP6, subaddr_family.MPLS_LABEL): RF_IPv6_MPLS, - (addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRTAINS): RF_RTC_UC + (addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRAINTS): RF_RTC_UC } @@ -604,9 +613,9 @@ def get_rf(afi, safi): return _rf_map[(afi, safi)] -def pad(bin, len_): - assert len(bin) <= len_ - return bin + b'\0' * (len_ - len(bin)) +def pad(binary, len_): + assert len(binary) <= len_ + return binary + b'\0' * (len_ - len(binary)) class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value): @@ -615,7 +624,9 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value): IPV4_ADDRESS = 1 FOUR_OCTET_AS = 2 - def __init__(self, type_, admin=0, assigned=0): + def __init__(self, admin=0, assigned=0, type_=None): + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) self.type = type_ self.admin = admin self.assigned = assigned @@ -626,7 +637,7 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value): (type_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf)) rest = buf[struct.calcsize(cls._PACK_STR):] subcls = cls._lookup_type(type_) - return subcls(type_=type_, **subcls.parse_value(rest)) + return subcls(**subcls.parse_value(rest)) @classmethod def from_str(cls, str_): @@ -642,7 +653,7 @@ class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value): type_ = cls.TWO_OCTET_AS first = int(first) subcls = 
cls._lookup_type(type_) - return subcls(type_=type_, admin=first, assigned=int(second)) + return subcls(admin=first, assigned=int(second)) def serialize(self): value = self.serialize_value() @@ -660,8 +671,9 @@ class BGPTwoOctetAsRD(_RouteDistinguisher): _VALUE_PACK_STR = '!HI' _VALUE_FIELDS = ['admin', 'assigned'] - def __init__(self, type_=_RouteDistinguisher.TWO_OCTET_AS, **kwargs): - self.do_init(BGPTwoOctetAsRD, self, kwargs, type_=type_) + def __init__(self, **kwargs): + super(BGPTwoOctetAsRD, self).__init__() + self.do_init(BGPTwoOctetAsRD, self, kwargs) @_RouteDistinguisher.register_type(_RouteDistinguisher.IPV4_ADDRESS) @@ -674,8 +686,9 @@ class BGPIPv4AddressRD(_RouteDistinguisher): ] } - def __init__(self, type_=_RouteDistinguisher.IPV4_ADDRESS, **kwargs): - self.do_init(BGPIPv4AddressRD, self, kwargs, type_=type_) + def __init__(self, **kwargs): + super(BGPIPv4AddressRD, self).__init__() + self.do_init(BGPIPv4AddressRD, self, kwargs) @classmethod def parse_value(cls, buf): @@ -700,16 +713,16 @@ class BGPFourOctetAsRD(_RouteDistinguisher): _VALUE_PACK_STR = '!IH' _VALUE_FIELDS = ['admin', 'assigned'] - def __init__(self, type_=_RouteDistinguisher.FOUR_OCTET_AS, - **kwargs): - self.do_init(BGPFourOctetAsRD, self, kwargs, type_=type_) + def __init__(self, **kwargs): + super(BGPFourOctetAsRD, self).__init__() + self.do_init(BGPFourOctetAsRD, self, kwargs) @six.add_metaclass(abc.ABCMeta) class _AddrPrefix(StringifyMixin): _PACK_STR = '!B' # length - def __init__(self, length, addr, prefixes=None): + def __init__(self, length, addr, prefixes=None, **kwargs): # length is on-wire bit length of prefixes+addr. 
assert prefixes != () if isinstance(addr, tuple): @@ -721,14 +734,14 @@ class _AddrPrefix(StringifyMixin): addr = prefixes + (addr,) self.addr = addr - @staticmethod + @classmethod @abc.abstractmethod - def _to_bin(addr): + def _to_bin(cls, addr): pass - @staticmethod + @classmethod @abc.abstractmethod - def _from_bin(addr): + def _from_bin(cls, addr): pass @classmethod @@ -761,12 +774,12 @@ class _AddrPrefix(StringifyMixin): class _BinAddrPrefix(_AddrPrefix): - @staticmethod - def _to_bin(addr): + @classmethod + def _to_bin(cls, addr): return addr - @staticmethod - def _from_bin(addr): + @classmethod + def _from_bin(cls, addr): return addr @@ -808,9 +821,10 @@ class _LabelledAddrPrefix(_AddrPrefix): return buf @classmethod - def _label_from_bin(cls, bin): - (b1, b2, b3) = struct.unpack_from(cls._LABEL_PACK_STR, six.binary_type(bin)) - rest = bin[struct.calcsize(cls._LABEL_PACK_STR):] + def _label_from_bin(cls, label): + (b1, b2, b3) = struct.unpack_from(cls._LABEL_PACK_STR, + six.binary_type(label)) + rest = label[struct.calcsize(cls._LABEL_PACK_STR):] return (b1 << 16) | (b2 << 8) | b3, rest @classmethod @@ -820,7 +834,7 @@ class _LabelledAddrPrefix(_AddrPrefix): labels = [x << 4 for x in labels] if labels and labels[-1] != cls._WITHDRAW_LABEL: labels[-1] |= 1 # bottom of stack - bin_labels = list(map(cls._label_to_bin, labels)) + bin_labels = list(cls._label_to_bin(l) for l in labels) return bytes(reduce(lambda x, y: x + y, bin_labels, bytearray()) + cls._prefix_to_bin(rest)) @@ -876,7 +890,7 @@ class _IPAddrPrefix(_AddrPrefix): @staticmethod def _prefix_from_bin(addr): - return (addrconv.ipv4.bin_to_text(pad(addr, 4)),) + return addrconv.ipv4.bin_to_text(pad(addr, 4)), class _IP6AddrPrefix(_AddrPrefix): @@ -887,7 +901,7 @@ class _IP6AddrPrefix(_AddrPrefix): @staticmethod def _prefix_from_bin(addr): - return (addrconv.ipv6.bin_to_text(pad(addr, 16)),) + return addrconv.ipv6.bin_to_text(pad(addr, 16)), class _VPNAddrPrefix(_AddrPrefix): @@ -1014,6 +1028,7 @@ 
class LabelledVPNIP6AddrPrefix(_LabelledAddrPrefix, _VPNAddrPrefix, return "%s:%s" % (self.route_dist, self.prefix) +@functools.total_ordering class RouteTargetMembershipNLRI(StringifyMixin): """Route Target Membership NLRI. @@ -1083,11 +1098,16 @@ class RouteTargetMembershipNLRI(StringifyMixin): return True return False - def __cmp__(self, other): - return cmp( - (self._origin_as, self._route_target), - (other.origin_as, other.route_target), - ) + def __lt__(self, other): + return ((self.origin_as, self.route_target) < + (other.origin_as, other.route_target)) + + def __eq__(self, other): + return ((self.origin_as, self.route_target) == + (other.origin_as, other.route_target)) + + def __hash__(self): + return hash((self.origin_as, self.route_target)) @classmethod def parser(cls, buf): @@ -1102,7 +1122,7 @@ class RouteTargetMembershipNLRI(StringifyMixin): return cls(origin_as, route_target) def serialize(self): - rt_nlri = '' + rt_nlri = b'' if not self.is_default_rtnlri(): rt_nlri += struct.pack('!I', self.origin_as) # Encode route target @@ -1111,7 +1131,10 @@ class RouteTargetMembershipNLRI(StringifyMixin): # RT Nlri is 12 octets return struct.pack('B', (8 * 12)) + rt_nlri -_addr_class_key = lambda x: (x.afi, x.safi) + +def _addr_class_key(route_family): + return route_family.afi, route_family.safi + _ADDR_CLASSES = { _addr_class_key(RF_IPv4_UC): IPAddrPrefix, @@ -1144,13 +1167,14 @@ class _OptParam(StringifyMixin, _TypeDisp, _Value): @classmethod def parser(cls, buf): - (type_, length) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf)) + (type_, length) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf)) rest = buf[struct.calcsize(cls._PACK_STR):] value = bytes(rest[:length]) rest = rest[length:] subcls = cls._lookup_type(type_) caps = subcls.parse_value(value) - if type(caps) != list: + if not isinstance(caps, list): caps = [subcls(type_=type_, length=length, **caps[0])] return caps, rest @@ -1267,7 +1291,8 @@ class 
BGPOptParamCapabilityGracefulRestart(_OptParamCapability): @classmethod def parse_cap_value(cls, buf): - (restart, ) = struct.unpack_from(cls._CAP_PACK_STR, six.binary_type(buf)) + (restart, ) = struct.unpack_from(cls._CAP_PACK_STR, + six.binary_type(buf)) buf = buf[2:] l = [] while len(buf) >= 4: @@ -1278,8 +1303,6 @@ class BGPOptParamCapabilityGracefulRestart(_OptParamCapability): def serialize_cap_value(self): buf = bytearray() msg_pack_into(self._CAP_PACK_STR, buf, 0, self.flags << 12 | self.time) - tuples = self.tuples - i = 0 offset = 2 for i in self.tuples: afi, safi, flags = i @@ -1298,7 +1321,8 @@ class BGPOptParamCapabilityFourOctetAsNumber(_OptParamCapability): @classmethod def parse_cap_value(cls, buf): - (as_number, ) = struct.unpack_from(cls._CAP_PACK_STR, six.binary_type(buf)) + (as_number, ) = struct.unpack_from(cls._CAP_PACK_STR, + six.binary_type(buf)) return {'as_number': as_number} def serialize_cap_value(self): @@ -1363,7 +1387,8 @@ class _PathAttribute(StringifyMixin, _TypeDisp, _Value): @classmethod def parser(cls, buf): - (flags, type_) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf)) + (flags, type_) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf)) rest = buf[struct.calcsize(cls._PACK_STR):] if (flags & BGP_ATTR_FLAG_EXTENDED_LENGTH) != 0: len_pack_str = cls._PACK_STR_EXT_LEN @@ -1506,7 +1531,7 @@ class _BGPPathAttributeAsPathCommon(_PathAttribute): six.binary_type(buf)) buf = buf[struct.calcsize(cls._SEG_HDR_PACK_STR):] l = [] - for i in range(0, num_as): + for _ in range(0, num_as): (as_number,) = struct.unpack_from(as_pack_str, six.binary_type(buf)) buf = buf[struct.calcsize(as_pack_str):] @@ -1516,7 +1541,8 @@ class _BGPPathAttributeAsPathCommon(_PathAttribute): elif type_ == cls._AS_SEQUENCE: result.append(l) else: - assert(0) # protocol error + # protocol error + raise struct.error('Unsupported segment type: %s' % type_) return { 'value': result, 'as_pack_str': as_pack_str, @@ -1530,6 +1556,10 @@ class 
_BGPPathAttributeAsPathCommon(_PathAttribute): type_ = self._AS_SET elif isinstance(e, list): type_ = self._AS_SEQUENCE + else: + raise struct.error( + 'Element of %s.value must be of type set or list' % + self.__class__.__name__) l = list(e) num_as = len(l) if num_as == 0: @@ -1578,7 +1608,8 @@ class BGPPathAttributeNextHop(_PathAttribute): @classmethod def parse_value(cls, buf): - (ip_addr,) = struct.unpack_from(cls._VALUE_PACK_STR, six.binary_type(buf)) + (ip_addr,) = struct.unpack_from(cls._VALUE_PACK_STR, + six.binary_type(buf)) return { 'value': addrconv.ipv4.bin_to_text(ip_addr), } @@ -1887,7 +1918,9 @@ class _ExtendedCommunity(StringifyMixin, _TypeDisp, _Value): FOUR_OCTET_AS_SPECIFIC = 0x02 OPAQUE = 0x03 - def __init__(self, type_): + def __init__(self, type_=None): + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) self.type = type_ @classmethod @@ -1912,10 +1945,9 @@ class BGPTwoOctetAsSpecificExtendedCommunity(_ExtendedCommunity): _VALUE_PACK_STR = '!BHI' # sub type, as number, local adm _VALUE_FIELDS = ['subtype', 'as_number', 'local_administrator'] - def __init__(self, type_=_ExtendedCommunity.TWO_OCTET_AS_SPECIFIC, - **kwargs): - self.do_init(BGPTwoOctetAsSpecificExtendedCommunity, self, kwargs, - type_=type_) + def __init__(self, **kwargs): + super(BGPTwoOctetAsSpecificExtendedCommunity, self).__init__() + self.do_init(BGPTwoOctetAsSpecificExtendedCommunity, self, kwargs) @_ExtendedCommunity.register_type(_ExtendedCommunity.IPV4_ADDRESS_SPECIFIC) @@ -1928,10 +1960,9 @@ class BGPIPv4AddressSpecificExtendedCommunity(_ExtendedCommunity): ] } - def __init__(self, type_=_ExtendedCommunity.IPV4_ADDRESS_SPECIFIC, - **kwargs): - self.do_init(BGPIPv4AddressSpecificExtendedCommunity, self, kwargs, - type_=type_) + def __init__(self, **kwargs): + super(BGPIPv4AddressSpecificExtendedCommunity, self).__init__() + self.do_init(BGPIPv4AddressSpecificExtendedCommunity, self, kwargs) @classmethod def parse_value(cls, buf): @@ -1957,10 +1988,9 @@ 
class BGPFourOctetAsSpecificExtendedCommunity(_ExtendedCommunity): _VALUE_PACK_STR = '!BIH' # sub type, as number, local adm _VALUE_FIELDS = ['subtype', 'as_number', 'local_administrator'] - def __init__(self, type_=_ExtendedCommunity.FOUR_OCTET_AS_SPECIFIC, - **kwargs): - self.do_init(BGPFourOctetAsSpecificExtendedCommunity, self, kwargs, - type_=type_) + def __init__(self, **kwargs): + super(BGPFourOctetAsSpecificExtendedCommunity, self).__init__() + self.do_init(BGPFourOctetAsSpecificExtendedCommunity, self, kwargs) @_ExtendedCommunity.register_type(_ExtendedCommunity.OPAQUE) @@ -1968,18 +1998,18 @@ class BGPOpaqueExtendedCommunity(_ExtendedCommunity): _VALUE_PACK_STR = '!7s' # opaque value _VALUE_FIELDS = ['opaque'] - def __init__(self, type_=_ExtendedCommunity.OPAQUE, - **kwargs): - self.do_init(BGPOpaqueExtendedCommunity, self, kwargs, - type_=type_) + def __init__(self, **kwargs): + super(BGPOpaqueExtendedCommunity, self).__init__() + self.do_init(BGPOpaqueExtendedCommunity, self, kwargs) @_ExtendedCommunity.register_unknown_type() class BGPUnknownExtendedCommunity(_ExtendedCommunity): _VALUE_PACK_STR = '!7s' # opaque value - def __init__(self, **kwargs): - self.do_init(BGPUnknownExtendedCommunity, self, kwargs) + def __init__(self, type_, **kwargs): + super(BGPUnknownExtendedCommunity, self).__init__(type_=type_) + self.do_init(BGPUnknownExtendedCommunity, self, kwargs, type_=type_) @_PathAttribute.register_type(BGP_ATTR_TYPE_MP_REACH_NLRI) @@ -2116,7 +2146,8 @@ class BGPPathAttributeMpUnreachNLRI(_PathAttribute): @classmethod def parse_value(cls, buf): - (afi, safi,) = struct.unpack_from(cls._VALUE_PACK_STR, six.binary_type(buf)) + (afi, safi,) = struct.unpack_from(cls._VALUE_PACK_STR, + six.binary_type(buf)) binnlri = buf[struct.calcsize(cls._VALUE_PACK_STR):] addr_cls = _get_addr_class(afi, safi) nlri = [] @@ -2160,7 +2191,7 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp): ========================== =============================================== 
marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. one of BGP\_MSG\_ constants. + type Type field. one of ``BGP_MSG_*`` constants. ========================== =============================================== """ @@ -2168,12 +2199,15 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp): _HDR_LEN = struct.calcsize(_HDR_PACK_STR) _class_prefixes = ['BGP'] - def __init__(self, type_, len_=None, marker=None): + def __init__(self, marker=None, len_=None, type_=None): + super(BGPMessage, self).__init__() if marker is None: self._marker = _MARKER else: self._marker = marker self.len = len_ + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) self.type = type_ @classmethod @@ -2193,7 +2227,7 @@ class BGPMessage(packet_base.PacketBase, _TypeDisp): kwargs = subcls.parser(binmsg) return subcls(marker=marker, len_=len_, type_=type_, **kwargs), rest - def serialize(self): + def serialize(self, payload=None, prev=None): # fixup self._marker = _MARKER tail = self.serialize_tail() @@ -2260,9 +2294,12 @@ class BGPOpen(BGPMessage): @classmethod def parser(cls, buf): - (version, my_as, hold_time, - bgp_identifier, opt_param_len) = struct.unpack_from(cls._PACK_STR, - six.binary_type(buf)) + (version, + my_as, + hold_time, + bgp_identifier, + opt_param_len) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf)) rest = buf[struct.calcsize(cls._PACK_STR):] binopts = rest[:opt_param_len] opt_param = [] diff --git a/ryu/lib/packet/ospf.py b/ryu/lib/packet/ospf.py index 98c5d71c..464f089b 100644 --- a/ryu/lib/packet/ospf.py +++ b/ryu/lib/packet/ospf.py @@ -682,7 +682,7 @@ class OSPFMessage(packet_base.PacketBase, _TypeDisp): rest = buf[length:] subcls = cls._lookup_type(type_) kwargs = subcls.parser(binmsg) - return subcls(length, router_id, area_id, au_type, authentication, + return subcls(length, router_id, area_id, au_type, int(authentication), checksum, version, **kwargs), None, rest @classmethod diff --git 
a/ryu/lib/packet/safi.py b/ryu/lib/packet/safi.py index fc3e0acd..17ca138c 100644 --- a/ryu/lib/packet/safi.py +++ b/ryu/lib/packet/safi.py @@ -23,4 +23,4 @@ UNICAST = 1 MULTICAST = 2 MPLS_LABEL = 4 # RFC 3107 MPLS_VPN = 128 # RFC 4364 -ROUTE_TARGET_CONSTRTAINS = 132 # RFC 4684 +ROUTE_TARGET_CONSTRAINTS = 132 # RFC 4684 diff --git a/ryu/lib/packet/tcp.py b/ryu/lib/packet/tcp.py index 7b5a7c7b..1771b9a1 100644 --- a/ryu/lib/packet/tcp.py +++ b/ryu/lib/packet/tcp.py @@ -35,6 +35,16 @@ TCP_OPTION_KIND_TIMESTAMPS = 8 # Timestamps TCP_OPTION_KIND_USER_TIMEOUT = 28 # User Timeout Option TCP_OPTION_KIND_AUTHENTICATION = 29 # TCP Authentication Option (TCP-AO) +TCP_FIN = 0x001 +TCP_SYN = 0x002 +TCP_RST = 0x004 +TCP_PSH = 0x008 +TCP_ACK = 0x010 +TCP_URG = 0x020 +TCP_ECE = 0x040 +TCP_CWR = 0x080 +TCP_NS = 0x100 + class tcp(packet_base.PacketBase): """TCP (RFC 793) header encoder/decoder class. @@ -83,6 +93,21 @@ class tcp(packet_base.PacketBase): def __len__(self): return self.offset * 4 + def has_flags(self, *flags): + """Check if flags are set on this packet. + + returns boolean if all passed flags is set + + Example:: + + >>> pkt = tcp.tcp(bits=(tcp.TCP_SYN | tcp.TCP_ACK)) + >>> pkt.has_flags(tcp.TCP_SYN, tcp.TCP_ACK) + True + """ + + mask = sum(flags) + return (self.bits & mask) == mask + @classmethod def parser(cls, buf): (src_port, dst_port, seq, ack, offset, bits, window_size, diff --git a/ryu/lib/packet/udp.py b/ryu/lib/packet/udp.py index bae6d735..b67bd1a4 100644 --- a/ryu/lib/packet/udp.py +++ b/ryu/lib/packet/udp.py @@ -18,6 +18,7 @@ import struct from . import packet_base from . import packet_utils from . import dhcp +from . 
import vxlan class udp(packet_base.PacketBase): @@ -49,10 +50,14 @@ class udp(packet_base.PacketBase): self.total_length = total_length self.csum = csum - @classmethod - def get_packet_type(cls, src_port, dst_port): - if (src_port == 68 and dst_port == 67) or (src_port == 67 and dst_port == 68): + @staticmethod + def get_packet_type(src_port, dst_port): + if ((src_port == 68 and dst_port == 67) or + (src_port == 67 and dst_port == 68)): return dhcp.dhcp + if (dst_port == vxlan.UDP_DST_PORT or + dst_port == vxlan.UDP_DST_PORT_OLD): + return vxlan.vxlan return None @classmethod diff --git a/ryu/lib/packet/vxlan.py b/ryu/lib/packet/vxlan.py new file mode 100644 index 00000000..d68b9b62 --- /dev/null +++ b/ryu/lib/packet/vxlan.py @@ -0,0 +1,90 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +VXLAN packet parser/serializer + +RFC 7348 +VXLAN Header: ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +|R|R|R|R|I|R|R|R| Reserved | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| VXLAN Network Identifier (VNI) | Reserved | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +- Flags (8 bits): where the I flag MUST be set to 1 for a valid + VXLAN Network ID (VNI). The other 7 bits (designated "R") are + reserved fields and MUST be set to zero on transmission and + ignored on receipt. 
+ +- VXLAN Segment ID/VXLAN Network Identifier (VNI): this is a + 24-bit value used to designate the individual VXLAN overlay + network on which the communicating VMs are situated. VMs in + different VXLAN overlay networks cannot communicate with each + other. + +- Reserved fields (24 bits and 8 bits): MUST be set to zero on + transmission and ignored on receipt. +""" + +import struct +import logging + +from . import packet_base + + +LOG = logging.getLogger(__name__) + +UDP_DST_PORT = 4789 +UDP_DST_PORT_OLD = 8472 # for backward compatibility like Linux + + +class vxlan(packet_base.PacketBase): + """VXLAN (RFC 7348) header encoder/decoder class. + + An instance has the following attributes at least. + Most of them are same to the on-wire counterparts but in host byte order. + __init__ takes the corresponding args in this order. + + ============== ==================== + Attribute Description + ============== ==================== + vni VXLAN Network Identifier + ============== ==================== + """ + + # Note: Python has no format character for 24 bits field. + # we use uint32 format character instead and bit-shift at serializing. + _PACK_STR = '!II' + _MIN_LEN = struct.calcsize(_PACK_STR) + + def __init__(self, vni): + super(vxlan, self).__init__() + self.vni = vni + + @classmethod + def parser(cls, buf): + (flags_reserved, vni_reserved) = struct.unpack_from(cls._PACK_STR, buf) + + # Check VXLAN flags is valid + assert (1 << 3) == (flags_reserved >> 24) + + # Note: To avoid cyclic import, import ethernet module here + from ryu.lib.packet import ethernet + return cls(vni_reserved >> 8), ethernet.ethernet, buf[cls._MIN_LEN:] + + def serialize(self, payload, prev): + return struct.pack(self._PACK_STR, + 1 << (3 + 24), self.vni << 8) diff --git a/ryu/lib/pcaplib.py b/ryu/lib/pcaplib.py index 03e02027..8b7400bf 100644 --- a/ryu/lib/pcaplib.py +++ b/ryu/lib/pcaplib.py @@ -1,3 +1,18 @@ +# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """ Parsing libpcap and reading/writing PCAP file. Reference source: http://wiki.wireshark.org/Development/LibpcapFileFormat @@ -18,55 +33,10 @@ Reference source: http://wiki.wireshark.org/Development/LibpcapFileFormat +---------------------+ | Packet Data | +---------------------+ - | ... - +---------------- ... - - -Sample usage of dump packets: - - from ryu.lib import pcaplib - - class SimpleSwitch13(app_manager.RyuApp): - OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] - - def __init__(self, *args, **kwargs): - super(SimpleSwitch13, self).__init__(*args, **kwargs) - self.mac_to_port = {} - - # Creating an instance with a PCAP filename - self.pcap_pen = Writer(open('mypcap.pcap', 'wb')) - - @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) - def _packet_in_handler(self, ev): - msg = ev.msg - - # Dump the data packet into PCAP file - self.pcap_pen.write_pkt(msg.data) - - pkt = packet.Packet(msg.data) - -Sample usage of reading PCAP files: - - from ryu.lib import pcaplib - from ryu.lib.packet import packet - - frame_count = 0 - # Using the Reader iterator that yields packets in PCAP file - for ts, buf in pcaplib.Reader(open('test.pcap', 'rb')): - frame_count += 1 - pkt = packet.Packet(buf) - - eth = pkt.get_protocols(ethernet.ethernet)[0] - - dst = eth.dst - src = eth.src - # print frames count, timestamp, ethernet src, ethernet dst - # and raw packet. - print frame_count, ts, dst, src, pkt - + | ... 
| + +---------------------+ """ -import six import struct import sys import time @@ -103,43 +73,56 @@ class PcapFileHdr(object): +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ File Format """ - _FILE_HDR_FMT = None + _FILE_HDR_FMT = '4sHHIIII' + _FILE_HDR_FMT_BIG_ENDIAN = '>' + _FILE_HDR_FMT + _FILE_HDR_FMT_LITTLE_ENDIAN = '<' + _FILE_HDR_FMT + FILE_HDR_SIZE = struct.calcsize(_FILE_HDR_FMT) - def __init__(self, magic=b'\xd4\xc3\xb2\xa1', version_major=2, + # Magic Number field is used to detect the file format itself and + # the byte ordering. + MAGIC_NUMBER_IDENTICAL = b'\xa1\xb2\xc3\xd4' # Big Endian + MAGIC_NUMBER_SWAPPED = b'\xd4\xc3\xb2\xa1' # Little Endian + + def __init__(self, magic=MAGIC_NUMBER_SWAPPED, version_major=2, version_minor=4, thiszone=0, sigfigs=0, snaplen=0, - linktype=0): + network=0): self.magic = magic self.version_major = version_major self.version_minor = version_minor self.thiszone = thiszone self.sigfigs = sigfigs self.snaplen = snaplen - self.linktype = linktype + self.network = network @classmethod def parser(cls, buf): - if buf[:4] == b'\xa1\xb2\xc3\xd4': + magic_buf = buf[:4] + if magic_buf == cls.MAGIC_NUMBER_IDENTICAL: # Big Endian - cls._FILE_HDR_FMT = '>IHHIIII' - byteorder = '>' - elif buf[:4] == b'\xd4\xc3\xb2\xa1': + fmt = cls._FILE_HDR_FMT_BIG_ENDIAN + byteorder = 'big' + elif magic_buf == cls.MAGIC_NUMBER_SWAPPED: # Little Endian - cls._FILE_HDR_FMT = 'IHHIIII') - self._f.write(str(p)) + def _write_pcap_file_hdr(self): + pcap_file_hdr = PcapFileHdr(snaplen=self.snaplen, + network=self.network) + self._f.write(pcap_file_hdr.serialize()) - def _write_pkt_hdr(self, ts, buf_str_len): + def _write_pkt_hdr(self, ts, buf_len): sec = int(ts) - if sec == 0: - usec = 0 - else: - usec = int(ts * 1e6) % int(ts) + usec = int(round(ts % 1, 6) * 1e6) if sec != 0 else 0 - if sys.byteorder == 'little': - # usec = int(ts * 1e6) % int(ts) - # old_usec = int((float(ts) - int(ts)) * 1e6) - pc_pkt_hdr = PcapPktHdr(ts_sec=sec, - 
ts_usec=usec, - incl_len=buf_str_len, - orig_len=buf_str_len) - p = pc_pkt_hdr.serialize(fmt='IIII') - self._f.write(str(p)) + pc_pkt_hdr = PcapPktHdr(ts_sec=sec, ts_usec=usec, + incl_len=buf_len, orig_len=buf_len) + + self._f.write(pc_pkt_hdr.serialize()) def write_pkt(self, buf, ts=None): - if ts is None: - ts = time.time() + ts = time.time() if ts is None else ts - buf_str = six.binary_type(buf) - buf_str_len = len(buf_str) - self._write_pkt_hdr(ts, buf_str_len) - self._f.write(buf_str) + # Check the max length of captured packets + buf_len = len(buf) + if buf_len > self.snaplen: + buf_len = self.snaplen + buf = buf[:self.snaplen] + + self._write_pkt_hdr(ts, buf_len) + + self._f.write(buf) def __del__(self): self._f.close() diff --git a/ryu/ofproto/nicira_ext.py b/ryu/ofproto/nicira_ext.py index 74616b54..aae3383e 100644 --- a/ryu/ofproto/nicira_ext.py +++ b/ryu/ofproto/nicira_ext.py @@ -17,9 +17,11 @@ # Nicira extensions # Many of these definitions are common among OpenFlow versions. 
+import sys from struct import calcsize - +from ryu.lib import type_desc from ryu.ofproto.ofproto_common import OFP_HEADER_SIZE +from ryu.ofproto import oxm_fields # Action subtypes NXAST_RESUBMIT = 1 @@ -84,6 +86,7 @@ assert calcsize(NX_ACTION_NOTE_PACK_STR) == NX_ACTION_NOTE_SIZE NX_ACTION_BUNDLE_PACK_STR = '!HHIHHHHIHHI4x' NX_ACTION_BUNDLE_SIZE = 32 +NX_ACTION_BUNDLE_0_SIZE = 24 assert calcsize(NX_ACTION_BUNDLE_PACK_STR) == NX_ACTION_BUNDLE_SIZE NX_ACTION_AUTOPATH_PACK_STR = '!HHIHHII4x' @@ -108,6 +111,7 @@ assert calcsize(NX_ACTION_FIN_TIMEOUT_PACK_STR) == NX_ACTION_FIN_TIMEOUT_SIZE NX_ACTION_HEADER_PACK_STR = '!HHIH6x' NX_ACTION_HEADER_SIZE = 16 +NX_ACTION_HEADER_0_SIZE = 2 assert calcsize(NX_ACTION_HEADER_PACK_STR) == NX_ACTION_HEADER_SIZE # Messages @@ -250,3 +254,206 @@ NX_NAT_RANGE_IPV6_MIN = 1 << 2 NX_NAT_RANGE_IPV6_MAX = 1 << 3 NX_NAT_RANGE_PROTO_MIN = 1 << 4 NX_NAT_RANGE_PROTO_MAX = 1 << 5 + + +def nxm_header__(vendor, field, hasmask, length): + return (vendor << 16) | (field << 9) | (hasmask << 8) | length + + +def nxm_header(vendor, field, length): + return nxm_header__(vendor, field, 0, length) + + +def nxm_header_w(vendor, field, length): + return nxm_header__(vendor, field, 1, (length) * 2) + + +NXM_OF_IN_PORT = nxm_header(0x0000, 0, 2) + +NXM_OF_ETH_DST = nxm_header(0x0000, 1, 6) +NXM_OF_ETH_DST_W = nxm_header_w(0x0000, 1, 6) +NXM_OF_ETH_SRC = nxm_header(0x0000, 2, 6) +NXM_OF_ETH_SRC_W = nxm_header_w(0x0000, 2, 6) +NXM_OF_ETH_TYPE = nxm_header(0x0000, 3, 2) + +NXM_OF_VLAN_TCI = nxm_header(0x0000, 4, 2) +NXM_OF_VLAN_TCI_W = nxm_header_w(0x0000, 4, 2) + +NXM_OF_IP_TOS = nxm_header(0x0000, 5, 1) + +NXM_OF_IP_PROTO = nxm_header(0x0000, 6, 1) + +NXM_OF_IP_SRC = nxm_header(0x0000, 7, 4) +NXM_OF_IP_SRC_W = nxm_header_w(0x0000, 7, 4) +NXM_OF_IP_DST = nxm_header(0x0000, 8, 4) +NXM_OF_IP_DST_W = nxm_header_w(0x0000, 8, 4) + +NXM_OF_TCP_SRC = nxm_header(0x0000, 9, 2) +NXM_OF_TCP_SRC_W = nxm_header_w(0x0000, 9, 2) +NXM_OF_TCP_DST = nxm_header(0x0000, 10, 2) 
+NXM_OF_TCP_DST_W = nxm_header_w(0x0000, 10, 2) + +NXM_OF_UDP_SRC = nxm_header(0x0000, 11, 2) +NXM_OF_UDP_SRC_W = nxm_header_w(0x0000, 11, 2) +NXM_OF_UDP_DST = nxm_header(0x0000, 12, 2) +NXM_OF_UDP_DST_W = nxm_header_w(0x0000, 12, 2) + +NXM_OF_ICMP_TYPE = nxm_header(0x0000, 13, 1) +NXM_OF_ICMP_CODE = nxm_header(0x0000, 14, 1) + +NXM_OF_ARP_OP = nxm_header(0x0000, 15, 2) + +NXM_OF_ARP_SPA = nxm_header(0x0000, 16, 4) +NXM_OF_ARP_SPA_W = nxm_header_w(0x0000, 16, 4) +NXM_OF_ARP_TPA = nxm_header(0x0000, 17, 4) +NXM_OF_ARP_TPA_W = nxm_header_w(0x0000, 17, 4) + +NXM_NX_TUN_ID = nxm_header(0x0001, 16, 8) +NXM_NX_TUN_ID_W = nxm_header_w(0x0001, 16, 8) +NXM_NX_TUN_IPV4_SRC = nxm_header(0x0001, 31, 4) +NXM_NX_TUN_IPV4_SRC_W = nxm_header_w(0x0001, 31, 4) +NXM_NX_TUN_IPV4_DST = nxm_header(0x0001, 32, 4) +NXM_NX_TUN_IPV4_DST_W = nxm_header_w(0x0001, 32, 4) + +NXM_NX_ARP_SHA = nxm_header(0x0001, 17, 6) +NXM_NX_ARP_THA = nxm_header(0x0001, 18, 6) + +NXM_NX_IPV6_SRC = nxm_header(0x0001, 19, 16) +NXM_NX_IPV6_SRC_W = nxm_header_w(0x0001, 19, 16) +NXM_NX_IPV6_DST = nxm_header(0x0001, 20, 16) +NXM_NX_IPV6_DST_W = nxm_header_w(0x0001, 20, 16) + +NXM_NX_ICMPV6_TYPE = nxm_header(0x0001, 21, 1) +NXM_NX_ICMPV6_CODE = nxm_header(0x0001, 22, 1) + +NXM_NX_ND_TARGET = nxm_header(0x0001, 23, 16) +NXM_NX_ND_TARGET_W = nxm_header_w(0x0001, 23, 16) + +NXM_NX_ND_SLL = nxm_header(0x0001, 24, 6) + +NXM_NX_ND_TLL = nxm_header(0x0001, 25, 6) + +NXM_NX_IP_FRAG = nxm_header(0x0001, 26, 1) +NXM_NX_IP_FRAG_W = nxm_header_w(0x0001, 26, 1) + +NXM_NX_IPV6_LABEL = nxm_header(0x0001, 27, 4) + +NXM_NX_IP_ECN = nxm_header(0x0001, 28, 1) + +NXM_NX_IP_TTL = nxm_header(0x0001, 29, 1) + +NXM_NX_PKT_MARK = nxm_header(0x0001, 33, 4) +NXM_NX_PKT_MARK_W = nxm_header_w(0x0001, 33, 4) + +NXM_NX_TCP_FLAGS = nxm_header(0x0001, 34, 2) +NXM_NX_TCP_FLAGS_W = nxm_header_w(0x0001, 34, 2) + + +def nxm_nx_reg(idx): + return nxm_header(0x0001, idx, 4) + + +def nxm_nx_reg_w(idx): + return nxm_header_w(0x0001, idx, 4) + 
+NXM_HEADER_PACK_STRING = '!I' + +# +# The followings are implementations for OpenFlow 1.2+ +# + +sys.modules[__name__].__doc__ = """ +The API of this class is the same as ``OFPMatch``. + +You can define the flow match by the keyword arguments. +The following arguments are available. + +================ =============== ============================================== +Argument Value Description +================ =============== ============================================== +eth_dst_nxm MAC address Ethernet destination address. +eth_src_nxm MAC address Ethernet source address. +eth_type_nxm Integer 16bit Ethernet type. Needed to support Nicira + extensions that require the eth_type to + be set. (i.e. tcp_flags_nxm) +ip_proto_nxm Integer 8bit IP protocol. Needed to support Nicira + extensions that require the ip_proto to + be set. (i.e. tcp_flags_nxm) +tunnel_id_nxm Integer 64bit Tunnel identifier. +tun_ipv4_src IPv4 address Tunnel IPv4 source address. +tun_ipv4_dst IPv4 address Tunnel IPv4 destination address. +pkt_mark Integer 32bit Packet metadata mark. +tcp_flags_nxm Integer 16bit TCP Flags. Requires setting fields: + eth_type_nxm = [0x0800 (IP)|0x86dd (IPv6)] and + ip_proto_nxm = 6 (TCP) +conj_id Integer 32bit Conjunction ID used only with + the conjunction action +ct_state Integer 32bit Conntrack state. +ct_zone Integer 16bit Conntrack zone. +ct_mark Integer 32bit Conntrack mark. +ct_label Integer 128bit Conntrack label. +tun_ipv6_src IPv6 address Tunnel IPv6 source address. +tun_ipv6_dst IPv6 address Tunnel IPv6 destination address. +_dp_hash Integer 32bit Flow hash computed in Datapath. +reg Integer 32bit Packet register. + is register number 0-7. +================ =============== ============================================== + +.. Note:: + + Setting the TCP flags via the nicira extensions. + This is required when using OVS version < 2.4. 
+ When using the nxm fields, you need to use any nxm prereq + fields as well or you will receive a OFPBMC_BAD_PREREQ error + + Example:: + + # WILL NOT work + flag = tcp.TCP_ACK + match = parser.OFPMatch( + tcp_flags_nxm=(flag, flag), + ip_proto=inet.IPPROTO_TCP, + eth_type=eth_type) + + # Works + flag = tcp.TCP_ACK + match = parser.OFPMatch( + tcp_flags_nxm=(flag, flag), + ip_proto_nxm=inet.IPPROTO_TCP, + eth_type_nxm=eth_type) +""" + +oxm_types = [ + oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr), + oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr), + oxm_fields.NiciraExtended0('eth_type_nxm', 3, type_desc.Int2), + oxm_fields.NiciraExtended0('ip_proto_nxm', 6, type_desc.Int1), + oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8), + oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr), + oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr), + oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4), + oxm_fields.NiciraExtended1('tcp_flags_nxm', 34, type_desc.Int2), + oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4), + oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4), + oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2), + oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4), + oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16), + oxm_fields.NiciraExtended1('tun_ipv6_src', 109, type_desc.IPv6Addr), + oxm_fields.NiciraExtended1('tun_ipv6_dst', 110, type_desc.IPv6Addr), + + # The following definition is merely for testing 64-bit experimenter OXMs. + # Following Open vSwitch, we use dp_hash for this purpose. + # Prefix the name with '_' to indicate this is not intended to be used + # in wild. 
+ oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4), + + # Support for matching/setting NX registers 0-7 + oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4), + oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4), + oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4), + oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4), + oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4), + oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4), + oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4), + oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4), +] diff --git a/ryu/ofproto/nx_actions.py b/ryu/ofproto/nx_actions.py index f729bdf9..14bc4796 100644 --- a/ryu/ofproto/nx_actions.py +++ b/ryu/ofproto/nx_actions.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + import struct from ryu import utils @@ -26,8 +28,6 @@ from ryu.ofproto.ofproto_parser import StringifyMixin def generate(ofp_name, ofpp_name): import sys - import string - import functools ofp = sys.modules[ofp_name] ofpp = sys.modules[ofpp_name] @@ -154,7 +154,7 @@ def generate(ofp_name, ofpp_name): _experimenter = ofproto_common.NX_EXPERIMENTER_ID def __init__(self): - super(NXAction, self).__init__(experimenter=self._experimenter) + super(NXAction, self).__init__(self._experimenter) self.subtype = self._subtype @classmethod @@ -165,14 +165,21 @@ def generate(ofp_name, ofpp_name): rest = buf[struct.calcsize(fmt_str):] if subtype_cls is None: return NXActionUnknown(subtype, rest) - return subtype_cls.parse(rest) + return subtype_cls.parser(rest) def serialize(self, buf, offset): + data = self.serialize_body() + payload_offset = ( + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + + struct.calcsize(NXAction._fmt_str) + ) + self.len = utils.round_up(payload_offset + len(data), 8) super(NXAction, self).serialize(buf, offset) msg_pack_into(NXAction._fmt_str, buf, offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE, 
self.subtype) + buf += data @classmethod def register(cls, subtype_cls): @@ -187,21 +194,135 @@ def generate(ofp_name, ofpp_name): self.data = data @classmethod - def parse(cls, subtype, buf): + def parser(cls, buf): return cls(data=buf) - def serialize(self, buf, offset): + def serialize_body(self): # fixup - data = self.data - if data is None: - data = bytearray() - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionUnknown, self).serialize(buf, offset) - buf += data + return bytearray() if self.data is None else self.data + + class NXActionPopQueue(NXAction): + _subtype = nicira_ext.NXAST_POP_QUEUE + + _fmt_str = '!6x' + + def __init__(self, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionPopQueue, self).__init__() + + @classmethod + def parser(cls, buf): + return cls() + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0) + return data + + class NXActionRegLoad(NXAction): + _subtype = nicira_ext.NXAST_REG_LOAD + _fmt_str = '!HIQ' # ofs_nbits, dst, value + _TYPE = { + 'ascii': [ + 'dst', + ] + } + + def __init__(self, start, end, dst, value, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionRegLoad, self).__init__() + self.start = start + self.end = end + self.dst = dst + self.value = value + + @classmethod + def parser(cls, buf): + (ofs_nbits, dst, value,) = struct.unpack_from( + cls._fmt_str, buf, 0) + start = ofs_nbits >> 6 + end = (ofs_nbits & 0x3f) + start + # Right-shift instead of using oxm_parse_header for simplicity... 
+ dst_name = ofp.oxm_to_user_header(dst >> 9) + return cls(start, end, dst_name, value) + + def serialize_body(self): + hdr_data = bytearray() + n = ofp.oxm_from_user_header(self.dst) + ofp.oxm_serialize_header(n, hdr_data, 0) + (dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0) + + ofs_nbits = (self.start << 6) + (self.end - self.start) + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + ofs_nbits, dst_num, self.value) + return data + + class NXActionNote(NXAction): + _subtype = nicira_ext.NXAST_NOTE + + # note + _fmt_str = '!%dB' + + # set the integer array in a note + def __init__(self, + note, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionNote, self).__init__() + self.note = note + + @classmethod + def parser(cls, buf): + note = struct.unpack_from( + cls._fmt_str % len(buf), buf, 0) + return cls(list(note)) + + def serialize_body(self): + assert isinstance(self.note, (tuple, list)) + for n in self.note: + assert isinstance(n, six.integer_types) + + pad = (len(self.note) + nicira_ext.NX_ACTION_HEADER_0_SIZE) % 8 + if pad: + self.note += [0x0 for i in range(8 - pad)] + note_len = len(self.note) + data = bytearray() + msg_pack_into(self._fmt_str % note_len, data, 0, + *self.note) + return data + + class _NXActionSetTunnelBase(NXAction): + # _subtype, _fmt_str must be attributes of subclass. 
+ + def __init__(self, + tun_id, + type_=None, len_=None, experimenter=None, subtype=None): + super(_NXActionSetTunnelBase, self).__init__() + self.tun_id = tun_id + + @classmethod + def parser(cls, buf): + (tun_id,) = struct.unpack_from( + cls._fmt_str, buf, 0) + return cls(tun_id) + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + self.tun_id) + return data + + class NXActionSetTunnel(_NXActionSetTunnelBase): + _subtype = nicira_ext.NXAST_SET_TUNNEL + + # tun_id + _fmt_str = '!2xI' + + class NXActionSetTunnel64(_NXActionSetTunnelBase): + _subtype = nicira_ext.NXAST_SET_TUNNEL64 + + # tun_id + _fmt_str = '!6xQ' class NXActionRegMove(NXAction): _subtype = nicira_ext.NXAST_REG_MOVE @@ -224,9 +345,9 @@ def generate(ofp_name, ofpp_name): self.dst_field = dst_field @classmethod - def parse(cls, buf): + def parser(cls, buf): (n_bits, src_ofs, dst_ofs,) = struct.unpack_from( - NXActionRegMove._fmt_str, buf, 0) + cls._fmt_str, buf, 0) rest = buf[struct.calcsize(NXActionRegMove._fmt_str):] # src field (n, len) = ofp.oxm_parse_header(rest, 0) @@ -240,10 +361,10 @@ def generate(ofp_name, ofpp_name): return cls(src_field, dst_field=dst_field, n_bits=n_bits, src_ofs=src_ofs, dst_ofs=dst_ofs) - def serialize(self, buf, offset): + def serialize_body(self): # fixup data = bytearray() - msg_pack_into(NXActionRegMove._fmt_str, data, 0, + msg_pack_into(self._fmt_str, data, 0, self.n_bits, self.src_ofs, self.dst_ofs) # src field n = ofp.oxm_from_user_header(self.src_field) @@ -251,14 +372,98 @@ def generate(ofp_name, ofpp_name): # dst field n = ofp.oxm_from_user_header(self.dst_field) ofp.oxm_serialize_header(n, data, len(data)) - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionRegMove, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + return data + + 
class NXActionResubmit(NXAction): + _subtype = nicira_ext.NXAST_RESUBMIT + + # in_port + _fmt_str = '!H4x' + + def __init__(self, + in_port=0xfff8, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionResubmit, self).__init__() + self.in_port = in_port + + @classmethod + def parser(cls, buf): + (in_port,) = struct.unpack_from( + cls._fmt_str, buf, 0) + return cls(in_port) + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + self.in_port) + return data + + class NXActionResubmitTable(NXAction): + _subtype = nicira_ext.NXAST_RESUBMIT_TABLE + + # in_port, table_id + _fmt_str = '!HB3x' + + def __init__(self, + in_port=0xfff8, + table_id=0xff, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionResubmitTable, self).__init__() + self.in_port = in_port + self.table_id = table_id + + @classmethod + def parser(cls, buf): + (in_port, + table_id) = struct.unpack_from( + cls._fmt_str, buf, 0) + return cls(in_port, table_id) + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + self.in_port, self.table_id) + return data + + class NXActionOutputReg(NXAction): + _subtype = nicira_ext.NXAST_OUTPUT_REG + + # ofs_nbits, src, max_len + _fmt_str = '!HIH6x' + + def __init__(self, + start, + end, + src, + max_len, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionOutputReg, self).__init__() + self.start = start + self.end = end + self.src = src + self.max_len = max_len + + @classmethod + def parser(cls, buf): + (ofs_nbits, + src, + max_len) = struct.unpack_from( + cls._fmt_str, buf, 0) + start = ofs_nbits >> 6 + end = (ofs_nbits & 0x3f) + start + return cls(start, + end, + src, + max_len) + + def serialize_body(self): + data = bytearray() + ofs_nbits = (self.start << 6) + (self.end - self.start) + msg_pack_into(self._fmt_str, data, 0, + ofs_nbits, + self.src, + self.max_len) + return data class NXActionLearn(NXAction): _subtype = 
nicira_ext.NXAST_LEARN @@ -291,7 +496,7 @@ def generate(ofp_name, ofpp_name): self.specs = specs @classmethod - def parse(cls, buf): + def parser(cls, buf): (idle_timeout, hard_timeout, priority, @@ -300,8 +505,8 @@ def generate(ofp_name, ofpp_name): table_id, fin_idle_timeout, fin_hard_timeout,) = struct.unpack_from( - NXActionLearn._fmt_str, buf, 0) - rest = buf[struct.calcsize(NXActionLearn._fmt_str):] + cls._fmt_str, buf, 0) + rest = buf[struct.calcsize(cls._fmt_str):] # specs specs = [] while len(rest) > 0: @@ -319,10 +524,10 @@ def generate(ofp_name, ofpp_name): fin_hard_timeout=fin_hard_timeout, specs=specs) - def serialize(self, buf, offset): + def serialize_body(self): # fixup data = bytearray() - msg_pack_into(NXActionLearn._fmt_str, data, 0, + msg_pack_into(self._fmt_str, data, 0, self.idle_timeout, self.hard_timeout, self.priority, @@ -333,14 +538,88 @@ def generate(ofp_name, ofpp_name): self.fin_hard_timeout) for spec in self.specs: data += spec.serialize() - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionLearn, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + return data + + class NXActionExit(NXAction): + _subtype = nicira_ext.NXAST_EXIT + + _fmt_str = '!6x' + + def __init__(self, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionExit, self).__init__() + + @classmethod + def parser(cls, buf): + return cls() + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0) + return data + + class NXActionController(NXAction): + _subtype = nicira_ext.NXAST_CONTROLLER + + # max_len, controller_id, reason + _fmt_str = '!HHBx' + + def __init__(self, + max_len, + controller_id, + reason, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionController, self).__init__() + self.max_len = max_len + 
self.controller_id = controller_id + self.reason = reason + + @classmethod + def parser(cls, buf): + (max_len, + controller_id, + reason) = struct.unpack_from( + cls._fmt_str, buf) + return cls(max_len, + controller_id, + reason) + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + self.max_len, + self.controller_id, + self.reason) + return data + + class NXActionFinTimeout(NXAction): + _subtype = nicira_ext.NXAST_FIN_TIMEOUT + + # fin_idle_timeout, fin_hard_timeout + _fmt_str = '!HH2x' + + def __init__(self, + fin_idle_timeout, + fin_hard_timeout, + type_=None, len_=None, experimenter=None, subtype=None): + super(NXActionFinTimeout, self).__init__() + self.fin_idle_timeout = fin_idle_timeout + self.fin_hard_timeout = fin_hard_timeout + + @classmethod + def parser(cls, buf): + (fin_idle_timeout, + fin_hard_timeout) = struct.unpack_from( + cls._fmt_str, buf, 0) + return cls(fin_idle_timeout, + fin_hard_timeout) + + def serialize_body(self): + data = bytearray() + msg_pack_into(self._fmt_str, data, 0, + self.fin_idle_timeout, + self.fin_hard_timeout) + return data class NXActionConjunction(NXAction): _subtype = nicira_ext.NXAST_CONJUNCTION @@ -359,67 +638,172 @@ def generate(ofp_name, ofpp_name): self.id = id_ @classmethod - def parse(cls, buf): + def parser(cls, buf): (clause, n_clauses, id_,) = struct.unpack_from( - NXActionConjunction._fmt_str, buf, 0) + cls._fmt_str, buf, 0) return cls(clause, n_clauses, id_) - def serialize(self, buf, offset): + def serialize_body(self): data = bytearray() - msg_pack_into(NXActionConjunction._fmt_str, data, 0, + msg_pack_into(self._fmt_str, data, 0, self.clause, self.n_clauses, self.id) - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionConjunction, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + 
return data - class NXActionResubmitTable(NXAction): - _subtype = nicira_ext.NXAST_RESUBMIT_TABLE + class NXActionMultipath(NXAction): + _subtype = nicira_ext.NXAST_MULTIPATH - # in_port, table_id - _fmt_str = '!HB3x' + # fields, basis, algorithm, max_link, + # arg, ofs_nbits, dst + _fmt_str = '!HH2xHHI2xHI' def __init__(self, - in_port, - table_id, + fields, + basis, + algorithm, + max_link, + arg, + start, + end, + dst, type_=None, len_=None, experimenter=None, subtype=None): - super(NXActionResubmitTable, self).__init__() - self.in_port = in_port - self.table_id = table_id + super(NXActionMultipath, self).__init__() + self.fields = fields + self.basis = basis + self.algorithm = algorithm + self.max_link = max_link + self.arg = arg + self.start = start + self.end = end + self.dst = dst @classmethod - def parse(cls, buf): - (in_port, - table_id) = struct.unpack_from( - NXActionResubmitTable._fmt_str, buf, 0) - return cls(in_port, table_id) + def parser(cls, buf): + (fields, + basis, + algorithm, + max_link, + arg, + ofs_nbits, + dst) = struct.unpack_from( + cls._fmt_str, buf, 0) + start = ofs_nbits >> 6 + end = (ofs_nbits & 0x3f) + start + return cls(fields, + basis, + algorithm, + max_link, + arg, + start, + end, + dst) - def serialize(self, buf, offset): + def serialize_body(self): + ofs_nbits = (self.start << 6) + (self.end - self.start) data = bytearray() - msg_pack_into(NXActionResubmitTable._fmt_str, data, 0, - self.in_port, - self.table_id) - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionResubmitTable, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + msg_pack_into(self._fmt_str, data, 0, + self.fields, + self.basis, + self.algorithm, + self.max_link, + self.arg, + ofs_nbits, + self.dst) + return data + + class _NXActionBundleBase(NXAction): + # algorithm, fields, 
basis, slave_type, n_slaves + # ofs_nbits, dst, slaves + _fmt_str = '!HHHIHHI4x' + + def __init__(self, algorithm, fields, basis, slave_type, n_slaves, + start, end, dst, slaves): + super(_NXActionBundleBase, self).__init__() + self.len = utils.round_up( + nicira_ext.NX_ACTION_BUNDLE_0_SIZE + len(slaves) * 2, 8) + + self.algorithm = algorithm + self.fields = fields + self.basis = basis + self.slave_type = slave_type + self.n_slaves = n_slaves + self.start = start + self.end = end + self.dst = dst + + assert isinstance(slaves, (list, tuple)) + for s in slaves: + assert isinstance(s, six.integer_types) + + self.slaves = slaves + + @classmethod + def parser(cls, buf): + (algorithm, fields, basis, + slave_type, n_slaves, ofs_nbits, dst) = struct.unpack_from( + cls._fmt_str, buf, 0) + start = ofs_nbits >> 6 + end = (ofs_nbits & 0x3f) + start + slave_offset = (nicira_ext.NX_ACTION_BUNDLE_0_SIZE - + nicira_ext.NX_ACTION_HEADER_0_SIZE) + + slaves = [] + for i in range(0, n_slaves): + s = struct.unpack_from('!H', buf, slave_offset) + slaves.append(s[0]) + slave_offset += 2 + + return cls(algorithm, fields, basis, slave_type, + n_slaves, start, end, dst, slaves) + + def serialize_body(self): + ofs_nbits = (self.start << 6) + (self.end - self.start) + data = bytearray() + slave_offset = (nicira_ext.NX_ACTION_BUNDLE_0_SIZE - + nicira_ext.NX_ACTION_HEADER_0_SIZE) + self.n_slaves = len(self.slaves) + for s in self.slaves: + msg_pack_into('!H', data, slave_offset, s) + slave_offset += 2 + pad_len = (utils.round_up(self.n_slaves, 4) - + self.n_slaves) + + if pad_len != 0: + msg_pack_into('%dx' % pad_len * 2, data, slave_offset) + + msg_pack_into(self._fmt_str, data, 0, + self.algorithm, self.fields, self.basis, + self.slave_type, self.n_slaves, + ofs_nbits, self.dst) + + return data + + class NXActionBundle(_NXActionBundleBase): + _subtype = nicira_ext.NXAST_BUNDLE + + def __init__(self, algorithm, fields, basis, slave_type, n_slaves, + start, end, dst, slaves): + # NXAST_BUNDLE 
actions should have 'ofs_nbits' and 'dst' zeroed. + super(NXActionBundle, self).__init__( + algorithm, fields, basis, slave_type, n_slaves, + start=0, end=0, dst=0, slaves=slaves) + + class NXActionBundleLoad(_NXActionBundleBase): + _subtype = nicira_ext.NXAST_BUNDLE_LOAD + + def __init__(self, algorithm, fields, basis, slave_type, n_slaves, + start, end, dst, slaves): + super(NXActionBundleLoad, self).__init__( + algorithm, fields, basis, slave_type, n_slaves, + start, end, dst, slaves) class NXActionCT(NXAction): _subtype = nicira_ext.NXAST_CT - # flags, zone_src, zone_ofs_nbits (zone_imm), recirc_table, + # flags, zone_src, zone_ofs_nbits, recirc_table, # pad, alg _fmt_str = '!HIHB3xH' # Followed by actions @@ -427,7 +811,8 @@ def generate(ofp_name, ofpp_name): def __init__(self, flags, zone_src, - zone_ofs_nbits, # is zone_imm if zone_src == 0 + zone_start, + zone_end, recirc_table, alg, actions, @@ -435,20 +820,23 @@ def generate(ofp_name, ofpp_name): super(NXActionCT, self).__init__() self.flags = flags self.zone_src = zone_src - self.zone_ofs_nbits = zone_ofs_nbits + self.zone_start = zone_start + self.zone_end = zone_end self.recirc_table = recirc_table self.alg = alg self.actions = actions @classmethod - def parse(cls, buf): + def parser(cls, buf): (flags, zone_src, zone_ofs_nbits, recirc_table, alg,) = struct.unpack_from( - NXActionCT._fmt_str, buf, 0) - rest = buf[struct.calcsize(NXActionCT._fmt_str):] + cls._fmt_str, buf, 0) + zone_start = zone_ofs_nbits >> 6 + zone_end = (zone_ofs_nbits & 0x3f) + zone_start + rest = buf[struct.calcsize(cls._fmt_str):] # actions actions = [] while len(rest) > 0: @@ -456,27 +844,22 @@ def generate(ofp_name, ofpp_name): actions.append(action) rest = rest[action.len:] - return cls(flags, zone_src, zone_ofs_nbits, recirc_table, + return cls(flags, zone_src, zone_start, zone_end, recirc_table, alg, actions) - def serialize(self, buf, offset): + def serialize_body(self): + zone_ofs_nbits = ((self.zone_start << 6) + + 
(self.zone_end - self.zone_start)) data = bytearray() - msg_pack_into(NXActionCT._fmt_str, data, 0, + msg_pack_into(self._fmt_str, data, 0, self.flags, self.zone_src, - self.zone_ofs_nbits, + zone_ofs_nbits, self.recirc_table, self.alg) for a in self.actions: a.serialize(data, len(data)) - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionCT, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + return data class NXActionNAT(NXAction): _subtype = nicira_ext.NXAST_NAT @@ -513,11 +896,11 @@ def generate(ofp_name, ofpp_name): self.range_proto_max = range_proto_max @classmethod - def parse(cls, buf): + def parser(cls, buf): (flags, range_present) = struct.unpack_from( - NXActionNAT._fmt_str, buf, 0) - rest = buf[struct.calcsize(NXActionNAT._fmt_str):] + cls._fmt_str, buf, 0) + rest = buf[struct.calcsize(cls._fmt_str):] # optional parameters kwargs = dict() if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MIN: @@ -534,15 +917,15 @@ def generate(ofp_name, ofpp_name): kwargs['range_ipv6_max'] = ( type_desc.IPv6Addr.to_user(rest[:16])) rest = rest[16:] - if range_present & NX_NAT_RANGE_PROTO_MIN: + if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MIN: kwargs['range_proto_min'] = type_desc.Int2.to_user(rest[:2]) rest = rest[2:] - if range_present & NX_NAT_RANGE_PROTO_MAX: + if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MAX: kwargs['range_proto_max'] = type_desc.Int2.to_user(rest[:2]) return cls(flags, **kwargs) - def serialize(self, buf, offset): + def serialize_body(self): # Pack optional parameters first, as range_present needs # to be calculated. 
optional_data = b'' @@ -573,20 +956,13 @@ def generate(ofp_name, ofpp_name): self.range_proto_max) data = bytearray() - msg_pack_into(NXActionNAT._fmt_str, data, 0, + msg_pack_into(self._fmt_str, data, 0, self.flags, range_present) msg_pack_into('!%ds' % len(optional_data), data, len(data), optional_data) - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXActionNAT, self).serialize(buf, offset) - msg_pack_into('!%ds' % len(data), buf, offset + payload_offset, - bytes(data)) + return data def add_attr(k, v): v.__module__ = ofpp.__name__ # Necessary for stringify stuff @@ -596,10 +972,23 @@ def generate(ofp_name, ofpp_name): add_attr('NXActionUnknown', NXActionUnknown) classes = [ + 'NXActionPopQueue', + 'NXActionRegLoad', + 'NXActionNote', + 'NXActionSetTunnel', + 'NXActionSetTunnel64', 'NXActionRegMove', - 'NXActionLearn', - 'NXActionConjunction', + 'NXActionResubmit', 'NXActionResubmitTable', + 'NXActionOutputReg', + 'NXActionLearn', + 'NXActionExit', + 'NXActionController', + 'NXActionFinTimeout', + 'NXActionConjunction', + 'NXActionMultipath', + 'NXActionBundle', + 'NXActionBundleLoad', 'NXActionCT', 'NXActionNAT', '_NXFlowSpec', # exported for testing diff --git a/ryu/ofproto/nx_match.py b/ryu/ofproto/nx_match.py index 281fdc22..77803ccf 100644 --- a/ryu/ofproto/nx_match.py +++ b/ryu/ofproto/nx_match.py @@ -16,16 +16,15 @@ # limitations under the License. 
import struct -import sys from ryu import exception from ryu.lib import mac -from ryu.lib import type_desc from ryu.lib.pack_utils import msg_pack_into +from ryu.ofproto import ether from ryu.ofproto import ofproto_parser from ryu.ofproto import ofproto_v1_0 from ryu.ofproto import inet -from ryu.ofproto import oxm_fields + import logging LOG = logging.getLogger('ryu.ofproto.nx_match') @@ -94,6 +93,7 @@ class Flow(ofproto_parser.StringifyMixin): self.regs = [0] * FLOW_N_REGS self.ipv6_label = 0 self.pkt_mark = 0 + self.tcp_flags = 0 class FlowWildcards(ofproto_parser.StringifyMixin): @@ -116,6 +116,7 @@ class FlowWildcards(ofproto_parser.StringifyMixin): self.regs_mask = [0] * FLOW_N_REGS self.wildcards = ofproto_v1_0.OFPFW_ALL self.pkt_mark_mask = 0 + self.tcp_flags_mask = 0 class ClsRule(ofproto_parser.StringifyMixin): @@ -312,6 +313,10 @@ class ClsRule(ofproto_parser.StringifyMixin): self.flow.pkt_mark = pkt_mark self.wc.pkt_mark_mask = mask + def set_tcp_flags(self, tcp_flags, mask): + self.flow.tcp_flags = tcp_flags + self.wc.tcp_flags_mask = mask + def flow_format(self): # Tunnel ID is only supported by NXM if self.wc.tun_id_mask != 0: @@ -332,6 +337,9 @@ class ClsRule(ofproto_parser.StringifyMixin): if self.wc.regs_bits > 0: return ofproto_v1_0.NXFF_NXM + if self.flow.tcp_flags > 0: + return ofproto_v1_0.NXFF_NXM + return ofproto_v1_0.NXFF_OPENFLOW10 def match_tuple(self): @@ -948,6 +956,19 @@ class MFPktMark(MFField): rule.wc.pkt_mark_mask) +@_register_make +@_set_nxm_headers([ofproto_v1_0.NXM_NX_TCP_FLAGS, + ofproto_v1_0.NXM_NX_TCP_FLAGS_W]) +class MFTcpFlags(MFField): + @classmethod + def make(cls, header): + return cls(header, MF_PACK_STRING_BE16) + + def put(self, buf, offset, rule): + return self.putm(buf, offset, rule.flow.tcp_flags, + rule.wc.tcp_flags_mask) + + def serialize_nxm_match(rule, buf, offset): old_offset = offset @@ -1029,6 +1050,22 @@ def serialize_nxm_match(rule, buf, offset): if header != 0: offset += nxm_put(buf, offset, header, rule) 
+ if rule.flow.tcp_flags != 0: + # TCP Flags can only be used if the ethernet type is IPv4 or IPv6 + if rule.flow.dl_type in (ether.ETH_TYPE_IP, ether.ETH_TYPE_IPV6): + # TCP Flags can only be used if the ip protocol is TCP + if rule.flow.nw_proto == inet.IPPROTO_TCP: + if rule.wc.tcp_flags_mask == UINT16_MAX: + header = ofproto_v1_0.NXM_NX_TCP_FLAGS + else: + header = ofproto_v1_0.NXM_NX_TCP_FLAGS_W + else: + header = 0 + else: + header = 0 + if header != 0: + offset += nxm_put(buf, offset, header, rule) + # IP Source and Destination if rule.flow.nw_src != 0: if rule.wc.nw_src_mask == UINT32_MAX: @@ -1189,65 +1226,3 @@ class NXMatch(object): msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING, buf, offset, self.header) return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING) - - -# -# The followings are implementations for OpenFlow 1.2+ -# - -sys.modules[__name__].__doc__ = """ -The API of this class is the same as ``OFPMatch``. - -You can define the flow match by the keyword arguments. -The following arguments are available. - -================ =============== ================================== -Argument Value Description -================ =============== ================================== -eth_dst_nxm MAC address Ethernet destination address. -eth_src_nxm MAC address Ethernet source address. -tunnel_id_nxm Integer 64bit Tunnel identifier. -tun_ipv4_src IPv4 address Tunnel IPv4 source address. -tun_ipv4_dst IPv4 address Tunnel IPv4 destination address. -pkt_mark Integer 32bit Packet metadata mark. -conj_id Integer 32bit Conjunction ID used only with - the conjunction action -ct_state Integer 32bit Conntrack state. -ct_zone Integer 16bit Conntrack zone. -ct_mark Integer 32bit Conntrack mark. -ct_label Integer 128bit Conntrack label. -_dp_hash Integer 32bit Flow hash computed in Datapath. -reg Integer 32bit Packet register. - is register number 0-7. 
-================ =============== ================================== -""" - -oxm_types = [ - oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr), - oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr), - oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8), - oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr), - oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr), - oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4), - oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4), - oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4), - oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2), - oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4), - oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16), - - # The following definition is merely for testing 64-bit experimenter OXMs. - # Following Open vSwitch, we use dp_hash for this purpose. - # Prefix the name with '_' to indicate this is not intended to be used - # in wild. 
- oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4), - - # Support for matching/setting NX registers 0-7 - oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4), - oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4), - oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4), - oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4), - oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4), - oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4), - oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4), - oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4), -] diff --git a/ryu/ofproto/ofproto_common.py b/ryu/ofproto/ofproto_common.py index 0d64b54f..4f0d9956 100644 --- a/ryu/ofproto/ofproto_common.py +++ b/ryu/ofproto/ofproto_common.py @@ -21,10 +21,13 @@ OFP_HEADER_PACK_STR = '!BBHI' OFP_HEADER_SIZE = 8 assert calcsize(OFP_HEADER_PACK_STR) == OFP_HEADER_SIZE -# note: while IANA assigned port number for OpenFlow is 6653, -# 6633 is (still) the defacto standard. -OFP_TCP_PORT = 6633 -OFP_SSL_PORT = 6633 +# Note: IANA assigned port number for OpenFlow is 6653 +# from OpenFlow 1.3.3 (EXT-133). +# Some applications may still use 6633 as the de facto standard though. 
+OFP_TCP_PORT = 6653 +OFP_SSL_PORT = 6653 +OFP_TCP_PORT_OLD = 6633 +OFP_SSL_PORT_OLD = 6633 # Vendor/Experimenter IDs # https://rs.opennetworking.org/wiki/display/PUBLIC/ONF+Registry diff --git a/ryu/ofproto/ofproto_parser.py b/ryu/ofproto/ofproto_parser.py index 1e553caa..670878d8 100644 --- a/ryu/ofproto/ofproto_parser.py +++ b/ryu/ofproto/ofproto_parser.py @@ -170,7 +170,7 @@ class MsgBase(StringifyMixin): def __str__(self): def hexify(x): - return hex(x) if isinstance(x, int) else x + return hex(x) if isinstance(x, six.integer_types) else x buf = 'version=%s,msg_type=%s,msg_len=%s,xid=%s,' %\ (hexify(self.version), hexify(self.msg_type), hexify(self.msg_len), hexify(self.xid)) diff --git a/ryu/ofproto/ofproto_v1_0.py b/ryu/ofproto/ofproto_v1_0.py index c6cd9736..52d1b746 100644 --- a/ryu/ofproto/ofproto_v1_0.py +++ b/ryu/ofproto/ofproto_v1_0.py @@ -18,10 +18,8 @@ OpenFlow 1.0 definitions. """ -from struct import calcsize - from ryu.ofproto import ofproto_utils - +from ryu.ofproto.nicira_ext import * # For API compat MAX_XID = 0xffffffff @@ -227,6 +225,8 @@ OFP_ACTION_VENDOR_HEADER_PACK_STR = '!HHI' OFP_ACTION_VENDOR_HEADER_SIZE = 8 assert (calcsize(OFP_ACTION_VENDOR_HEADER_PACK_STR) == OFP_ACTION_VENDOR_HEADER_SIZE) +# OpenFlow1.2 or later compatible +OFP_ACTION_EXPERIMENTER_HEADER_SIZE = OFP_ACTION_VENDOR_HEADER_SIZE OFP_ACTION_HEADER_PACK_STR = '!HH4x' OFP_ACTION_HEADER_SIZE = 8 @@ -496,107 +496,37 @@ OFP_QUEUE_PROP_MIN_RATE_SIZE = 16 assert (calcsize(OFP_QUEUE_PROP_MIN_RATE_PACK_STR) + OFP_QUEUE_PROP_HEADER_SIZE == OFP_QUEUE_PROP_MIN_RATE_SIZE) +# OXM + +# enum ofp_oxm_class +OFPXMC_OPENFLOW_BASIC = 0x8000 # Basic class for OpenFlow + + +def _oxm_tlv_header(class_, field, hasmask, length): + return (class_ << 16) | (field << 9) | (hasmask << 8) | length + + +def oxm_tlv_header(field, length): + return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 0, length) + + +def oxm_tlv_header_w(field, length): + return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 1, 
length * 2) + + +def oxm_tlv_header_extract_hasmask(header): + return (header >> 8) & 1 + + +def oxm_tlv_header_extract_length(header): + if oxm_tlv_header_extract_hasmask(header): + length = (header & 0xff) // 2 + else: + length = header & 0xff + return length + + +oxm_fields.generate(__name__) + # generate utility methods ofproto_utils.generate(__name__) - - -def nxm_header__(vendor, field, hasmask, length): - return (vendor << 16) | (field << 9) | (hasmask << 8) | length - - -def nxm_header(vendor, field, length): - return nxm_header__(vendor, field, 0, length) - - -def nxm_header_w(vendor, field, length): - return nxm_header__(vendor, field, 1, (length) * 2) - - -NXM_OF_IN_PORT = nxm_header(0x0000, 0, 2) - -NXM_OF_ETH_DST = nxm_header(0x0000, 1, 6) -NXM_OF_ETH_DST_W = nxm_header_w(0x0000, 1, 6) -NXM_OF_ETH_SRC = nxm_header(0x0000, 2, 6) -NXM_OF_ETH_SRC_W = nxm_header_w(0x0000, 2, 6) -NXM_OF_ETH_TYPE = nxm_header(0x0000, 3, 2) - -NXM_OF_VLAN_TCI = nxm_header(0x0000, 4, 2) -NXM_OF_VLAN_TCI_W = nxm_header_w(0x0000, 4, 2) - -NXM_OF_IP_TOS = nxm_header(0x0000, 5, 1) - -NXM_OF_IP_PROTO = nxm_header(0x0000, 6, 1) - -NXM_OF_IP_SRC = nxm_header(0x0000, 7, 4) -NXM_OF_IP_SRC_W = nxm_header_w(0x0000, 7, 4) -NXM_OF_IP_DST = nxm_header(0x0000, 8, 4) -NXM_OF_IP_DST_W = nxm_header_w(0x0000, 8, 4) - -NXM_OF_TCP_SRC = nxm_header(0x0000, 9, 2) -NXM_OF_TCP_SRC_W = nxm_header_w(0x0000, 9, 2) -NXM_OF_TCP_DST = nxm_header(0x0000, 10, 2) -NXM_OF_TCP_DST_W = nxm_header_w(0x0000, 10, 2) - -NXM_OF_UDP_SRC = nxm_header(0x0000, 11, 2) -NXM_OF_UDP_SRC_W = nxm_header_w(0x0000, 11, 2) -NXM_OF_UDP_DST = nxm_header(0x0000, 12, 2) -NXM_OF_UDP_DST_W = nxm_header_w(0x0000, 12, 2) - -NXM_OF_ICMP_TYPE = nxm_header(0x0000, 13, 1) -NXM_OF_ICMP_CODE = nxm_header(0x0000, 14, 1) - -NXM_OF_ARP_OP = nxm_header(0x0000, 15, 2) - -NXM_OF_ARP_SPA = nxm_header(0x0000, 16, 4) -NXM_OF_ARP_SPA_W = nxm_header_w(0x0000, 16, 4) -NXM_OF_ARP_TPA = nxm_header(0x0000, 17, 4) -NXM_OF_ARP_TPA_W = nxm_header_w(0x0000, 17, 
4) - -NXM_NX_TUN_ID = nxm_header(0x0001, 16, 8) -NXM_NX_TUN_ID_W = nxm_header_w(0x0001, 16, 8) -NXM_NX_TUN_IPV4_SRC = nxm_header(0x0001, 31, 4) -NXM_NX_TUN_IPV4_SRC_W = nxm_header_w(0x0001, 31, 4) -NXM_NX_TUN_IPV4_DST = nxm_header(0x0001, 32, 4) -NXM_NX_TUN_IPV4_DST_W = nxm_header_w(0x0001, 32, 4) - -NXM_NX_ARP_SHA = nxm_header(0x0001, 17, 6) -NXM_NX_ARP_THA = nxm_header(0x0001, 18, 6) - -NXM_NX_IPV6_SRC = nxm_header(0x0001, 19, 16) -NXM_NX_IPV6_SRC_W = nxm_header_w(0x0001, 19, 16) -NXM_NX_IPV6_DST = nxm_header(0x0001, 20, 16) -NXM_NX_IPV6_DST_W = nxm_header_w(0x0001, 20, 16) - -NXM_NX_ICMPV6_TYPE = nxm_header(0x0001, 21, 1) -NXM_NX_ICMPV6_CODE = nxm_header(0x0001, 22, 1) - -NXM_NX_ND_TARGET = nxm_header(0x0001, 23, 16) -NXM_NX_ND_TARGET_W = nxm_header_w(0x0001, 23, 16) - -NXM_NX_ND_SLL = nxm_header(0x0001, 24, 6) - -NXM_NX_ND_TLL = nxm_header(0x0001, 25, 6) - -NXM_NX_IP_FRAG = nxm_header(0x0001, 26, 1) -NXM_NX_IP_FRAG_W = nxm_header_w(0x0001, 26, 1) - -NXM_NX_IPV6_LABEL = nxm_header(0x0001, 27, 4) - -NXM_NX_IP_ECN = nxm_header(0x0001, 28, 1) - -NXM_NX_IP_TTL = nxm_header(0x0001, 29, 1) - -NXM_NX_PKT_MARK = nxm_header(0x0001, 33, 4) -NXM_NX_PKT_MARK_W = nxm_header_w(0x0001, 33, 4) - - -def nxm_nx_reg(idx): - return nxm_header(0x0001, idx, 4) - - -def nxm_nx_reg_w(idx): - return nxm_header_w(0x0001, idx, 4) - -NXM_HEADER_PACK_STRING = '!I' - -from ryu.ofproto.nicira_ext import * # For API compat diff --git a/ryu/ofproto/ofproto_v1_0_parser.py b/ryu/ofproto/ofproto_v1_0_parser.py index 4952338e..a34564b5 100644 --- a/ryu/ofproto/ofproto_v1_0_parser.py +++ b/ryu/ofproto/ofproto_v1_0_parser.py @@ -28,10 +28,11 @@ from ryu.lib import addrconv from ryu.lib import ip from ryu.lib import mac from ryu.lib.pack_utils import msg_pack_into +from ryu.ofproto import nx_match from ryu.ofproto import ofproto_common from ryu.ofproto import ofproto_parser from ryu.ofproto import ofproto_v1_0 as ofproto -from ryu.ofproto import nx_match +from ryu.ofproto import nx_actions from ryu 
import utils import logging @@ -215,7 +216,8 @@ class OFPMatch(StringifyMixin): self.dl_src = mac.DONTCARE else: wc &= ~ofproto.OFPFW_DL_SRC - if isinstance(dl_src, (six.text_type, str)) and netaddr.valid_mac(dl_src): + if (isinstance(dl_src, (six.text_type, str)) and + netaddr.valid_mac(dl_src)): dl_src = addrconv.mac.text_to_bin(dl_src) if dl_src == 0: self.dl_src = mac.DONTCARE @@ -226,7 +228,8 @@ class OFPMatch(StringifyMixin): self.dl_dst = mac.DONTCARE else: wc &= ~ofproto.OFPFW_DL_DST - if isinstance(dl_dst, (six.text_type, str)) and netaddr.valid_mac(dl_dst): + if (isinstance(dl_dst, (six.text_type, str)) and + netaddr.valid_mac(dl_dst)): dl_dst = addrconv.mac.text_to_bin(dl_dst) if dl_dst == 0: self.dl_dst = mac.DONTCARE @@ -518,7 +521,8 @@ class OFPActionStripVlan(OFPAction): class OFPActionDlAddr(OFPAction): def __init__(self, dl_addr): super(OFPActionDlAddr, self).__init__() - if isinstance(dl_addr, (six.text_type, str)) and netaddr.valid_mac(dl_addr): + if (isinstance(dl_addr, (six.text_type, str)) and + netaddr.valid_mac(dl_addr)): dl_addr = addrconv.mac.text_to_bin(dl_addr) self.dl_addr = dl_addr @@ -781,16 +785,63 @@ class OFPActionVendor(OFPAction): return cls return _register_action_vendor - def __init__(self): + def __init__(self, vendor=None): super(OFPActionVendor, self).__init__() - self.vendor = self.cls_vendor + self.type = ofproto.OFPAT_VENDOR + self.len = None + + if vendor is None: + self.vendor = self.cls_vendor + else: + self.vendor = vendor @classmethod def parser(cls, buf, offset): type_, len_, vendor = struct.unpack_from( ofproto.OFP_ACTION_VENDOR_HEADER_PACK_STR, buf, offset) - cls_ = cls._ACTION_VENDORS.get(vendor) - return cls_.parser(buf, offset) + + data = buf[(offset + ofproto.OFP_ACTION_VENDOR_HEADER_SIZE + ): offset + len_] + + if vendor == ofproto_common.NX_EXPERIMENTER_ID: + obj = NXAction.parse(data) # noqa + else: + cls_ = cls._ACTION_VENDORS.get(vendor, None) + + if cls_ is None: + obj = OFPActionVendorUnknown(vendor, 
data) + else: + obj = cls_.parser(buf, offset) + + obj.len = len_ + return obj + + def serialize(self, buf, offset): + msg_pack_into(ofproto.OFP_ACTION_VENDOR_HEADER_PACK_STR, + buf, offset, self.type, self.len, self.vendor) + +# OpenFlow1.2 or later compatible +OFPActionExperimenter = OFPActionVendor + + +class OFPActionVendorUnknown(OFPActionVendor): + def __init__(self, vendor, data=None, type_=None, len_=None): + super(OFPActionVendorUnknown, + self).__init__(vendor=vendor) + self.data = data + + def serialize(self, buf, offset): + # fixup + data = self.data + if data is None: + data = bytearray() + self.len = (utils.round_up(len(data), 8) + + ofproto.OFP_ACTION_VENDOR_HEADER_SIZE) + super(OFPActionVendorUnknown, self).serialize(buf, offset) + msg_pack_into('!%ds' % len(self.data), + buf, + offset + ofproto.OFP_ACTION_VENDOR_HEADER_SIZE, + self.data) @OFPActionVendor.register_action_vendor(ofproto_common.NX_EXPERIMENTER_ID) @@ -822,476 +873,6 @@ class NXActionHeader(OFPActionVendor): return cls_.parser(buf, offset) -class NXActionResubmitBase(NXActionHeader): - def __init__(self, in_port, table): - super(NXActionResubmitBase, self).__init__() - assert self.subtype in (ofproto.NXAST_RESUBMIT, - ofproto.NXAST_RESUBMIT_TABLE) - self.in_port = in_port - self.table = table - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.in_port, self.table) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_RESUBMIT, ofproto.NX_ACTION_RESUBMIT_SIZE) -class NXActionResubmit(NXActionResubmitBase): - def __init__(self, in_port=ofproto.OFPP_IN_PORT): - super(NXActionResubmit, self).__init__(in_port, 0) - - @classmethod - def parser(cls, buf, offset): - type_, len_, vendor, subtype, in_port, table = struct.unpack_from( - ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset) - return cls(in_port) - - -@NXActionHeader.register_nx_action_subtype( - 
ofproto.NXAST_RESUBMIT_TABLE, ofproto.NX_ACTION_RESUBMIT_SIZE) -class NXActionResubmitTable(NXActionResubmitBase): - def __init__(self, in_port=ofproto.OFPP_IN_PORT, table=0xff): - super(NXActionResubmitTable, self).__init__(in_port, table) - - @classmethod - def parser(cls, buf, offset): - type_, len_, vendor, subtype, in_port, table = struct.unpack_from( - ofproto.NX_ACTION_RESUBMIT_PACK_STR, buf, offset) - return cls(in_port, table) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_SET_TUNNEL, ofproto.NX_ACTION_SET_TUNNEL_SIZE) -class NXActionSetTunnel(NXActionHeader): - def __init__(self, tun_id): - super(NXActionSetTunnel, self).__init__() - self.tun_id = tun_id - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_SET_TUNNEL_PACK_STR, buf, - offset, self.type, self.len, self.vendor, self.subtype, - self.tun_id) - - @classmethod - def parser(cls, buf, offset): - type_, len_, vendor, subtype, tun_id = struct.unpack_from( - ofproto.NX_ACTION_SET_TUNNEL_PACK_STR, buf, offset) - return cls(tun_id) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_SET_QUEUE, ofproto.NX_ACTION_SET_QUEUE_SIZE) -class NXActionSetQueue(NXActionHeader): - def __init__(self, queue_id): - super(NXActionSetQueue, self).__init__() - self.queue_id = queue_id - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_SET_QUEUE_PACK_STR, buf, - offset, self.type, self.len, self.vendor, - self.subtype, self.queue_id) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, queue_id) = struct.unpack_from( - ofproto.NX_ACTION_SET_QUEUE_PACK_STR, buf, offset) - return cls(queue_id) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_POP_QUEUE, ofproto.NX_ACTION_POP_QUEUE_SIZE) -class NXActionPopQueue(NXActionHeader): - def __init__(self): - super(NXActionPopQueue, self).__init__() - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_POP_QUEUE_PACK_STR, buf, - offset, self.type, 
self.len, self.vendor, - self.subtype) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype) = struct.unpack_from( - ofproto.NX_ACTION_POP_QUEUE_PACK_STR, buf, offset) - return cls() - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_REG_MOVE, ofproto.NX_ACTION_REG_MOVE_SIZE) -class NXActionRegMove(NXActionHeader): - def __init__(self, n_bits, src_ofs, dst_ofs, src, dst): - super(NXActionRegMove, self).__init__() - self.n_bits = n_bits - self.src_ofs = src_ofs - self.dst_ofs = dst_ofs - self.src = src - self.dst = dst - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_REG_MOVE_PACK_STR, buf, - offset, self.type, self.len, self.vendor, - self.subtype, self.n_bits, self.src_ofs, self.dst_ofs, - self.src, self.dst) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, n_bits, src_ofs, dst_ofs, - src, dst) = struct.unpack_from( - ofproto.NX_ACTION_REG_MOVE_PACK_STR, buf, offset) - return cls(n_bits, src_ofs, dst_ofs, src, dst) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_REG_LOAD, ofproto.NX_ACTION_REG_LOAD_SIZE) -class NXActionRegLoad(NXActionHeader): - def __init__(self, ofs_nbits, dst, value): - super(NXActionRegLoad, self).__init__() - self.ofs_nbits = ofs_nbits - self.dst = dst - self.value = value - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_REG_LOAD_PACK_STR, buf, - offset, self.type, self.len, self.vendor, - self.subtype, self.ofs_nbits, self.dst, self.value) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, ofs_nbits, dst, - value) = struct.unpack_from( - ofproto.NX_ACTION_REG_LOAD_PACK_STR, buf, offset) - return cls(ofs_nbits, dst, value) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_SET_TUNNEL64, ofproto.NX_ACTION_SET_TUNNEL64_SIZE) -class NXActionSetTunnel64(NXActionHeader): - def __init__(self, tun_id): - super(NXActionSetTunnel64, self).__init__() - self.tun_id = tun_id 
- - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR, buf, - offset, self.type, self.len, self.vendor, self.subtype, - self.tun_id) - - @classmethod - def parser(cls, buf, offset): - type_, len_, vendor, subtype, tun_id = struct.unpack_from( - ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR, buf, offset) - return cls(tun_id) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_MULTIPATH, ofproto.NX_ACTION_MULTIPATH_SIZE) -class NXActionMultipath(NXActionHeader): - def __init__(self, fields, basis, algorithm, max_link, arg, - ofs_nbits, dst): - super(NXActionMultipath, self).__init__() - self.fields = fields - self.basis = basis - self.algorithm = algorithm - self.max_link = max_link - self.arg = arg - self.ofs_nbits = ofs_nbits - self.dst = dst - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_MULTIPATH_PACK_STR, buf, - offset, self.type, self.len, self.vendor, self.subtype, - self.fields, self.basis, self.algorithm, self.max_link, - self.arg, self.ofs_nbits, self.dst) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, fields, basis, algorithm, - max_link, arg, ofs_nbits, dst) = struct.unpack_from( - ofproto.NX_ACTION_MULTIPATH_PACK_STR, buf, offset) - return cls(fields, basis, algorithm, max_link, arg, ofs_nbits, - dst) - - -@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_NOTE, 0) -class NXActionNote(NXActionHeader): - def __init__(self, note): - super(NXActionNote, self).__init__() - # should check here if the note is valid (only hex values) - pad = (len(note) + 10) % 8 - if pad: - note += [0x0 for i in range(8 - pad)] - self.note = note - self.len = len(note) + 10 - - def serialize(self, buf, offset): - note = self.note - extra = None - extra_len = len(self.note) - 6 - if extra_len > 0: - extra = note[6:] - note = note[0:6] - msg_pack_into(ofproto.NX_ACTION_NOTE_PACK_STR, buf, - offset, self.type, self.len, self.vendor, self.subtype, - *note) - if extra_len 
> 0: - msg_pack_into('B' * extra_len, buf, - offset + ofproto.NX_ACTION_NOTE_SIZE, - *extra) - - @classmethod - def parser(cls, buf, offset): - note = struct.unpack_from( - ofproto.NX_ACTION_NOTE_PACK_STR, buf, offset) - (type_, len_, vendor, subtype) = note[0:4] - note = [i for i in note[4:]] - if len_ > ofproto.NX_ACTION_NOTE_SIZE: - note_start = offset + ofproto.NX_ACTION_NOTE_SIZE - note_end = note_start + len_ - ofproto.NX_ACTION_NOTE_SIZE - note += [int(binascii.b2a_hex(i), 16) for i - in buf[note_start:note_end]] - return cls(note) - - -class NXActionBundleBase(NXActionHeader): - def __init__(self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves): - super(NXActionBundleBase, self).__init__() - _len = ofproto.NX_ACTION_BUNDLE_SIZE + len(slaves) * 2 - _len += (_len % 8) - self.len = _len - - self.algorithm = algorithm - self.fields = fields - self.basis = basis - self.slave_type = slave_type - self.n_slaves = n_slaves - self.ofs_nbits = ofs_nbits - self.dst = dst - self.slaves = slaves - - def serialize(self, buf, offset): - slave_offset = offset + ofproto.NX_ACTION_BUNDLE_SIZE - - for s in self.slaves: - msg_pack_into('!H', buf, slave_offset, s) - slave_offset += 2 - - pad_len = (len(self.slaves) * 2 + - ofproto.NX_ACTION_BUNDLE_SIZE) % 8 - - if pad_len != 0: - msg_pack_into('%dx' % pad_len, buf, slave_offset) - - msg_pack_into(ofproto.NX_ACTION_BUNDLE_PACK_STR, buf, - offset, self.type, self.len, self.vendor, self.subtype, - self.algorithm, self.fields, self.basis, - self.slave_type, self.n_slaves, - self.ofs_nbits, self.dst) - - @classmethod - def parser(cls, action_cls, buf, offset): - (type_, len_, vendor, subtype, algorithm, fields, basis, - slave_type, n_slaves, ofs_nbits, dst) = struct.unpack_from( - ofproto.NX_ACTION_BUNDLE_PACK_STR, buf, offset) - slave_offset = offset + ofproto.NX_ACTION_BUNDLE_SIZE - - slaves = [] - for i in range(0, n_slaves): - s = struct.unpack_from('!H', buf, slave_offset) - slaves.append(s[0]) - 
slave_offset += 2 - - return action_cls(algorithm, fields, basis, slave_type, - n_slaves, ofs_nbits, dst, slaves) - - -@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_BUNDLE, 0) -class NXActionBundle(NXActionBundleBase): - def __init__(self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves): - super(NXActionBundle, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves) - - @classmethod - def parser(cls, buf, offset): - return NXActionBundleBase.parser(NXActionBundle, buf, offset) - - -@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_BUNDLE_LOAD, 0) -class NXActionBundleLoad(NXActionBundleBase): - def __init__(self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves): - super(NXActionBundleLoad, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves) - - @classmethod - def parser(cls, buf, offset): - return NXActionBundleBase.parser(NXActionBundleLoad, buf, offset) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_AUTOPATH, ofproto.NX_ACTION_AUTOPATH_SIZE) -class NXActionAutopath(NXActionHeader): - def __init__(self, ofs_nbits, dst, id_): - super(NXActionAutopath, self).__init__() - self.ofs_nbits = ofs_nbits - self.dst = dst - self.id = id_ - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_AUTOPATH_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.ofs_nbits, self.dst, self.id) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, ofs_nbits, dst, - id_) = struct.unpack_from( - ofproto.NX_ACTION_AUTOPATH_PACK_STR, buf, offset) - return cls(ofs_nbits, dst, id_) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_OUTPUT_REG, ofproto.NX_ACTION_OUTPUT_REG_SIZE) -class NXActionOutputReg(NXActionHeader): - def __init__(self, ofs_nbits, src, max_len): - super(NXActionOutputReg, self).__init__() - self.ofs_nbits = ofs_nbits - self.src 
= src - self.max_len = max_len - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_OUTPUT_REG_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.ofs_nbits, self.src, self.max_len) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, ofs_nbits, src, - max_len) = struct.unpack_from( - ofproto.NX_ACTION_OUTPUT_REG_PACK_STR, buf, offset) - return cls(ofs_nbits, src, max_len) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_EXIT, ofproto.NX_ACTION_HEADER_SIZE) -class NXActionExit(NXActionHeader): - def __init__(self): - super(NXActionExit, self).__init__() - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype) = struct.unpack_from( - ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset) - return cls() - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_DEC_TTL, ofproto.NX_ACTION_HEADER_SIZE) -class NXActionDecTtl(NXActionHeader): - def __init__(self): - super(NXActionDecTtl, self).__init__() - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype) = struct.unpack_from( - ofproto.NX_ACTION_HEADER_PACK_STR, buf, offset) - return cls() - - -@NXActionHeader.register_nx_action_subtype(ofproto.NXAST_LEARN, 0) -class NXActionLearn(NXActionHeader): - def __init__(self, idle_timeout, hard_timeout, priority, cookie, flags, - table_id, fin_idle_timeout, fin_hard_timeout, spec): - super(NXActionLearn, self).__init__() - len_ = len(spec) + ofproto.NX_ACTION_LEARN_SIZE - pad_len = 8 - (len_ % 8) - self.len = len_ + pad_len - - self.idle_timeout = idle_timeout - self.hard_timeout = hard_timeout - self.priority = priority - self.cookie = 
cookie - self.flags = flags - self.table_id = table_id - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - self.spec = spec + bytearray(b'\x00' * pad_len) - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_LEARN_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.idle_timeout, self.hard_timeout, self.priority, - self.cookie, self.flags, self.table_id, - self.fin_idle_timeout, self.fin_hard_timeout) - buf += self.spec - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, idle_timeout, hard_timeout, priority, - cookie, flags, table_id, fin_idle_timeout, - fin_hard_timeout) = struct.unpack_from( - ofproto.NX_ACTION_LEARN_PACK_STR, buf, offset) - spec = buf[offset + ofproto.NX_ACTION_LEARN_SIZE:] - return cls(idle_timeout, hard_timeout, priority, - cookie, flags, table_id, fin_idle_timeout, - fin_hard_timeout, spec) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_CONTROLLER, ofproto.NX_ACTION_CONTROLLER_SIZE) -class NXActionController(NXActionHeader): - def __init__(self, max_len, controller_id, reason): - super(NXActionController, self).__init__() - self.max_len = max_len - self.controller_id = controller_id - self.reason = reason - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_CONTROLLER_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.max_len, self.controller_id, self.reason, 0) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, max_len, controller_id, reason, - _zero) = struct.unpack_from( - ofproto.NX_ACTION_CONTROLLER_PACK_STR, buf, offset) - return cls(max_len, controller_id, reason) - - -@NXActionHeader.register_nx_action_subtype( - ofproto.NXAST_FIN_TIMEOUT, ofproto.NX_ACTION_FIN_TIMEOUT_SIZE) -class NXActionFinTimeout(NXActionHeader): - def __init__(self, fin_idle_timeout, fin_hard_timeout): - super(NXActionFinTimeout, self).__init__() - 
self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - - def serialize(self, buf, offset): - msg_pack_into(ofproto.NX_ACTION_FIN_TIMEOUT_PACK_STR, buf, offset, - self.type, self.len, self.vendor, self.subtype, - self.fin_idle_timeout, self.fin_hard_timeout) - - @classmethod - def parser(cls, buf, offset): - (type_, len_, vendor, subtype, fin_idle_timeout, - fin_hard_timeout) = struct.unpack_from( - ofproto.NX_ACTION_FIN_TIMEOUT_PACK_STR, buf, offset) - return cls(fin_idle_timeout, fin_hard_timeout) - - class OFPDescStats(ofproto_parser.namedtuple('OFPDescStats', ( 'mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc'))): @@ -1958,8 +1539,8 @@ class NXTFlowRemoved(NiciraHeader): idle_timeout, match_len, packet_count, byte_count) = struct.unpack_from( ofproto.NX_FLOW_REMOVED_PACK_STR, buf, offset) - offset += (ofproto.NX_FLOW_REMOVED_SIZE - - ofproto.NICIRA_HEADER_SIZE) + offset += (ofproto.NX_FLOW_REMOVED_SIZE - + ofproto.NICIRA_HEADER_SIZE) match = nx_match.NXMatch.parser(buf, offset, match_len) return cls(datapath, cookie, priority, reason, duration_sec, duration_nsec, idle_timeout, match_len, packet_count, @@ -2000,8 +1581,8 @@ class NXTPacketIn(NiciraHeader): cookie, match_len) = struct.unpack_from( ofproto.NX_PACKET_IN_PACK_STR, buf, offset) - offset += (ofproto.NX_PACKET_IN_SIZE - - ofproto.NICIRA_HEADER_SIZE) + offset += (ofproto.NX_PACKET_IN_SIZE - + ofproto.NICIRA_HEADER_SIZE) match = nx_match.NXMatch.parser(buf, offset, match_len) offset += (match_len + 7) // 8 * 8 @@ -3070,6 +2651,7 @@ class OFPPacketOut(MsgBase): self.buffer_id, self.in_port, self._actions_len) +@_register_parser @_set_msg_type(ofproto.OFPT_FLOW_MOD) class OFPFlowMod(MsgBase): """ @@ -3126,15 +2708,14 @@ class OFPFlowMod(MsgBase): priority, buffer_id, out_port, flags, actions) datapath.send_msg(req) """ - def __init__(self, datapath, match, cookie, command, + def __init__(self, datapath, match=None, cookie=0, + command=ofproto.OFPFC_ADD, idle_timeout=0, 
hard_timeout=0, priority=ofproto.OFP_DEFAULT_PRIORITY, buffer_id=0xffffffff, out_port=ofproto.OFPP_NONE, flags=0, actions=None): - if actions is None: - actions = [] super(OFPFlowMod, self).__init__(datapath) - self.match = match + self.match = OFPMatch() if match is None else match self.cookie = cookie self.command = command self.idle_timeout = idle_timeout @@ -3143,7 +2724,7 @@ class OFPFlowMod(MsgBase): self.buffer_id = buffer_id self.out_port = out_port self.flags = flags - self.actions = actions + self.actions = [] if actions is None else actions def _serialize_body(self): offset = ofproto.OFP_HEADER_SIZE @@ -3162,6 +2743,30 @@ class OFPFlowMod(MsgBase): a.serialize(self.buf, offset) offset += a.len + @classmethod + def parser(cls, datapath, version, msg_type, msg_len, xid, buf): + msg = super(OFPFlowMod, cls).parser( + datapath, version, msg_type, msg_len, xid, buf) + offset = ofproto.OFP_HEADER_SIZE + + msg.match = OFPMatch.parse(msg.buf, offset) + offset += ofproto.OFP_MATCH_SIZE + + (msg.cookie, msg.command, msg.idle_timeout, msg.hard_timeout, + msg.priority, msg.buffer_id, msg.out_port, + msg.flags) = struct.unpack_from( + ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, offset) + offset = ofproto.OFP_FLOW_MOD_SIZE + + actions = [] + while offset < msg_len: + a = OFPAction.parser(buf, offset) + actions.append(a) + offset += a.len + msg.actions = actions + + return msg + @_set_msg_type(ofproto.OFPT_PORT_MOD) class OFPPortMod(MsgBase): @@ -3619,3 +3224,9 @@ class NXAggregateStatsRequest(NXStatsRequest): ofproto.NX_AGGREGATE_STATS_REQUEST_PACK_STR, self.buf, ofproto.NX_STATS_MSG_SIZE, self.out_port, self.match_len, self.table_id) + + +nx_actions.generate( + 'ryu.ofproto.ofproto_v1_0', + 'ryu.ofproto.ofproto_v1_0_parser' +) diff --git a/ryu/ofproto/ofproto_v1_2.py b/ryu/ofproto/ofproto_v1_2.py index adf131cf..9c8aa906 100644 --- a/ryu/ofproto/ofproto_v1_2.py +++ b/ryu/ofproto/ofproto_v1_2.py @@ -19,7 +19,7 @@ OpenFlow 1.2 definitions. 
""" from ryu.lib import type_desc -from ryu.ofproto import nx_match +from ryu.ofproto import nicira_ext from ryu.ofproto import ofproto_utils from ryu.ofproto import oxm_fields @@ -836,7 +836,7 @@ oxm_types = [ # EXT-233 Output match Extension # NOTE(yamamoto): The spec says uint64_t but I assume it's an error. oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4), -] + nx_match.oxm_types +] + nicira_ext.oxm_types oxm_fields.generate(__name__) diff --git a/ryu/ofproto/ofproto_v1_2_parser.py b/ryu/ofproto/ofproto_v1_2_parser.py index c0255c53..b755499c 100644 --- a/ryu/ofproto/ofproto_v1_2_parser.py +++ b/ryu/ofproto/ofproto_v1_2_parser.py @@ -862,6 +862,7 @@ class OFPPacketOut(MsgBase): self.buffer_id, self.in_port, self.actions_len) +@_register_parser @_set_msg_type(ofproto.OFPT_FLOW_MOD) class OFPFlowMod(MsgBase): """ @@ -971,6 +972,31 @@ class OFPFlowMod(MsgBase): inst.serialize(self.buf, offset) offset += inst.len + @classmethod + def parser(cls, datapath, version, msg_type, msg_len, xid, buf): + msg = super(OFPFlowMod, cls).parser( + datapath, version, msg_type, msg_len, xid, buf) + + (msg.cookie, msg.cookie_mask, msg.table_id, + msg.command, msg.idle_timeout, msg.hard_timeout, + msg.priority, msg.buffer_id, msg.out_port, + msg.out_group, msg.flags) = struct.unpack_from( + ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, + ofproto.OFP_HEADER_SIZE) + offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE + + msg.match = OFPMatch.parser(buf, offset) + offset += utils.round_up(msg.match.length, 8) + + instructions = [] + while offset < msg_len: + i = OFPInstruction.parser(buf, offset) + instructions.append(i) + offset += i.len + msg.instructions = instructions + + return msg + class OFPInstruction(StringifyMixin): _INSTRUCTION_TYPES = {} @@ -1557,19 +1583,11 @@ class OFPActionSetField(OFPAction): return not hasattr(self, 'value') def to_jsondict(self): - # XXX old api compat - if self._composed_with_old_api(): - # copy object first because serialize_old 
is destructive - o2 = OFPActionSetField(self.field) - # serialize and parse to fill new fields - buf = bytearray() - o2.serialize(buf, 0) - o = OFPActionSetField.parser(six.binary_type(buf), 0) - else: - o = self return { self.__class__.__name__: { - 'field': ofproto.oxm_to_jsondict(self.key, self.value) + 'field': ofproto.oxm_to_jsondict(self.key, self.value), + "len": self.len, + "type": self.type } } diff --git a/ryu/ofproto/ofproto_v1_3.py b/ryu/ofproto/ofproto_v1_3.py index 6b31ec2a..8a562a8d 100644 --- a/ryu/ofproto/ofproto_v1_3.py +++ b/ryu/ofproto/ofproto_v1_3.py @@ -19,7 +19,7 @@ OpenFlow 1.3 definitions. """ from ryu.lib import type_desc -from ryu.ofproto import nx_match +from ryu.ofproto import nicira_ext from ryu.ofproto import ofproto_utils from ryu.ofproto import oxm_fields @@ -1195,7 +1195,7 @@ oxm_types = [ # EXT-233 Output match Extension # NOTE(yamamoto): The spec says uint64_t but I assume it's an error. oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4), -] + nx_match.oxm_types +] + nicira_ext.oxm_types oxm_fields.generate(__name__) diff --git a/ryu/ofproto/ofproto_v1_3_parser.py b/ryu/ofproto/ofproto_v1_3_parser.py index 374604aa..067ae19c 100644 --- a/ryu/ofproto/ofproto_v1_3_parser.py +++ b/ryu/ofproto/ofproto_v1_3_parser.py @@ -1686,7 +1686,6 @@ class OFPMatchField(StringifyMixin): @classmethod def field_parser(cls, header, buf, offset): - hasmask = (header >> 8) & 1 mask = None if ofproto.oxm_tlv_header_extract_hasmask(header): pack_str = '!' + cls.pack_str[1:] * 2 @@ -2155,7 +2154,6 @@ class MTPbbIsid(OFPMatchField): @classmethod def field_parser(cls, header, buf, offset): - hasmask = (header >> 8) & 1 mask = None if ofproto.oxm_tlv_header_extract_hasmask(header): pack_str = '!' 
+ cls.pack_str[1:] * 2 @@ -2547,6 +2545,7 @@ class OFPPacketOut(MsgBase): self.buffer_id, self.in_port, self.actions_len) +@_register_parser @_set_msg_type(ofproto.OFPT_FLOW_MOD) class OFPFlowMod(MsgBase): """ @@ -2659,6 +2658,31 @@ class OFPFlowMod(MsgBase): inst.serialize(self.buf, offset) offset += inst.len + @classmethod + def parser(cls, datapath, version, msg_type, msg_len, xid, buf): + msg = super(OFPFlowMod, cls).parser( + datapath, version, msg_type, msg_len, xid, buf) + + (msg.cookie, msg.cookie_mask, msg.table_id, + msg.command, msg.idle_timeout, msg.hard_timeout, + msg.priority, msg.buffer_id, msg.out_port, + msg.out_group, msg.flags) = struct.unpack_from( + ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, + ofproto.OFP_HEADER_SIZE) + offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE + + msg.match = OFPMatch.parser(buf, offset) + offset += utils.round_up(msg.match.length, 8) + + instructions = [] + while offset < msg_len: + i = OFPInstruction.parser(buf, offset) + instructions.append(i) + offset += i.len + msg.instructions = instructions + + return msg + class OFPInstruction(StringifyMixin): _INSTRUCTION_TYPES = {} @@ -3273,19 +3297,11 @@ class OFPActionSetField(OFPAction): return not hasattr(self, 'value') def to_jsondict(self): - # XXX old api compat - if self._composed_with_old_api(): - # copy object first because serialize_old is destructive - o2 = OFPActionSetField(self.field) - # serialize and parse to fill new fields - buf = bytearray() - o2.serialize(buf, 0) - o = OFPActionSetField.parser(six.binary_type(buf), 0) - else: - o = self return { self.__class__.__name__: { - 'field': ofproto.oxm_to_jsondict(self.key, self.value) + 'field': ofproto.oxm_to_jsondict(self.key, self.value), + "len": self.len, + "type": self.type } } @@ -3402,7 +3418,7 @@ class OFPActionExperimenter(OFPAction): data = buf[(offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE ): offset + len_] if experimenter == ofproto_common.NX_EXPERIMENTER_ID: - obj = 
NXAction.parse(data) + obj = NXAction.parse(data) # noqa else: obj = OFPActionExperimenterUnknown(experimenter, data) obj.len = len_ @@ -5137,7 +5153,8 @@ class OFPInstructionId(StringifyMixin): @classmethod def parse(cls, buf): - (type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0) + (type_, len_,) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf), 0) rest = buf[len_:] return cls(type_=type_, len_=len_), rest @@ -5190,7 +5207,8 @@ class OFPTableFeaturePropNextTables(OFPTableFeatureProp): rest = cls.get_rest(buf) ids = [] while rest: - (i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, six.binary_type(rest), 0) + (i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, + six.binary_type(rest), 0) rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):] ids.append(i) return cls(table_ids=ids) @@ -5223,7 +5241,8 @@ class OFPActionId(StringifyMixin): @classmethod def parse(cls, buf): - (type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0) + (type_, len_,) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf), 0) rest = buf[len_:] return cls(type_=type_, len_=len_), rest @@ -6119,12 +6138,13 @@ class OFPSetAsync(MsgBase): ofp = datapath.ofproto ofp_parser = datapath.ofproto_parser - packet_in_mask = ofp.OFPR_ACTION | ofp.OFPR_INVALID_TTL - port_status_mask = (ofp.OFPPR_ADD | ofp.OFPPR_DELETE | - ofp.OFPPR_MODIFY) - flow_removed_mask = (ofp.OFPRR_IDLE_TIMEOUT | - ofp.OFPRR_HARD_TIMEOUT | - ofp.OFPRR_DELETE) + packet_in_mask = 1 << ofp.OFPR_ACTION | 1 << ofp.OFPR_INVALID_TTL + port_status_mask = (1 << ofp.OFPPR_ADD + | 1 << ofp.OFPPR_DELETE + | 1 << ofp.OFPPR_MODIFY) + flow_removed_mask = (1 << ofp.OFPRR_IDLE_TIMEOUT + | 1 << ofp.OFPRR_HARD_TIMEOUT + | 1 << ofp.OFPRR_DELETE) req = ofp_parser.OFPSetAsync(datapath, [packet_in_mask, 0], [port_status_mask, 0], diff --git a/ryu/ofproto/ofproto_v1_4.py b/ryu/ofproto/ofproto_v1_4.py index 3f107eb7..d1c48225 100644 --- a/ryu/ofproto/ofproto_v1_4.py +++ b/ryu/ofproto/ofproto_v1_4.py 
@@ -19,7 +19,7 @@ OpenFlow 1.4 definitions. """ from ryu.lib import type_desc -from ryu.ofproto import nx_match +from ryu.ofproto import nicira_ext from ryu.ofproto import ofproto_utils from ryu.ofproto import oxm_fields @@ -396,7 +396,7 @@ oxm_types = [ # EXT-233 Output match Extension # NOTE(yamamoto): The spec says uint64_t but I assume it's an error. oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4), -] + nx_match.oxm_types +] + nicira_ext.oxm_types oxm_fields.generate(__name__) @@ -1406,8 +1406,15 @@ OFPACPT_TABLE_STATUS_SLAVE = 8 # Table status mask for slave. OFPACPT_TABLE_STATUS_MASTER = 9 # Table status mask for master. OFPACPT_REQUESTFORWARD_SLAVE = 10 # RequestForward mask for slave. OFPACPT_REQUESTFORWARD_MASTER = 11 # RequestForward mask for master. -OFPTFPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave. -OFPTFPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master. +OFPTFPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave (depracated). +OFPTFPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master (depracated). + # New or updated Ryu applications shall use + # OFPACPT_EXPERIMENTER_SLAVE and OFPACPT_EXPERIMENTER_MASTER. + # The variable name is a typo of in specifications before v1.5.0. +OFPACPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave. +OFPACPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master. + # Backporting from ofproto_v1_5 for consistency with + # later OF specs. 
# struct ofp_async_config_prop_reasons OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR = '!HHI' diff --git a/ryu/ofproto/ofproto_v1_4_parser.py b/ryu/ofproto/ofproto_v1_4_parser.py index 53d3f1e5..50acb3b7 100644 --- a/ryu/ofproto/ofproto_v1_4_parser.py +++ b/ryu/ofproto/ofproto_v1_4_parser.py @@ -24,7 +24,7 @@ import struct from ryu.lib import addrconv from ryu.lib.pack_utils import msg_pack_into from ryu import utils -from ryu.ofproto.ofproto_parser import StringifyMixin, MsgBase, MsgInMsgBase, msg_str_attr +from ryu.ofproto.ofproto_parser import StringifyMixin, MsgBase, MsgInMsgBase from ryu.ofproto import ether from ryu.ofproto import nx_actions from ryu.ofproto import ofproto_parser @@ -1783,7 +1783,8 @@ class OFPInstructionId(StringifyMixin): @classmethod def parse(cls, buf): - (type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0) + (type_, len_,) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf), 0) rest = buf[len_:] return cls(type_=type_, len_=len_), rest @@ -1834,7 +1835,8 @@ class OFPActionId(StringifyMixin): @classmethod def parse(cls, buf): - (type_, len_,) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf), 0) + (type_, len_,) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf), 0) rest = buf[len_:] return cls(type_=type_, len_=len_), rest @@ -1889,7 +1891,8 @@ class OFPTableFeaturePropNextTables(OFPTableFeatureProp): rest = cls.get_rest(buf) ids = [] while rest: - (i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, six.binary_type(rest), 0) + (i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, + six.binary_type(rest), 0) rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):] ids.append(i) return cls(table_ids=ids) @@ -4210,6 +4213,7 @@ class OFPPacketOut(MsgBase): self.buffer_id, self.in_port, self.actions_len) +@_register_parser @_set_msg_type(ofproto.OFPT_FLOW_MOD) class OFPFlowMod(MsgBase): """ @@ -4326,6 +4330,31 @@ class OFPFlowMod(MsgBase): inst.serialize(self.buf, offset) offset += inst.len + @classmethod + def 
parser(cls, datapath, version, msg_type, msg_len, xid, buf): + msg = super(OFPFlowMod, cls).parser( + datapath, version, msg_type, msg_len, xid, buf) + + (msg.cookie, msg.cookie_mask, msg.table_id, + msg.command, msg.idle_timeout, msg.hard_timeout, + msg.priority, msg.buffer_id, msg.out_port, + msg.out_group, msg.flags, msg.importance) = struct.unpack_from( + ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, + ofproto.OFP_HEADER_SIZE) + offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE + + msg.match = OFPMatch.parser(buf, offset) + offset += utils.round_up(msg.match.length, 8) + + instructions = [] + while offset < msg_len: + i = OFPInstruction.parser(buf, offset) + instructions.append(i) + offset += i.len + msg.instructions = instructions + + return msg + class OFPInstruction(StringifyMixin): _INSTRUCTION_TYPES = {} @@ -4908,7 +4937,9 @@ class OFPActionSetField(OFPAction): def to_jsondict(self): return { self.__class__.__name__: { - 'field': ofproto.oxm_to_jsondict(self.key, self.value) + 'field': ofproto.oxm_to_jsondict(self.key, self.value), + "len": self.len, + "type": self.type } } @@ -5002,7 +5033,7 @@ class OFPActionExperimenter(OFPAction): data = buf[(offset + ofproto.OFP_ACTION_EXPERIMENTER_HEADER_SIZE ): offset + len_] if experimenter == ofproto_common.NX_EXPERIMENTER_ID: - obj = NXAction.parse(data) + obj = NXAction.parse(data) # noqa else: obj = OFPActionExperimenterUnknown(experimenter, data) obj.len = len_ @@ -5417,8 +5448,8 @@ class OFPAsyncConfigPropReasons(OFPAsyncConfigProp): return buf -@OFPAsyncConfigProp.register_type(ofproto.OFPTFPT_EXPERIMENTER_SLAVE) -@OFPAsyncConfigProp.register_type(ofproto.OFPTFPT_EXPERIMENTER_MASTER) +@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_SLAVE) +@OFPAsyncConfigProp.register_type(ofproto.OFPACPT_EXPERIMENTER_MASTER) class OFPAsyncConfigPropExperimenter(OFPPropCommonExperimenter4ByteData): pass @@ -5505,13 +5536,11 @@ class OFPSetAsync(MsgBase): ofp = datapath.ofproto ofp_parser = 
datapath.ofproto_parser - properties = [ofp_parser.OFPAsyncConfigPropReasons( - 8, ofp_parser.OFPACPT_PACKET_IN_SLAVE, - (ofp_parser.OFPR_APPLY_ACTION | - ofp_parser.OFPR_INVALID_TTL)), - ofp_parser.OFPAsyncConfigPropExperimenter( - ofp.OFPTFPT_EXPERIMENTER_MASTER, - 16, 100, 2, bytearray())] + properties = [ + ofp_parser.OFPAsyncConfigPropReasons( + ofp.OFPACPT_PACKET_IN_SLAVE, 8, + (1 << ofp.OFPR_APPLY_ACTION + | 1 << ofp.OFPR_INVALID_TTL))] req = ofp_parser.OFPSetAsync(datapath, properties) datapath.send_msg(req) """ diff --git a/ryu/ofproto/ofproto_v1_5.py b/ryu/ofproto/ofproto_v1_5.py index 45191dd8..83531845 100644 --- a/ryu/ofproto/ofproto_v1_5.py +++ b/ryu/ofproto/ofproto_v1_5.py @@ -19,7 +19,7 @@ OpenFlow 1.5 definitions. """ from ryu.lib import type_desc -from ryu.ofproto import nx_match +from ryu.ofproto import nicira_ext from ryu.ofproto import ofproto_utils from ryu.ofproto import oxm_fields from ryu.ofproto import oxs_fields @@ -431,7 +431,7 @@ oxm_types = [ oxm_fields.OpenFlowBasic('tcp_flags', 42, type_desc.Int2), oxm_fields.OpenFlowBasic('actset_output', 43, type_desc.Int4), oxm_fields.OpenFlowBasic('packet_type', 44, type_desc.Int4), -] + nx_match.oxm_types +] + nicira_ext.oxm_types oxm_fields.generate(__name__) diff --git a/ryu/ofproto/ofproto_v1_5_parser.py b/ryu/ofproto/ofproto_v1_5_parser.py index ac91af91..c6fe9867 100644 --- a/ryu/ofproto/ofproto_v1_5_parser.py +++ b/ryu/ofproto/ofproto_v1_5_parser.py @@ -2814,7 +2814,6 @@ class OFPGroupDescStats(StringifyMixin): self.length = length self.type = type_ self.group_id = group_id - self.bucket_array_len = bucket_array_len self.buckets = buckets self.properties = properties @@ -3223,11 +3222,11 @@ class OFPMeterDescStats(StringifyMixin): (meter_config.length, meter_config.flags, meter_config.meter_id) = struct.unpack_from( - ofproto.OFP_METER_CONFIG_PACK_STR, buf, offset) - offset += ofproto.OFP_METER_CONFIG_SIZE + ofproto.OFP_METER_DESC_PACK_STR, buf, offset) + offset += 
ofproto.OFP_METER_DESC_SIZE meter_config.bands = [] - length = ofproto.OFP_METER_CONFIG_SIZE + length = ofproto.OFP_METER_DESC_SIZE while length < meter_config.length: band = OFPMeterBandHeader.parser(buf, offset) meter_config.bands.append(band) @@ -3309,7 +3308,7 @@ class OFPMeterDescStatsReply(OFPMultipartReply): class OFPMeterFeaturesStats(ofproto_parser.namedtuple('OFPMeterFeaturesStats', ('max_meter', 'band_types', 'capabilities', - 'max_bands', 'max_color'))): + 'max_bands', 'max_color', 'features'))): @classmethod def parser(cls, buf, offset): meter_features = struct.unpack_from( @@ -5067,6 +5066,7 @@ class OFPPacketOut(MsgBase): self.buffer_id, self.actions_len) +@_register_parser @_set_msg_type(ofproto.OFPT_FLOW_MOD) class OFPFlowMod(MsgBase): """ @@ -5183,6 +5183,31 @@ class OFPFlowMod(MsgBase): inst.serialize(self.buf, offset) offset += inst.len + @classmethod + def parser(cls, datapath, version, msg_type, msg_len, xid, buf): + msg = super(OFPFlowMod, cls).parser( + datapath, version, msg_type, msg_len, xid, buf) + + (msg.cookie, msg.cookie_mask, msg.table_id, + msg.command, msg.idle_timeout, msg.hard_timeout, + msg.priority, msg.buffer_id, msg.out_port, + msg.out_group, msg.flags, msg.importance) = struct.unpack_from( + ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, + ofproto.OFP_HEADER_SIZE) + offset = ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE + + msg.match = OFPMatch.parser(buf, offset) + offset += utils.round_up(msg.match.length, 8) + + instructions = [] + while offset < msg_len: + i = OFPInstruction.parser(buf, offset) + instructions.append(i) + offset += i.len + msg.instructions = instructions + + return msg + class OFPInstruction(StringifyMixin): _INSTRUCTION_TYPES = {} @@ -5781,7 +5806,9 @@ class OFPActionSetField(OFPAction): def to_jsondict(self): return { self.__class__.__name__: { - 'field': ofproto.oxm_to_jsondict(self.key, self.value) + 'field': ofproto.oxm_to_jsondict(self.key, self.value), + "len": self.len, + "type": self.type } } @@ 
-5887,12 +5914,14 @@ class OFPActionCopyField(OFPAction): return cls(n_bits, src_offset, dst_offset, oxm_ids, type_, len_) def serialize(self, buf, offset): + oxm_ids_buf = bytearray() + for i in self.oxm_ids: + oxm_ids_buf += i.serialize() + self.len += len(oxm_ids_buf) msg_pack_into(ofproto.OFP_ACTION_COPY_FIELD_PACK_STR, buf, offset, self.type, self.len, self.n_bits, self.src_offset, self.dst_offset) - - for i in self.oxm_ids: - buf += i.serialize() + buf += oxm_ids_buf @OFPAction.register_action_type(ofproto.OFPAT_METER, @@ -6580,13 +6609,11 @@ class OFPSetAsync(MsgBase): ofp = datapath.ofproto ofp_parser = datapath.ofproto_parser - properties = [ofp_parser.OFPAsyncConfigPropReasons( - 8, ofp_parser.OFPACPT_PACKET_IN_SLAVE, - (ofp_parser.OFPR_APPLY_ACTION | - ofp_parser.OFPR_INVALID_TTL)), - ofp_parser.OFPAsyncConfigPropExperimenter( - ofp.OFPTFPT_EXPERIMENTER_MASTER, - 16, 100, 2, bytearray())] + properties = [ + ofp_parser.OFPAsyncConfigPropReasons( + ofp.OFPACPT_PACKET_IN_SLAVE, 8, + (1 << ofp.OFPR_APPLY_ACTION + | 1 << ofp.OFPR_INVALID_TTL))] req = ofp_parser.OFPSetAsync(datapath, properties) datapath.send_msg(req) """ diff --git a/ryu/ofproto/oxx_fields.py b/ryu/ofproto/oxx_fields.py index e1e47aaf..e9c1fb97 100644 --- a/ryu/ofproto/oxx_fields.py +++ b/ryu/ofproto/oxx_fields.py @@ -82,7 +82,7 @@ def _get_field_info_by_number(oxx, num_to_field, n): name = f.name except KeyError: t = type_desc.UnknownType - if isinstance(n, int): + if isinstance(n, six.integer_types): name = 'field_%d' % (n,) else: raise KeyError('unknown %s field number: %s' % (oxx.upper(), n)) diff --git a/ryu/services/protocols/bgp/api/base.py b/ryu/services/protocols/bgp/api/base.py index 489e318e..33a4d8b8 100644 --- a/ryu/services/protocols/bgp/api/base.py +++ b/ryu/services/protocols/bgp/api/base.py @@ -18,6 +18,8 @@ This API can be used by various services like RPC, CLI, IoC, etc. 
""" +from __future__ import absolute_import + import inspect import logging import traceback @@ -208,7 +210,7 @@ def call(symbol, **kwargs): LOG.info("API method %s called with args: %s", symbol, str(kwargs)) # TODO(PH, JK) improve the way api function modules are loaded - import all # noqa + from . import all # noqa if not is_call_registered(symbol): message = 'Did not find any method registered by symbol %s' % symbol raise MethodNotFound(message) diff --git a/ryu/services/protocols/bgp/api/jsonrpc.py b/ryu/services/protocols/bgp/api/jsonrpc.py index d73a8bec..07d547b8 100644 --- a/ryu/services/protocols/bgp/api/jsonrpc.py +++ b/ryu/services/protocols/bgp/api/jsonrpc.py @@ -14,10 +14,9 @@ # limitations under the License. -import json from ryu.base import app_manager from ryu.lib import hub -from ryu.app.wsgi import route, websocket, ControllerBase, WSGIApplication +from ryu.app.wsgi import websocket, ControllerBase, WSGIApplication from ryu.app.wsgi import rpc_public, WebSocketRPCServer from ryu.services.protocols.bgp.api.base import call from ryu.services.protocols.bgp.api.base import PREFIX diff --git a/ryu/services/protocols/bgp/application.py b/ryu/services/protocols/bgp/application.py index 82844447..a1f4291b 100644 --- a/ryu/services/protocols/bgp/application.py +++ b/ryu/services/protocols/bgp/application.py @@ -18,7 +18,6 @@ import imp import logging import traceback -from os import path from oslo_config import cfg from ryu.lib import hub diff --git a/ryu/services/protocols/bgp/base.py b/ryu/services/protocols/bgp/base.py index ed662d01..9d23adb7 100644 --- a/ryu/services/protocols/bgp/base.py +++ b/ryu/services/protocols/bgp/base.py @@ -15,12 +15,17 @@ """ Defines some base class related to managing green threads. 
""" +from __future__ import absolute_import + import abc +from collections import OrderedDict import logging +import six import socket import time import traceback import weakref + import netaddr from ryu.lib import hub @@ -38,12 +43,6 @@ from ryu.services.protocols.bgp.utils.evtlet import LoopingCall # Logger instance for this module. LOG = logging.getLogger('bgpspeaker.base') - -try: - from collections import OrderedDict -except ImportError: - from ordereddict import OrderedDict - # Pointer to active/available OrderedDict. OrderedDict = OrderedDict @@ -127,6 +126,7 @@ class ActivityException(BGPSException): pass +@six.add_metaclass(abc.ABCMeta) class Activity(object): """Base class for a thread of execution that provides some custom settings. @@ -135,7 +135,6 @@ class Activity(object): to start another activity or greenthread. Activity is also holds pointers to sockets that it or its child activities of threads have create. """ - __metaclass__ = abc.ABCMeta def __init__(self, name=None): self._name = name @@ -367,7 +366,9 @@ class Activity(object): sock.bind(sa) sock.listen(50) listen_sockets[sa] = sock - except socket.error: + except socket.error as e: + LOG.error('Error creating socket: %s', e) + if sock: sock.close() @@ -454,7 +455,7 @@ class Sink(object): self.index = Sink.next_index() # Event used to signal enqueing. 
- from utils.evtlet import EventletIOFactory + from .utils.evtlet import EventletIOFactory self.outgoing_msg_event = EventletIOFactory.create_custom_event() self.messages_queued = 0 diff --git a/ryu/services/protocols/bgp/bgpspeaker.py b/ryu/services/protocols/bgp/bgpspeaker.py index c94b5794..ce3eaedf 100644 --- a/ryu/services/protocols/bgp/bgpspeaker.py +++ b/ryu/services/protocols/bgp/bgpspeaker.py @@ -34,8 +34,6 @@ from ryu.services.protocols.bgp.rtconf.common \ import DEFAULT_REFRESH_MAX_EOR_TIME from ryu.services.protocols.bgp.rtconf.common \ import DEFAULT_REFRESH_STALEPATH_TIME -from ryu.services.protocols.bgp.rtconf.common \ - import DEFAULT_BGP_CONN_RETRY_TIME from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME @@ -57,8 +55,6 @@ from ryu.services.protocols.bgp.rtconf.neighbors \ from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD -from ryu.services.protocols.bgp.rtconf.neighbors import IN_FILTER -from ryu.services.protocols.bgp.rtconf.neighbors import OUT_FILTER from ryu.services.protocols.bgp.rtconf.neighbors import IS_ROUTE_SERVER_CLIENT from ryu.services.protocols.bgp.rtconf.neighbors import IS_NEXT_HOP_SELF from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE @@ -244,7 +240,8 @@ class BGPSpeaker(object): next_hop=None, password=None, multi_exit_disc=None, site_of_origins=None, is_route_server_client=False, is_next_hop_self=False, local_address=None, - local_port=None, connect_mode=DEFAULT_CONNECT_MODE): + local_port=None, local_as=None, + connect_mode=DEFAULT_CONNECT_MODE): """ This method registers a new neighbor. 
The BGP speaker tries to establish a bgp session with the peer (accepts a connection from the peer and also tries to connect to it). @@ -265,11 +262,14 @@ class BGPSpeaker(object): ``enable_vpnv6`` enables VPNv6 address family for this neighbor. The default is False. + ``enable_enhanced_refresh`` enable Enhanced Route Refresh for this + neighbor. The default is False. + ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. ``password`` is used for the MD5 authentication if it's - specified. By default, the MD5 authenticaiton is disabled. + specified. By default, the MD5 authentication is disabled. ``multi_exit_disc`` specifies multi exit discriminator (MED) value. The default is None and if not specified, MED value is @@ -284,17 +284,19 @@ class BGPSpeaker(object): ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. - ``connect_mode`` specifies how to connect to this neighbor. - CONNECT_MODE_ACTIVE tries to connect from us. - CONNECT_MODE_PASSIVE just listens and wait for the connection. - CONNECT_MODE_BOTH use both methods. - The default is CONNECT_MODE_BOTH - ``local_address`` specifies Loopback interface address for iBGP peering. ``local_port`` specifies source TCP port for iBGP peering. + ``local_as`` specifies local AS number per-peer. + The default is the AS number of BGPSpeaker instance. + + ``connect_mode`` specifies how to connect to this neighbor. + CONNECT_MODE_ACTIVE tries to connect from us. + CONNECT_MODE_PASSIVE just listens and wait for the connection. + CONNECT_MODE_BOTH use both methods. + The default is CONNECT_MODE_BOTH. 
""" bgp_neighbor = {} bgp_neighbor[neighbors.IP_ADDRESS] = address @@ -332,6 +334,9 @@ class BGPSpeaker(object): if local_port: bgp_neighbor[LOCAL_PORT] = local_port + if local_as: + bgp_neighbor[LOCAL_AS] = local_as + call('neighbor.create', **bgp_neighbor) def neighbor_del(self, address): diff --git a/ryu/services/protocols/bgp/bmp.py b/ryu/services/protocols/bgp/bmp.py index 7d123dc9..eba642d9 100644 --- a/ryu/services/protocols/bgp/bmp.py +++ b/ryu/services/protocols/bgp/bmp.py @@ -17,15 +17,12 @@ from ryu.services.protocols.bgp.base import Activity from ryu.lib import hub from ryu.lib.packet import bmp from ryu.lib.packet import bgp -from ryu.services.protocols.bgp import constants as const import socket import logging from calendar import timegm from ryu.services.protocols.bgp.signals.emit import BgpSignalBus from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path from ryu.lib.packet.bgp import BGPUpdate -from ryu.lib.packet.bgp import BGPPathAttributeNextHop -from ryu.lib.packet.bgp import BGPPathAttributeMpReachNLRI from ryu.lib.packet.bgp import BGPPathAttributeMpUnreachNLRI LOG = logging.getLogger('bgpspeaker.bmp') @@ -82,9 +79,7 @@ class BMPClient(Activity): if not self._socket: return assert isinstance(msg, bmp.BMPMessage) - serialized_msg = msg.serialize() - - ret = self._socket.send(msg.serialize()) + self._socket.send(msg.serialize()) def on_adj_rib_in_changed(self, data): peer = data['peer'] diff --git a/ryu/services/protocols/bgp/core.py b/ryu/services/protocols/bgp/core.py index 3e0caa84..b9531407 100644 --- a/ryu/services/protocols/bgp/core.py +++ b/ryu/services/protocols/bgp/core.py @@ -472,7 +472,8 @@ class CoreService(Factory, Activity): if (host, port) in self.bmpclients: bmpclient = self.bmpclients[(host, port)] if bmpclient.started: - LOG.warn("bmpclient is already running for %s:%s", host, port) + LOG.warning("bmpclient is already running for %s:%s", + host, port) return False bmpclient = BMPClient(self, host, port) 
self.bmpclients[(host, port)] = bmpclient @@ -481,7 +482,7 @@ class CoreService(Factory, Activity): def stop_bmp(self, host, port): if (host, port) not in self.bmpclients: - LOG.warn("no bmpclient is running for %s:%s", host, port) + LOG.warning("no bmpclient is running for %s:%s", host, port) return False bmpclient = self.bmpclients[(host, port)] diff --git a/ryu/services/protocols/bgp/core_managers/__init__.py b/ryu/services/protocols/bgp/core_managers/__init__.py index 883de2d2..8b76bdc6 100644 --- a/ryu/services/protocols/bgp/core_managers/__init__.py +++ b/ryu/services/protocols/bgp/core_managers/__init__.py @@ -13,10 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import -from configuration_manager import ConfigurationManager -from import_map_manager import ImportMapManager -from peer_manager import PeerManager -from table_manager import TableCoreManager +from .configuration_manager import ConfigurationManager +from .import_map_manager import ImportMapManager +from .peer_manager import PeerManager +from .table_manager import TableCoreManager __all__ = ['ImportMapManager', 'TableCoreManager', 'PeerManager', 'ConfigurationManager'] diff --git a/ryu/services/protocols/bgp/core_managers/peer_manager.py b/ryu/services/protocols/bgp/core_managers/peer_manager.py index 9e8543d7..ce881d8f 100644 --- a/ryu/services/protocols/bgp/core_managers/peer_manager.py +++ b/ryu/services/protocols/bgp/core_managers/peer_manager.py @@ -7,10 +7,6 @@ from ryu.services.protocols.bgp.peer import Peer from ryu.lib.packet.bgp import BGPPathAttributeCommunities from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC from ryu.lib.packet.bgp import BGP_ATTR_TYPE_COMMUNITIES -from ryu.lib.packet.bgp import RF_IPv4_UC -from ryu.lib.packet.bgp import RF_IPv6_UC -from ryu.lib.packet.bgp import RF_IPv4_VPN -from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_RTC_UC 
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI from ryu.services.protocols.bgp.utils.bgp \ diff --git a/ryu/services/protocols/bgp/core_managers/table_manager.py b/ryu/services/protocols/bgp/core_managers/table_manager.py index fafbd2aa..7858fea8 100644 --- a/ryu/services/protocols/bgp/core_managers/table_manager.py +++ b/ryu/services/protocols/bgp/core_managers/table_manager.py @@ -445,8 +445,8 @@ class TableCoreManager(object): # of the given path and import this path into them. route_dist = vpn_path.nlri.route_dist for vrf_table in interested_tables: - if not (vpn_path.source is None - and route_dist == vrf_table.vrf_conf.route_dist): + if (vpn_path.source is not None and + route_dist != vrf_table.vrf_conf.route_dist): update_vrf_dest = vrf_table.import_vpn_path(vpn_path) # Queue the destination for further processing. if update_vrf_dest is not None: diff --git a/ryu/services/protocols/bgp/info_base/base.py b/ryu/services/protocols/bgp/info_base/base.py index 7eb9c48d..54e02bb5 100644 --- a/ryu/services/protocols/bgp/info_base/base.py +++ b/ryu/services/protocols/bgp/info_base/base.py @@ -23,7 +23,9 @@ from abc import ABCMeta from abc import abstractmethod from copy import copy import logging +import functools import netaddr +import six from ryu.lib.packet.bgp import RF_IPv4_UC from ryu.lib.packet.bgp import RouteTargetMembershipNLRI @@ -42,6 +44,7 @@ from ryu.services.protocols.bgp.processor import BPR_UNKNOWN LOG = logging.getLogger('bgpspeaker.info_base.base') +@six.add_metaclass(ABCMeta) class Table(object): """A container for holding information about destination/prefixes. @@ -49,7 +52,6 @@ class Table(object): This is a base class which should be sub-classed for different route family. A table can be uniquely identified by (Route Family, Scope Id). 
""" - __metaclass__ = abc.ABCMeta ROUTE_FAMILY = RF_IPv4_UC def __init__(self, scope_id, core_service, signal_bus): @@ -81,9 +83,6 @@ class Table(object): raise NotImplementedError() def values(self): - return self._destinations.values() - - def itervalues(self): return iter(self._destinations.values()) def insert(self, path): @@ -225,6 +224,10 @@ class NonVrfPathProcessingMixin(object): because they are processed at VRF level, so different logic applies. """ + def __init__(self): + self._core_service = None # not assigned yet + self._known_path_list = [] + def _best_path_lost(self): self._best_path = None @@ -249,8 +252,9 @@ class NonVrfPathProcessingMixin(object): LOG.debug('New best path selected for destination %s', self) # If old best path was withdrawn - if (old_best_path and old_best_path not in self._known_path_list - and self._sent_routes): + if (old_best_path and + old_best_path not in self._known_path_list and + self._sent_routes): # Have to clear sent_route list for this destination as # best path is removed. self._sent_routes = {} @@ -272,6 +276,7 @@ class NonVrfPathProcessingMixin(object): self._sent_routes = {} +@six.add_metaclass(ABCMeta) class Destination(object): """State about a particular destination. @@ -279,7 +284,6 @@ class Destination(object): a routing information base table *Table*. """ - __metaclass__ = abc.ABCMeta ROUTE_FAMILY = RF_IPv4_UC def __init__(self, table, nlri): @@ -662,13 +666,13 @@ class Destination(object): return result +@six.add_metaclass(ABCMeta) class Path(object): """Represents a way of reaching an IP destination. Also contains other meta-data given to us by a specific source (such as a peer). 
""" - __metaclass__ = ABCMeta __slots__ = ('_source', '_path_attr_map', '_nlri', '_source_version_num', '_exported_from', '_nexthop', 'next_path', 'prev_path', '_is_withdraw', 'med_set_by_target_neighbor') @@ -810,7 +814,7 @@ class Path(object): return not interested_rts.isdisjoint(curr_rts) def is_local(self): - return self._source == None + return self._source is None def has_nexthop(self): return not (not self._nexthop or self._nexthop == '0.0.0.0' or @@ -832,6 +836,7 @@ class Path(object): self._path_attr_map, self._nexthop, self._is_withdraw)) +@six.add_metaclass(ABCMeta) class Filter(object): """Represents a general filter for in-bound and out-bound filter @@ -842,7 +847,6 @@ class Filter(object): ================ ================================================== """ - __metaclass__ = ABCMeta ROUTE_FAMILY = RF_IPv4_UC @@ -880,6 +884,7 @@ class Filter(object): raise NotImplementedError() +@functools.total_ordering class PrefixFilter(Filter): """ used to specify a prefix for filter. @@ -934,8 +939,11 @@ class PrefixFilter(Filter): self._ge = ge self._le = le - def __cmp__(self, other): - return cmp(self.prefix, other.prefix) + def __lt__(self, other): + return self._network < other._network + + def __eq__(self, other): + return self._network == other._network def __repr__(self): policy = 'PERMIT' \ @@ -1009,6 +1017,7 @@ class PrefixFilter(Filter): le=self._le) +@functools.total_ordering class ASPathFilter(Filter): """ used to specify a prefix for AS_PATH attribute. 
@@ -1055,8 +1064,11 @@ class ASPathFilter(Filter): super(ASPathFilter, self).__init__(policy) self._as_number = as_number - def __cmp__(self, other): - return cmp(self.as_number, other.as_number) + def __lt__(self, other): + return self.as_number < other.as_number + + def __eq__(self, other): + return self.as_number == other.as_number def __repr__(self): policy = 'TOP' @@ -1223,5 +1235,8 @@ class AttributeMap(object): if self.attr_type == self.ATTR_LOCAL_PREF else None filter_string = ','.join(repr(f) for f in self.filters) - return 'AttributeMap(filters=[%s],attribute_type=%s,attribute_value=%s)'\ - % (filter_string, attr_type, self.attr_value) + return ('AttributeMap(filters=[%s],' + 'attribute_type=%s,' + 'attribute_value=%s)' % (filter_string, + attr_type, + self.attr_value)) diff --git a/ryu/services/protocols/bgp/info_base/vpn.py b/ryu/services/protocols/bgp/info_base/vpn.py index e5cdef4d..0f591070 100644 --- a/ryu/services/protocols/bgp/info_base/vpn.py +++ b/ryu/services/protocols/bgp/info_base/vpn.py @@ -19,6 +19,7 @@ import abc import logging +import six from ryu.services.protocols.bgp.info_base.base import Destination from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin @@ -55,8 +56,8 @@ class VpnTable(Table): ) +@six.add_metaclass(abc.ABCMeta) class VpnPath(Path): - __metaclass__ = abc.ABCMeta ROUTE_FAMILY = None VRF_PATH_CLASS = None NLRI_CLASS = None @@ -82,11 +83,10 @@ class VpnPath(Path): return vrf_path +@six.add_metaclass(abc.ABCMeta) class VpnDest(Destination, NonVrfPathProcessingMixin): """Base class for VPN destinations.""" - __metaclass__ = abc.ABCMeta - def _best_path_lost(self): old_best_path = self._best_path NonVrfPathProcessingMixin._best_path_lost(self) diff --git a/ryu/services/protocols/bgp/info_base/vrf.py b/ryu/services/protocols/bgp/info_base/vrf.py index fe6c3d65..c3f6603c 100644 --- a/ryu/services/protocols/bgp/info_base/vrf.py +++ b/ryu/services/protocols/bgp/info_base/vrf.py @@ -19,6 +19,7 @@ import 
abc import logging +import six from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH @@ -44,12 +45,12 @@ from ryu.services.protocols.bgp.utils.stats import RESOURCE_NAME LOG = logging.getLogger('bgpspeaker.info_base.vrf') +@six.add_metaclass(abc.ABCMeta) class VrfTable(Table): """Virtual Routing and Forwarding information base. Keeps destination imported to given vrf in represents. """ - __metaclass__ = abc.ABCMeta ROUTE_FAMILY = None VPN_ROUTE_FAMILY = None NLRI_CLASS = None @@ -104,8 +105,8 @@ class VrfTable(Table): local_route_count = 0 for dest in self.values(): for path in dest.known_path_list: - if (hasattr(path.source, 'version_num') - or path.source == VPN_TABLE): + if (hasattr(path.source, 'version_num') or + path.source == VPN_TABLE): remote_route_count += 1 else: local_route_count += 1 @@ -273,9 +274,9 @@ class VrfTable(Table): return super(VrfTable, self).clean_uninteresting_paths(interested_rts) +@six.add_metaclass(abc.ABCMeta) class VrfDest(Destination): """Base class for VRF destination.""" - __metaclass__ = abc.ABCMeta def __init__(self, table, nlri): super(VrfDest, self).__init__(table, nlri) @@ -424,11 +425,11 @@ class VrfDest(Destination): 'with attribute label_list got %s' % path) +@six.add_metaclass(abc.ABCMeta) class VrfPath(Path): """Represents a way of reaching an IP destination with a VPN. 
""" __slots__ = ('_label_list', '_puid') - __metaclass__ = abc.ABCMeta ROUTE_FAMILY = None VPN_PATH_CLASS = None diff --git a/ryu/services/protocols/bgp/operator/commands/show/rib.py b/ryu/services/protocols/bgp/operator/commands/show/rib.py index 440234ce..27d5b73c 100644 --- a/ryu/services/protocols/bgp/operator/commands/show/rib.py +++ b/ryu/services/protocols/bgp/operator/commands/show/rib.py @@ -1,4 +1,6 @@ -from route_formatter_mixin import RouteFormatterMixin +from __future__ import absolute_import + +from .route_formatter_mixin import RouteFormatterMixin from ryu.services.protocols.bgp.operator.command import Command from ryu.services.protocols.bgp.operator.command import CommandsResponse diff --git a/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py b/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py index c20c97c9..e29c7c7f 100644 --- a/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py +++ b/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py @@ -17,12 +17,7 @@ class RouteFormatterMixin(object): @classmethod def _format_family(cls, dest_list): - if six.PY3: - import io - msg = io.StringIO() - else: - import StringIO - msg = StringIO.StringIO() + msg = six.StringIO() def _append_path_info(buff, path, is_best, show_prefix): aspath = path.get('aspath') diff --git a/ryu/services/protocols/bgp/operator/commands/show/vrf.py b/ryu/services/protocols/bgp/operator/commands/show/vrf.py index b78a7cf5..8730665c 100644 --- a/ryu/services/protocols/bgp/operator/commands/show/vrf.py +++ b/ryu/services/protocols/bgp/operator/commands/show/vrf.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import logging import pprint @@ -9,7 +11,7 @@ from ryu.services.protocols.bgp.operator.commands.responses import \ WrongParamResp from ryu.services.protocols.bgp.operator.views.conf import ConfDetailView from ryu.services.protocols.bgp.operator.views.conf import ConfDictView 
-from route_formatter_mixin import RouteFormatterMixin +from .route_formatter_mixin import RouteFormatterMixin LOG = logging.getLogger('bgpspeaker.operator.commands.show.vrf') @@ -77,6 +79,8 @@ class Routes(Command, RouteFormatterMixin): class CountRoutesMixin(object): + api = None # not assigned yet + def _count_routes(self, vrf_name, vrf_rf): return len(self.api.get_single_vrf_routes(vrf_name, vrf_rf)) diff --git a/ryu/services/protocols/bgp/operator/ssh.py b/ryu/services/protocols/bgp/operator/ssh.py index d4e0bf70..8a584db9 100644 --- a/ryu/services/protocols/bgp/operator/ssh.py +++ b/ryu/services/protocols/bgp/operator/ssh.py @@ -22,14 +22,6 @@ import sys from copy import copy import os.path -CONF = { - "ssh_port": 4990, - "ssh_host": "localhost", - "ssh_hostkey": None, - "ssh_username": "ryu", - "ssh_password": "ryu", -} - from ryu.lib import hub from ryu import version from ryu.services.protocols.bgp.operator.command import Command @@ -39,6 +31,14 @@ from ryu.services.protocols.bgp.operator.internal_api import InternalApi from ryu.services.protocols.bgp.operator.command import STATUS_OK from ryu.services.protocols.bgp.base import Activity +CONF = { + "ssh_port": 4990, + "ssh_host": "localhost", + "ssh_hostkey": None, + "ssh_username": "ryu", + "ssh_password": "ryu", +} + LOG = logging.getLogger('bgpspeaker.cli') @@ -83,7 +83,7 @@ Hello, this is Ryu BGP speaker (version %s). 
def _find_ssh_server_key(self): if CONF["ssh_hostkey"]: - return paramiko.RSAKey.from_private_key_file(ssh_hostkey) + return paramiko.RSAKey.from_private_key_file(CONF['ssh_hostkey']) elif os.path.exists("/etc/ssh_host_rsa_key"): # OSX return paramiko.RSAKey.from_private_key_file( diff --git a/ryu/services/protocols/bgp/operator/views/fields.py b/ryu/services/protocols/bgp/operator/views/fields.py index ad219549..bb6dfef9 100644 --- a/ryu/services/protocols/bgp/operator/views/fields.py +++ b/ryu/services/protocols/bgp/operator/views/fields.py @@ -1,6 +1,8 @@ import importlib import inspect +import six + class Field(object): def __init__(self, field_name): @@ -19,7 +21,7 @@ class RelatedViewField(Field): def _operator_view_class(self): if inspect.isclass(self.__operator_view_class): return self.__operator_view_class - elif isinstance(self.__operator_view_class, basestring): + elif isinstance(self.__operator_view_class, six.string_types): try: module_name, class_name =\ self.__operator_view_class.rsplit('.', 1) diff --git a/ryu/services/protocols/bgp/peer.py b/ryu/services/protocols/bgp/peer.py index f95836ce..53582b3a 100644 --- a/ryu/services/protocols/bgp/peer.py +++ b/ryu/services/protocols/bgp/peer.py @@ -22,7 +22,6 @@ import time import traceback from ryu.services.protocols.bgp.base import Activity -from ryu.services.protocols.bgp.base import OrderedDict from ryu.services.protocols.bgp.base import Sink from ryu.services.protocols.bgp.base import Source from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF @@ -49,7 +48,6 @@ from ryu.lib.packet import bgp from ryu.lib.packet.bgp import RouteFamily from ryu.lib.packet.bgp import RF_IPv4_UC -from ryu.lib.packet.bgp import RF_IPv6_UC from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_RTC_UC @@ -391,6 +389,10 @@ class Peer(Source, Sink, NeighborConfListener, Activity): def med(self): return self._neigh_conf.multi_exit_disc + @property + 
def local_as(self): + return self._neigh_conf.local_as + @property def in_filters(self): return self._in_filters @@ -698,7 +700,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity): # Collect update statistics. self.state.incr(PeerCounterNames.SENT_UPDATES) else: - LOG.debug('prefix : %s is not sent by filter : %s', path.nlri, blocked_cause) + LOG.debug('prefix : %s is not sent by filter : %s', + path.nlri, blocked_cause) # We have to create sent_route for every OutgoingRoute which is # not a withdraw or was for route-refresh msg. @@ -919,9 +922,9 @@ class Peer(Source, Sink, NeighborConfListener, Activity): if (len(path_seg_list) > 0 and isinstance(path_seg_list[0], list) and len(path_seg_list[0]) < 255): - path_seg_list[0].insert(0, self._core_service.asn) + path_seg_list[0].insert(0, self.local_as) else: - path_seg_list.insert(0, [self._core_service.asn]) + path_seg_list.insert(0, [self.local_as]) aspath_attr = BGPPathAttributeAsPath(path_seg_list) # MULTI_EXIT_DISC Attribute. @@ -1035,7 +1038,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity): if self._neigh_conf.enabled: self._connect_retry_event.set() - while 1: + while True: self._connect_retry_event.wait() # Reconnecting immediately after closing connection may be not very @@ -1065,11 +1068,11 @@ class Peer(Source, Sink, NeighborConfListener, Activity): tcp_conn_timeout = self._common_conf.tcp_conn_timeout try: password = self._neigh_conf.password - sock = self._connect_tcp(peer_address, - client_factory, - time_out=tcp_conn_timeout, - bind_address=bind_addr, - password=password) + self._connect_tcp(peer_address, + client_factory, + time_out=tcp_conn_timeout, + bind_address=bind_addr, + password=password) except socket.error: self.state.bgp_state = const.BGP_FSM_ACTIVE if LOG.isEnabledFor(logging.DEBUG): @@ -1187,7 +1190,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity): Current setting include capabilities, timers and ids. 
""" - asnum = self._common_conf.local_as + asnum = self.local_as bgpid = self._common_conf.router_id holdtime = self._neigh_conf.hold_time @@ -1370,7 +1373,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity): aspath = umsg_pattrs.get(BGP_ATTR_TYPE_AS_PATH) # Check if AS_PATH has loops. - if aspath.has_local_as(self._common_conf.local_as): + if aspath.has_local_as(self.local_as): LOG.error('Update message AS_PATH has loops. Ignoring this' ' UPDATE. %s', update_msg) return @@ -1405,7 +1408,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity): tm = self._core_service.table_manager tm.learn_path(new_path) else: - LOG.debug('prefix : %s is blocked by in-bound filter: %s', msg_nlri, blocked_cause) + LOG.debug('prefix : %s is blocked by in-bound filter: %s', + msg_nlri, blocked_cause) # If update message had any qualifying new paths, do some book-keeping. if msg_nlri_list: @@ -1467,7 +1471,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity): tm = self._core_service.table_manager tm.learn_path(w_path) else: - LOG.debug('prefix : %s is blocked by in-bound filter: %s', nlri_str, blocked_cause) + LOG.debug('prefix : %s is blocked by in-bound filter: %s', + nlri_str, blocked_cause) def _extract_and_handle_mpbgp_new_paths(self, update_msg): """Extracts new paths advertised in the given update message's @@ -1496,7 +1501,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity): aspath = umsg_pattrs.get(BGP_ATTR_TYPE_AS_PATH) # Check if AS_PATH has loops. - if aspath.has_local_as(self._common_conf.local_as): + if aspath.has_local_as(self.local_as): LOG.error('Update message AS_PATH has loops. Ignoring this' ' UPDATE. 
%s', update_msg) return @@ -1562,7 +1567,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity): tm = self._core_service.table_manager tm.learn_path(new_path) else: - LOG.debug('prefix : %s is blocked by in-bound filter: %s', msg_nlri, blocked_cause) + LOG.debug('prefix : %s is blocked by in-bound filter: %s', + msg_nlri, blocked_cause) # If update message had any qualifying new paths, do some book-keeping. if msg_nlri_list: @@ -1623,7 +1629,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity): tm = self._core_service.table_manager tm.learn_path(w_path) else: - LOG.debug('prefix : %s is blocked by in-bound filter: %s', w_nlri, blocked_cause) + LOG.debug('prefix : %s is blocked by in-bound filter: %s', + w_nlri, blocked_cause) def _handle_eor(self, route_family): """Currently we only handle EOR for RTC address-family. diff --git a/ryu/services/protocols/bgp/protocol.py b/ryu/services/protocols/bgp/protocol.py index 43f45347..79283059 100644 --- a/ryu/services/protocols/bgp/protocol.py +++ b/ryu/services/protocols/bgp/protocol.py @@ -19,8 +19,10 @@ from abc import ABCMeta from abc import abstractmethod +import six +@six.add_metaclass(ABCMeta) class Protocol(object): """Interface for various protocols. @@ -31,7 +33,6 @@ class Protocol(object): facilitate or provide hooks to sub-classes to override behavior as appropriate. """ - __metaclass__ = ABCMeta @abstractmethod def data_received(self, data): @@ -62,12 +63,12 @@ class Protocol(object): pass +@six.add_metaclass(ABCMeta) class Factory(object): """This is a factory which produces protocols. Can also act as context for protocols. 
""" - __metaclass__ = ABCMeta # Put a subclass of Protocol here: protocol = None diff --git a/ryu/services/protocols/bgp/rtconf/base.py b/ryu/services/protocols/bgp/rtconf/base.py index ee8948a9..c61798dc 100644 --- a/ryu/services/protocols/bgp/rtconf/base.py +++ b/ryu/services/protocols/bgp/rtconf/base.py @@ -18,6 +18,7 @@ """ from abc import ABCMeta from abc import abstractmethod +import functools import numbers import logging import six @@ -142,13 +143,13 @@ class ConfigValueError(RuntimeConfigError): # Configuration base classes. # ============================================================================= +@six.add_metaclass(ABCMeta) class BaseConf(object): """Base class for a set of configuration values. Configurations can be required or optional. Also acts as a container of configuration change listeners. """ - __metaclass__ = ABCMeta def __init__(self, **kwargs): self._req_settings = self.get_req_settings() @@ -425,9 +426,9 @@ class ConfWithStats(BaseConf): **kwargs) +@six.add_metaclass(ABCMeta) class BaseConfListener(object): """Base class of all configuration listeners.""" - __metaclass__ = ABCMeta def __init__(self, base_conf): pass @@ -479,6 +480,7 @@ class ConfWithStatsListener(BaseConfListener): raise NotImplementedError() +@functools.total_ordering class ConfEvent(object): """Encapsulates configuration settings change/update event.""" @@ -517,9 +519,13 @@ class ConfEvent(object): return ('ConfEvent(src=%s, name=%s, value=%s)' % (self.src, self.name, self.value)) - def __cmp__(self, other): - return cmp((other.src, other.name, other.value), - (self.src, self.name, self.value)) + def __lt__(self, other): + return ((self.src, self.name, self.value) < + (other.src, other.name, other.value)) + + def __eq__(self, other): + return ((self.src, self.name, self.value) == + (other.src, other.name, other.value)) # ============================================================================= @@ -598,10 +604,10 @@ def validate_cap_mbgp_ipv4(cmv4): 
@validate(name=CAP_MBGP_IPV6) -def validate_cap_mbgp_ipv4(cmv6): +def validate_cap_mbgp_ipv6(cmv6): if cmv6 not in (True, False): raise ConfigTypeError(desc='Invalid Enhanced Refresh capability ' - 'settings: %s boolean value expected' % cmv4) + 'settings: %s boolean value expected' % cmv6) return cmv6 diff --git a/ryu/services/protocols/bgp/rtconf/common.py b/ryu/services/protocols/bgp/rtconf/common.py index cfef8419..d285bb6d 100644 --- a/ryu/services/protocols/bgp/rtconf/common.py +++ b/ryu/services/protocols/bgp/rtconf/common.py @@ -129,10 +129,11 @@ def validate_refresh_max_eor_time(rmet): @validate(name=LABEL_RANGE) def validate_label_range(label_range): min_label, max_label = label_range - if (not min_label or not max_label - or not isinstance(min_label, numbers.Integral) - or not isinstance(max_label, numbers.Integral) or min_label < 17 - or min_label >= max_label): + if (not min_label or + not max_label or + not isinstance(min_label, numbers.Integral) or + not isinstance(max_label, numbers.Integral) or min_label < 17 or + min_label >= max_label): raise ConfigValueError(desc=('Invalid label_range configuration value:' ' (%s).' 
% label_range)) diff --git a/ryu/services/protocols/bgp/rtconf/neighbors.py b/ryu/services/protocols/bgp/rtconf/neighbors.py index 5efe383c..a12af816 100644 --- a/ryu/services/protocols/bgp/rtconf/neighbors.py +++ b/ryu/services/protocols/bgp/rtconf/neighbors.py @@ -60,7 +60,6 @@ from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS from ryu.services.protocols.bgp.rtconf.base import validate from ryu.services.protocols.bgp.rtconf.base import validate_med from ryu.services.protocols.bgp.rtconf.base import validate_soo_list -from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4 from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn from ryu.services.protocols.bgp.info_base.base import Filter from ryu.services.protocols.bgp.info_base.base import PrefixFilter @@ -75,6 +74,7 @@ ENABLED = 'enabled' CHANGES = 'changes' LOCAL_ADDRESS = 'local_address' LOCAL_PORT = 'local_port' +LOCAL_AS = 'local_as' PEER_NEXT_HOP = 'peer_next_hop' PASSWORD = 'password' IN_FILTER = 'in_filter' @@ -300,7 +300,7 @@ class NeighborConf(ConfWithId, ConfWithStats): CAP_RTC, RTC_AS, HOLD_TIME, ENABLED, MULTI_EXIT_DISC, MAX_PREFIXES, ADVERTISE_PEER_AS, SITE_OF_ORIGINS, - LOCAL_ADDRESS, LOCAL_PORT, + LOCAL_ADDRESS, LOCAL_PORT, LOCAL_AS, PEER_NEXT_HOP, PASSWORD, IN_FILTER, OUT_FILTER, IS_ROUTE_SERVER_CLIENT, CHECK_FIRST_AS, @@ -367,6 +367,13 @@ class NeighborConf(ConfWithId, ConfWithStats): self._settings[LOCAL_PORT] = compute_optional_conf( LOCAL_PORT, None, **kwargs) + # We use the global defined local (router) AS as the default + # local AS. + from ryu.services.protocols.bgp.core_manager import CORE_MANAGER + g_local_as = CORE_MANAGER.common_conf.local_as + self._settings[LOCAL_AS] = compute_optional_conf( + LOCAL_AS, g_local_as, **kwargs) + self._settings[PEER_NEXT_HOP] = compute_optional_conf( PEER_NEXT_HOP, None, **kwargs) @@ -374,14 +381,11 @@ class NeighborConf(ConfWithId, ConfWithStats): PASSWORD, None, **kwargs) # RTC configurations. 
- self._settings[CAP_RTC] = \ - compute_optional_conf(CAP_RTC, DEFAULT_CAP_RTC, **kwargs) + self._settings[CAP_RTC] = compute_optional_conf( + CAP_RTC, DEFAULT_CAP_RTC, **kwargs) # Default RTC_AS is local (router) AS. - from ryu.services.protocols.bgp.core_manager import \ - CORE_MANAGER - default_rt_as = CORE_MANAGER.common_conf.local_as - self._settings[RTC_AS] = \ - compute_optional_conf(RTC_AS, default_rt_as, **kwargs) + self._settings[RTC_AS] = compute_optional_conf( + RTC_AS, g_local_as, **kwargs) # Since ConfWithId' default values use str(self) and repr(self), we # call super method after we have initialized other settings. @@ -437,6 +441,10 @@ class NeighborConf(ConfWithId, ConfWithStats): # Optional attributes with valid defaults. # ========================================================================= + @property + def local_as(self): + return self._settings[LOCAL_AS] + @property def hold_time(self): return self._settings[HOLD_TIME] diff --git a/ryu/services/protocols/bgp/rtconf/vrfs.py b/ryu/services/protocols/bgp/rtconf/vrfs.py index ea9c695c..ecf6463c 100644 --- a/ryu/services/protocols/bgp/rtconf/vrfs.py +++ b/ryu/services/protocols/bgp/rtconf/vrfs.py @@ -22,7 +22,6 @@ import logging from ryu.lib.packet.bgp import RF_IPv4_UC from ryu.lib.packet.bgp import RF_IPv6_UC -from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities from ryu.services.protocols.bgp.utils import validation from ryu.services.protocols.bgp.base import get_validator diff --git a/ryu/services/protocols/bgp/signals/__init__.py b/ryu/services/protocols/bgp/signals/__init__.py index 0b69c99d..27d60d48 100644 --- a/ryu/services/protocols/bgp/signals/__init__.py +++ b/ryu/services/protocols/bgp/signals/__init__.py @@ -1,5 +1,4 @@ -__author__ = 'yak' - from ryu.services.protocols.bgp.signals.base import SignalBus __all__ = [SignalBus] +__author__ = 'yak' diff --git a/ryu/services/protocols/bgp/speaker.py b/ryu/services/protocols/bgp/speaker.py index 1e92166b..31553b27 100644 
--- a/ryu/services/protocols/bgp/speaker.py +++ b/ryu/services/protocols/bgp/speaker.py @@ -24,14 +24,11 @@ from socket import IPPROTO_TCP, TCP_NODELAY from eventlet import semaphore from ryu.lib.packet import bgp -from ryu.lib.packet.bgp import RouteFamily -from ryu.lib.packet.bgp import RF_RTC_UC from ryu.lib.packet.bgp import BGPMessage from ryu.lib.packet.bgp import BGPOpen from ryu.lib.packet.bgp import BGPUpdate from ryu.lib.packet.bgp import BGPKeepAlive from ryu.lib.packet.bgp import BGPNotification -from ryu.lib.packet.bgp import BGPRouteRefresh from ryu.lib.packet.bgp import BGP_MSG_OPEN from ryu.lib.packet.bgp import BGP_MSG_UPDATE from ryu.lib.packet.bgp import BGP_MSG_KEEPALIVE @@ -39,7 +36,6 @@ from ryu.lib.packet.bgp import BGP_MSG_NOTIFICATION from ryu.lib.packet.bgp import BGP_MSG_ROUTE_REFRESH from ryu.lib.packet.bgp import BGP_CAP_ENHANCED_ROUTE_REFRESH from ryu.lib.packet.bgp import BGP_CAP_MULTIPROTOCOL -from ryu.lib.packet.bgp import BGP_CAP_ROUTE_REFRESH from ryu.lib.packet.bgp import BGP_ERROR_HOLD_TIMER_EXPIRED from ryu.lib.packet.bgp import BGP_ERROR_SUB_HOLD_TIMER_EXPIRED from ryu.lib.packet.bgp import get_rf @@ -105,7 +101,7 @@ class BgpProtocol(Protocol, Activity): Activity.__init__(self, name=activity_name) # Intialize instance variables. self._peer = None - self._recv_buff = '' + self._recv_buff = b'' self._socket = socket self._socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) self._sendlock = semaphore.Semaphore() @@ -143,7 +139,7 @@ class BgpProtocol(Protocol, Activity): '`BgpProtocol`') # Compare protocol connection end point's addresses - if (self._remotename[0] == other_protoco._remotename[0] and + if (self._remotename[0] == other_protocol._remotename[0] and self._localname[0] == other_protocol._localname[0]): return True @@ -324,25 +320,23 @@ class BgpProtocol(Protocol, Activity): raise bgp.NotSync() # Check if we have valid bgp message length. 
- check = lambda: length < BGP_MIN_MSG_LEN\ - or length > BGP_MAX_MSG_LEN + check = (length < BGP_MIN_MSG_LEN or length > BGP_MAX_MSG_LEN) # RFC says: The minimum length of the OPEN message is 29 # octets (including the message header). - check2 = lambda: ptype == BGP_MSG_OPEN\ - and length < BGPOpen._MIN_LEN + check2 = (ptype == BGP_MSG_OPEN and length < BGPOpen._MIN_LEN) # RFC says: A KEEPALIVE message consists of only the # message header and has a length of 19 octets. - check3 = lambda: ptype == BGP_MSG_KEEPALIVE\ - and length != BGPKeepAlive._MIN_LEN + check3 = (ptype == BGP_MSG_KEEPALIVE and + length != BGPKeepAlive._MIN_LEN) # RFC says: The minimum length of the UPDATE message is 23 # octets. - check4 = lambda: ptype == BGP_MSG_UPDATE\ - and length < BGPUpdate._MIN_LEN + check4 = (ptype == BGP_MSG_UPDATE and + length < BGPUpdate._MIN_LEN) - if check() or check2() or check3() or check4(): + if any((check, check2, check3, check4)): raise bgp.BadLen(ptype, length) # If we have partial message we wait for rest of the message. @@ -380,7 +374,7 @@ class BgpProtocol(Protocol, Activity): self._sendlock.acquire() try: self._socket.sendall(msg.serialize()) - except socket.error as err: + except socket.error: self.connection_lost('failed to write to socket') finally: self._sendlock.release() diff --git a/ryu/services/protocols/bgp/utils/circlist.py b/ryu/services/protocols/bgp/utils/circlist.py index d22ec215..4a04f4fb 100644 --- a/ryu/services/protocols/bgp/utils/circlist.py +++ b/ryu/services/protocols/bgp/utils/circlist.py @@ -13,7 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from six.moves import intern +import six +if six.PY3: + from sys import intern class CircularListType(object): diff --git a/ryu/services/protocols/bgp/utils/dictconfig.py b/ryu/services/protocols/bgp/utils/dictconfig.py index e21e8eed..22f9afcf 100644 --- a/ryu/services/protocols/bgp/utils/dictconfig.py +++ b/ryu/services/protocols/bgp/utils/dictconfig.py @@ -234,7 +234,7 @@ class BaseConfigurator(object): isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self - elif isinstance(value, basestring): # str for py3k + elif isinstance(value, six.string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() diff --git a/ryu/services/protocols/bgp/utils/internable.py b/ryu/services/protocols/bgp/utils/internable.py deleted file mode 100644 index 9f5e8d95..00000000 --- a/ryu/services/protocols/bgp/utils/internable.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import weakref -from six.moves import intern - -dict_name = intern('_internable_dict') - - -# -# Internable -# -class Internable(object): - """Class that allows instances to be 'interned'. That is, given an - instance of this class, one can obtain a canonical (interned) - copy. 
- - This saves memory when there are likely to be many identical - instances of the class -- users hold references to a single - interned object instead of references to different objects that - are identical. - - The interned version of a given instance is created on demand if - necessary, and automatically cleaned up when nobody holds a - reference to it. - - Instances of sub-classes must be usable as dictionary keys for - Internable to work. - """ - - class Stats(object): - - def __init__(self): - self.d = {} - - def incr(self, name): - self.d[name] = self.d.get(name, 0) + 1 - - def __repr__(self): - return repr(self.d) - - def __str__(self): - return str(self.d) - - @classmethod - def _internable_init(kls): - # Objects to be interned are held as keys in a dictionary that - # only holds weak references to keys. As a result, when the - # last reference to an interned object goes away, the object - # will be removed from the dictionary. - kls._internable_dict = weakref.WeakKeyDictionary() - kls._internable_stats = Internable.Stats() - - @classmethod - def intern_stats(kls): - return kls._internable_stats - - def intern(self): - """Returns either itself or a canonical copy of itself.""" - - # If this is an interned object, return it - if hasattr(self, '_interned'): - return self._internable_stats.incr('self') - - # - # Got to find or create an interned object identical to this - # one. Auto-initialize the class if need be. - # - kls = self.__class__ - - if not hasattr(kls, dict_name): - kls._internable_init() - - obj = kls._internable_dict.get(self) - if (obj): - # Found an interned copy. - kls._internable_stats.incr('found') - return obj - - # Create an interned copy. Take care to only keep a weak - # reference to the object itself. 
- def object_collected(obj): - kls._internable_stats.incr('collected') - # print("Object %s garbage collected" % obj) - pass - - ref = weakref.ref(self, object_collected) - kls._internable_dict[self] = ref - self._interned = True - kls._internable_stats.incr('inserted') - return self diff --git a/ryu/services/protocols/bgp/utils/logs.py b/ryu/services/protocols/bgp/utils/logs.py deleted file mode 100644 index aa07d5c1..00000000 --- a/ryu/services/protocols/bgp/utils/logs.py +++ /dev/null @@ -1,36 +0,0 @@ -import json -import logging -import six -import time - -from datetime import datetime - - -class ApgwFormatter(logging.Formatter): - LOG_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' - COMPONENT_NAME = 'BGPSpeaker' - - def format(self, record): - msg = { - 'component_name': self.COMPONENT_NAME, - 'timestamp': datetime.utcfromtimestamp( - time.time() - ).strftime(self.LOG_TIME_FORMAT), - 'msg': six.text_type(record.msg), - 'level': record.levelname - - } - - if hasattr(record, 'log_type'): - assert record.log_type in ('log', 'stats', 'state') - msg['log_type'] = record.log_type - else: - msg['log_type'] = 'log' - if hasattr(record, 'resource_id'): - msg['resource_id'] = record.resource_id - if hasattr(record, 'resource_name'): - msg['resource_name'] = record.resource_name - - record.msg = json.dumps(msg) - - return super(ApgwFormatter, self).format(record) diff --git a/ryu/services/protocols/bgp/utils/other.py b/ryu/services/protocols/bgp/utils/other.py deleted file mode 100644 index 94f849a8..00000000 --- a/ryu/services/protocols/bgp/utils/other.py +++ /dev/null @@ -1,11 +0,0 @@ -def bytes2hex(given_bytes): - return ''.join(["%02X " % ord(x) for x in given_bytes]).strip() - - -def hex2byte(given_hex): - given_hex = ''.join(given_hex.split()) - result = [] - for offset in range(0, len(given_hex), 2): - result.append(chr(int(given_hex[offset:offset + 2], 16))) - - return ''.join(result) diff --git a/ryu/services/protocols/ovsdb/api.py b/ryu/services/protocols/ovsdb/api.py 
index ea73cbf2..c1f04398 100644 --- a/ryu/services/protocols/ovsdb/api.py +++ b/ryu/services/protocols/ovsdb/api.py @@ -15,6 +15,34 @@ from ryu.lib import dpid as dpidlib from ryu.services.protocols.ovsdb import event as ovsdb_event +import six + +import uuid + + +def _get_table_row(table, attr_name, attr_value, tables): + sentinel = object() + + for row in tables[table].rows.values(): + if getattr(row, attr_name, sentinel) == attr_value: + return row + + +def _get_controller(tables, attr_val, attr_name='target'): + return _get_table_row('Controller', attr_name, attr_val, tables=tables) + + +def _get_bridge(tables, attr_val, attr_name='name'): + return _get_table_row('Bridge', attr_name, attr_val, tables=tables) + + +def _get_port(tables, attr_val, attr_name='name'): + return _get_table_row('Port', attr_name, attr_val, tables=tables) + + +def _get_iface(tables, attr_val, attr_name='name'): + return _get_table_row('Interface', attr_name, attr_val, tables=tables) + def match_row(manager, system_id, table, fn): def _match_row(tables): @@ -46,6 +74,30 @@ def row_by_name(manager, system_id, name, table='Bridge', fn=None): return matched_row +def rows_by_external_id(manager, system_id, key, value, + table='Bridge', fn=None): + matched_rows = match_rows(manager, system_id, table, + lambda r: (key in r.external_ids and + r.external_ids.get(key) == value)) + + if matched_rows and fn is not None: + return [fn(row) for row in matched_rows] + + return matched_rows + + +def rows_by_other_config(manager, system_id, key, value, + table='Bridge', fn=None): + matched_rows = match_rows(manager, system_id, table, + lambda r: (key in r.other_config and + r.other_config.get(key) == value)) + + if matched_rows and fn is not None: + return [fn(row) for row in matched_rows] + + return matched_rows + + def get_column_value(manager, table, record, column): """ Example : To get datapath_id from Bridge table @@ -69,6 +121,25 @@ def get_iface_by_name(manager, system_id, name, fn=None): return 
iface +def get_ifaces_by_external_id(manager, system_id, key, value, fn=None): + return rows_by_external_id(manager, system_id, key, value, + 'Interface', fn) + + +def get_ifaces_by_other_config(manager, system_id, key, value, fn=None): + return rows_by_other_config(manager, system_id, key, value, + 'Interface', fn) + + +def get_port_by_name(manager, system_id, name, fn=None): + port = row_by_name(manager, system_id, name, 'Port') + + if fn is not None: + return fn(port) + + return port + + def get_bridge_for_iface_name(manager, system_id, iface_name, fn=None): iface = row_by_name(manager, system_id, iface_name, 'Interface') port = match_row(manager, system_id, 'Port', @@ -125,8 +196,44 @@ def get_datapath_ids_for_systemd_id(manager, system_id): return reply.result -def get_bridges_by_system_id(manager, system_id): - return get_table(manager, system_id, 'Bridge').rows.values() +def get_system_id_for_datapath_id(manager, datapath_id): + def _get_dp_ids(tables): + bridges = tables.get('Bridge') + + if not bridges: + return None + + for bridge in bridges.rows.values(): + datapath_ids = [dpidlib.str_to_dpid(dp_id) + for dp_id in bridge.datapath_id] + + if datapath_id in datapath_ids: + openvswitch = tables['Open_vSwitch'].rows + + if openvswitch: + row = openvswitch.get(list(openvswitch.keys())[0]) + return row.external_ids.get('system-id') + + return None + + request = ovsdb_event.EventReadRequest(None, _get_dp_ids) + reply = manager.send_request(request) + + # NOTE(jkoelker) Bulk reads return a tuple of (system_id, result) + for result in reply.result: + if result[1]: + return result[0] + + return None + + +def get_bridges_by_system_id(manager, system_id, fn=None): + bridges = get_table(manager, system_id, 'Bridge').rows.values() + + if fn is not None: + return fn(bridges) + + return bridges def bridge_exists(manager, system_id, bridge_name): @@ -135,3 +242,196 @@ def bridge_exists(manager, system_id, bridge_name): def port_exists(manager, system_id, port_name): 
return bool(row_by_name(manager, system_id, port_name, 'Port')) + + +def set_external_id(manager, system_id, key, val, fn): + val = str(val) + + def _set_iface_external_id(tables, *_): + row = fn(tables) + + if not row: + return None + + external_ids = row.external_ids + external_ids[key] = val + row.external_ids = external_ids + + req = ovsdb_event.EventModifyRequest(system_id, _set_iface_external_id) + return manager.send_request(req) + + +def set_iface_external_id(manager, system_id, iface_name, key, val): + return set_external_id(manager, system_id, key, val, + lambda tables: _get_iface(tables, iface_name)) + + +def set_other_config(manager, system_id, key, val, fn): + val = str(val) + + def _set_iface_other_config(tables, *_): + row = fn(tables) + + if not row: + return None + + other_config = row.other_config + other_config[key] = val + row.other_config = other_config + + req = ovsdb_event.EventModifyRequest(system_id, _set_iface_other_config) + return manager.send_request(req) + + +def set_iface_other_config(manager, system_id, iface_name, key, val): + return set_other_config(manager, system_id, key, val, + lambda tables: _get_iface(tables, iface_name)) + + +def del_external_id(manager, system_id, key, fn): + def _del_iface_external_id(tables, *_): + row = fn(tables) + + if not row: + return None + + external_ids = row.external_ids + if key in external_ids: + external_ids.pop(key) + row.external_ids = external_ids + + req = ovsdb_event.EventModifyRequest(system_id, _del_iface_external_id) + return manager.send_request(req) + + +def del_iface_external_id(manager, system_id, iface_name, key): + return del_external_id(manager, system_id, key, + lambda tables: _get_iface(tables, iface_name)) + + +def del_other_config(manager, system_id, key, fn): + def _del_iface_other_config(tables, *_): + row = fn(tables) + + if not row: + return None + + other_config = row.other_config + if key in other_config: + other_config.pop(key) + row.other_config = other_config + + req 
= ovsdb_event.EventModifyRequest(system_id, _del_iface_other_config) + return manager.send_request(req) + + +def del_iface_other_config(manager, system_id, iface_name, key): + return del_other_config(manager, system_id, key, + lambda tables: _get_iface(tables, iface_name)) + + +def del_port(manager, system_id, bridge_name, fn): + def _delete_port(tables, *_): + bridge = _get_bridge(tables, bridge_name) + + if not bridge: + return + + port = fn(tables) + + if not port: + return + + ports = bridge.ports + ports.remove(port) + bridge.ports = ports + + req = ovsdb_event.EventModifyRequest(system_id, _delete_port) + + return manager.send_request(req) + + +def del_port_by_uuid(manager, system_id, bridge_name, port_uuid): + return del_port(manager, system_id, bridge_name, + lambda tables: _get_port(tables, port_uuid, + attr_name='uuid')) + + +def del_port_by_name(manager, system_id, bridge_name, port_name): + return del_port(manager, system_id, bridge_name, + lambda tables, _: _get_port(tables, port_name)) + + +def set_controller(manager, system_id, bridge_name, + target, controller_info=None): + def _set_controller(tables, insert): + bridge = _get_bridge(tables, bridge_name) + + controller = _get_controller(tables, target) + _uuid = None + if not controller: + _uuid = controller_info.get('uuid', uuid.uuid4()) + controller = insert(tables['Controller'], _uuid) + controller.target = target + controller.connection_mode = ['out-of-band'] + + elif 'out-of-band' not in controller.connection_mode: + controller.connection_mode = ['out-of-band'] + + if controller_info: + for key, val in six.iteritems(controller_info): + setattr(controller, key, val) + + bridge.controller = [controller] + + return _uuid + + req = ovsdb_event.EventModifyRequest(system_id, _set_controller) + return manager.send_request(req) + + +def create_port(manager, system_id, bridge_name, port_info, iface_info=None, + port_insert_uuid=None, iface_insert_uuid=None): + if iface_info is None: + iface_info = {} + + 
if not port_insert_uuid: + port_insert_uuid = uuid.uuid4() + + if not iface_insert_uuid: + iface_insert_uuid = uuid.uuid4() + + def _create_port(tables, insert): + bridge = _get_bridge(tables, bridge_name) + + if not bridge: + return + + default_port_name = 'port' + str(port_insert_uuid) + + if 'name' not in iface_info: + iface_info['name'] = port_info.get('name', default_port_name) + + if 'type' not in iface_info: + iface_info['type'] = 'internal' + + if 'name' not in port_info: + port_info['name'] = default_port_name + + iface = insert(tables['Interface'], iface_insert_uuid) + for key, val in six.iteritems(iface_info): + setattr(iface, key, val) + + port = insert(tables['Port'], port_insert_uuid) + for key, val in six.iteritems(port_info): + setattr(port, key, val) + + port.interfaces = [iface] + + bridge.ports = bridge.ports + [port] + + return port_insert_uuid, iface_insert_uuid + + req = ovsdb_event.EventModifyRequest(system_id, _create_port) + + return manager.send_request(req) diff --git a/ryu/services/protocols/ovsdb/client.py b/ryu/services/protocols/ovsdb/client.py index 387d91b6..0f62f86e 100644 --- a/ryu/services/protocols/ovsdb/client.py +++ b/ryu/services/protocols/ovsdb/client.py @@ -14,25 +14,12 @@ # limitations under the License. 
import collections -import logging +import errno +import six import uuid -# NOTE(jkoelker) Patch Vlog so that is uses standard logging -from ovs import vlog - - -class Vlog(vlog.Vlog): - def __init__(self, name): - self.log = logging.getLogger('ovs.%s' % name) - - def __log(self, level, message, **kwargs): - level = vlog.LEVELS.get(level, logging.DEBUG) - self.log.log(level, message, **kwargs) - -vlog.Vlog = Vlog - - from ovs import jsonrpc +from ovs import poller from ovs import reconnect from ovs import stream from ovs import timeval @@ -63,8 +50,46 @@ def dictify(row): if row is None: return - return dict([(k, v.to_python(_uuid_to_row)) - for k, v in row._data.items()]) + result = {} + + for key, value in row._data.items(): + result[key] = value.to_python(_uuid_to_row) + hub.sleep(0) + + return result + + +def transact_block(request, connection): + """Emulate jsonrpc.Connection.transact_block without blocking eventlet. + """ + error = connection.send(request) + reply = None + + if error: + return error, reply + + ovs_poller = poller.Poller() + while not error: + ovs_poller.immediate_wake() + error, reply = connection.recv() + + if error != errno.EAGAIN: + break + + if (reply and + reply.id == request.id and + reply.type in (jsonrpc.Message.T_REPLY, + jsonrpc.Message.T_ERROR)): + break + + connection.run() + connection.wait(ovs_poller) + connection.recv_wait(ovs_poller) + ovs_poller.block() + + hub.sleep(0) + + return error, reply def discover_schemas(connection): @@ -72,7 +97,7 @@ def discover_schemas(connection): # is supported. 
# TODO(jkoelker) support arbitrary schemas req = jsonrpc.Message.create_request('list_dbs', []) - error, reply = connection.transact_block(req) + error, reply = transact_block(req, connection) if error or reply.error: return @@ -83,7 +108,7 @@ def discover_schemas(connection): continue req = jsonrpc.Message.create_request('get_schema', [db]) - error, reply = connection.transact_block(req) + error, reply = transact_block(req, connection) if error or reply.error: # TODO(jkoelker) Error handling @@ -108,6 +133,68 @@ def discover_system_id(idl): return system_id +def _filter_schemas(schemas, schema_tables, exclude_table_columns): + """Wrapper method for _filter_schema to filter multiple schemas.""" + return [_filter_schema(s, schema_tables, exclude_table_columns) + for s in schemas] + + +def _filter_schema(schema, schema_tables, exclude_table_columns): + """Filters a schema to only include the specified tables in the + schema_tables parameter. This will also filter out any colums for + included tables that reference tables that are not included + in the schema_tables parameter + + :param schema: Schema dict to be filtered + :param schema_tables: List of table names to filter on. + EX: ['Bridge', 'Controller', 'Interface'] + NOTE: This list is case sensitive. 
+ :return: Schema dict: + filtered if the schema_table parameter contains table names, + else the original schema dict + """ + + tables = {} + for tbl_name, tbl_data in schema['tables'].iteritems(): + if not schema_tables or tbl_name in schema_tables: + columns = {} + + exclude_columns = exclude_table_columns.get(tbl_name, []) + for col_name, col_data in tbl_data['columns'].iteritems(): + if col_name in exclude_columns: + continue + + # NOTE(Alan Quillin) Needs to check and remove + # and columns that have references to tables that + # are not to be configured + type_ = col_data.get('type') + if type_: + if type_ and isinstance(type_, dict): + key = type_.get('key') + if key and isinstance(key, dict): + ref_tbl = key.get('refTable') + if ref_tbl and isinstance(ref_tbl, + six.string_types): + if ref_tbl not in schema_tables: + continue + value = type_.get('value') + if value and isinstance(value, dict): + ref_tbl = value.get('refTable') + if ref_tbl and isinstance(ref_tbl, + six.string_types): + if ref_tbl not in schema_tables: + continue + + columns[col_name] = col_data + + tbl_data['columns'] = columns + tables[tbl_name] = tbl_data + + schema['tables'] = tables + + return schema + + # NOTE(jkoelker) Wrap ovs's Idl to accept an existing session, and # trigger callbacks on changes class Idl(idl.Idl): @@ -122,6 +209,7 @@ class Idl(idl.Idl): self._events = [] self.tables = schema.tables + self.readonly = schema.readonly self._db = schema self._session = session self._monitor_request_id = None @@ -192,7 +280,9 @@ class RemoteOvsdb(app_manager.RyuApp): event.EventPortUpdated] @classmethod - def factory(cls, sock, address, *args, **kwargs): + def factory(cls, sock, address, probe_interval=None, min_backoff=None, + max_backoff=None, schema_tables=None, + schema_exclude_columns={}, *args, **kwargs): ovs_stream = stream.Stream(sock, None, None) connection = jsonrpc.Connection(ovs_stream) schemas = discover_schemas(connection) @@ -200,11 +290,28 @@ class 
RemoteOvsdb(app_manager.RyuApp): if not schemas: return + if schema_tables or schema_exclude_columns: + schemas = _filter_schemas(schemas, schema_tables, + schema_exclude_columns) + fsm = reconnect.Reconnect(now()) fsm.set_name('%s:%s' % address) fsm.enable(now()) fsm.set_passive(True, now()) fsm.set_max_tries(-1) + + if probe_interval is not None: + fsm.set_probe_interval(probe_interval) + + if min_backoff is None: + min_backoff = fsm.get_min_backoff() + + if max_backoff is None: + max_backoff = fsm.get_max_backoff() + + if min_backoff and max_backoff: + fsm.set_backoff(min_backoff, max_backoff) + fsm.connected(now()) session = jsonrpc.Session(fsm, connection) @@ -261,9 +368,9 @@ class RemoteOvsdb(app_manager.RyuApp): hub.sleep(0.1) continue - for event in events: - ev = event[0] - args = event[1] + for e in events: + ev = e[0] + args = e[1] self._submit_event(ev(self.system_id, *args)) hub.sleep(0) @@ -334,8 +441,15 @@ class RemoteOvsdb(app_manager.RyuApp): def modify_request_handler(self, ev): self._txn_q.append(ev) - def read_request_handler(self, ev): + def read_request_handler(self, ev, bulk=False): result = ev.func(self._idl.tables) + + # NOTE(jkoelker) If this was a bulk request, the parent OVSDB app is + # responsible for the reply + + if bulk: + return (self.system_id, result) + rep = event.EventReadReply(self.system_id, result) self.reply_to_request(ev, rep) diff --git a/ryu/services/protocols/ovsdb/event.py b/ryu/services/protocols/ovsdb/event.py index 2353a4ff..486e5c74 100644 --- a/ryu/services/protocols/ovsdb/event.py +++ b/ryu/services/protocols/ovsdb/event.py @@ -98,6 +98,9 @@ class EventModifyRequest(ryu_event.EventRequestBase): self.system_id = system_id self.func = func + def __str__(self): + return '%s' % (self.__class__.__name__, self.system_id) + class EventModifyReply(ryu_event.EventReplyBase): def __init__(self, system_id, status, insert_uuids, err_msg): @@ -106,6 +109,14 @@ class EventModifyReply(ryu_event.EventReplyBase): 
self.insert_uuids = insert_uuids self.err_msg = err_msg + def __str__(self): + return ('%s' + % (self.__class__.__name__, + self.system_id, + self.status, + self.insert_uuids, + self.err_msg)) + class EventNewOVSDBConnection(ryu_event.EventBase): def __init__(self, system_id): diff --git a/ryu/services/protocols/ovsdb/manager.py b/ryu/services/protocols/ovsdb/manager.py index 327f2167..86a2d1ff 100644 --- a/ryu/services/protocols/ovsdb/manager.py +++ b/ryu/services/protocols/ovsdb/manager.py @@ -26,10 +26,24 @@ from ryu.controller import handler opts = (cfg.StrOpt('address', default='0.0.0.0', help='OVSDB address'), cfg.IntOpt('port', default=6640, help='OVSDB port'), + cfg.IntOpt('probe-interval', help='OVSDB reconnect probe interval'), + cfg.IntOpt('min-backoff', + help=('OVSDB reconnect minimum milliseconds between ' + 'connection attemps')), + cfg.IntOpt('max-backoff', + help=('OVSDB reconnect maximum milliseconds between ' + 'connection attemps')), cfg.StrOpt('mngr-privkey', default=None, help='manager private key'), cfg.StrOpt('mngr-cert', default=None, help='manager certificate'), cfg.ListOpt('whitelist', default=[], - help='Whitelist of address to allow to connect')) + help='Whitelist of address to allow to connect'), + cfg.ListOpt('schema-tables', default=[], + help='Tables in the OVSDB schema to configure'), + cfg.ListOpt('schema-exclude-columns', default=[], + help='Table columns in the OVSDB schema to filter out. ' + 'Values should be in the format: ..' 
+ 'Ex: Bridge.netflow,Interface.statistics') + ) cfg.CONF.register_opts(opts, 'ovsdb') @@ -43,6 +57,9 @@ class OVSDB(app_manager.RyuApp): super(OVSDB, self).__init__(*args, **kwargs) self._address = self.CONF.ovsdb.address self._port = self.CONF.ovsdb.port + self._probe_interval = self.CONF.ovsdb.probe_interval + self._min_backoff = self.CONF.ovsdb.min_backoff + self._max_backoff = self.CONF.ovsdb.max_backoff self._clients = {} def _accept(self, server): @@ -78,6 +95,26 @@ class OVSDB(app_manager.RyuApp): t = hub.spawn(self._start_remote, sock, client_address) self.threads.append(t) + def _bulk_read_handler(self, ev): + results = [] + + def done(gt, *args, **kwargs): + if gt in self.threads: + self.threads.remove(gt) + + results.append(gt.wait()) + + threads = [] + for c in self._clients.values(): + gt = hub.spawn(c.read_request_handler, ev, bulk=True) + threads.append(gt) + self.threads.append(gt) + gt.link(done) + + hub.joinall(threads) + rep = event.EventReadReply(None, results) + self.reply_to_request(ev, rep) + def _proxy_event(self, ev): system_id = ev.system_id client_name = client.RemoteOvsdb.instance_name(system_id) @@ -89,7 +126,22 @@ class OVSDB(app_manager.RyuApp): return self.send_event(client_name, ev) def _start_remote(self, sock, client_address): - app = client.RemoteOvsdb.factory(sock, client_address) + schema_tables = cfg.CONF.ovsdb.schema_tables + schema_ex_col = {} + if cfg.CONF.ovsdb.schema_exclude_columns: + for c in cfg.CONF.ovsdb.schema_exclude_columns: + tbl, col = c.split('.') + if tbl in schema_ex_col: + schema_ex_col[tbl].append(col) + else: + schema_ex_col[tbl] = [col] + + app = client.RemoteOvsdb.factory(sock, client_address, + probe_interval=self._probe_interval, + min_backoff=self._min_backoff, + max_backoff=self._max_backoff, + schema_tables=schema_tables, + schema_exclude_columns=schema_ex_col) if app: self._clients[app.name] = app @@ -137,8 +189,8 @@ class OVSDB(app_manager.RyuApp): self.main_thread = None # NOTE(jkoelker) Stop 
all the clients - for client in self._clients.values(): - client.stop() + for c in self._clients.values(): + c.stop() # NOTE(jkoelker) super will only take care of the event and joining now super(OVSDB, self).stop() @@ -161,6 +213,16 @@ class OVSDB(app_manager.RyuApp): @handler.set_ev_cls(event.EventReadRequest) def read_request_handler(self, ev): system_id = ev.system_id + + if system_id is None: + def done(gt, *args, **kwargs): + if gt in self.threads: + self.threads.remove(gt) + + thread = hub.spawn(self._bulk_read_handler, ev) + self.threads.append(thread) + return thread.link(done) + client_name = client.RemoteOvsdb.instance_name(system_id) remote = self._clients.get(client_name) diff --git a/ryu/services/protocols/vrrp/router.py b/ryu/services/protocols/vrrp/router.py index 5f95291b..8278831f 100644 --- a/ryu/services/protocols/vrrp/router.py +++ b/ryu/services/protocols/vrrp/router.py @@ -283,26 +283,27 @@ class VRRPRouter(app_manager.RyuApp): class VRRPV2StateInitialize(VRRPState): # In theory this shouldn't be called. 
def master_down(self, ev): - self.vrrp_router.logger.warn('%s master_down', self.__class__.__name__) + self.vrrp_router.logger.warning('%s master_down', + self.__class__.__name__) def adver(self, ev): - self.vrrp_router.logger.warn('%s adver', self.__class__.__name__) + self.vrrp_router.logger.warning('%s adver', self.__class__.__name__) def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) def vrrp_received(self, ev): - self.vrrp_router.logger.warn('%s vrrp_received', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s vrrp_received', + self.__class__.__name__) def vrrp_shutdown_request(self, ev): - self.vrrp_router.logger.warn('%s vrrp_shutdown_request', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s vrrp_shutdown_request', + self.__class__.__name__) def vrrp_config_change_request(self, ev): - self.vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) class VRRPV2StateMaster(VRRPState): @@ -325,8 +326,8 @@ class VRRPV2StateMaster(VRRPState): self._adver() def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) def vrrp_received(self, ev): vrrp_router = self.vrrp_router @@ -360,8 +361,8 @@ class VRRPV2StateMaster(VRRPState): def vrrp_config_change_request(self, ev): vrrp_router = self.vrrp_router - vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) if ev.priority is not None or ev.advertisement_interval is not None: vrrp_router.adver_timer.cancel() self._adver() @@ -404,8 +405,8 @@ class VRRPV2StateBackup(VRRPState): 
ev.__class__.__name__, vrrp_router.state) def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) self._master_down() def vrrp_received(self, ev): @@ -440,8 +441,8 @@ class VRRPV2StateBackup(VRRPState): def vrrp_config_change_request(self, ev): vrrp_router = self.vrrp_router - vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) if ev.priority is not None and vrrp_router.config.address_owner: vrrp_router.master_down_timer.cancel() self._master_down() @@ -494,8 +495,8 @@ class VRRPV3StateInitialize(VRRPState): self.vrrp_router.logger.debug('%s adver', self.__class__.__name__) def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) def vrrp_received(self, ev): self.vrrp_router.logger.debug('%s vrrp_received', @@ -506,8 +507,8 @@ class VRRPV3StateInitialize(VRRPState): self.__class__.__name__) def vrrp_config_change_request(self, ev): - self.vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) class VRRPV3StateMaster(VRRPState): @@ -530,8 +531,8 @@ class VRRPV3StateMaster(VRRPState): self._adver() def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) def vrrp_received(self, ev): vrrp_router = self.vrrp_router @@ -566,8 +567,8 @@ class VRRPV3StateMaster(VRRPState): def vrrp_config_change_request(self, ev): vrrp_router = self.vrrp_router - vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + 
vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) if ev.priority is not None or ev.advertisement_interval is not None: vrrp_router.adver_timer.cancel() self._adver() @@ -621,8 +622,8 @@ class VRRPV3StateBackup(VRRPState): ev.__class__.__name__, vrrp_router.state) def preempt_delay(self, ev): - self.vrrp_router.logger.warn('%s preempt_delay', - self.__class__.__name__) + self.vrrp_router.logger.warning('%s preempt_delay', + self.__class__.__name__) self._master_down() def vrrp_received(self, ev): @@ -657,8 +658,8 @@ class VRRPV3StateBackup(VRRPState): def vrrp_config_change_request(self, ev): vrrp_router = self.vrrp_router - vrrp_router.logger.warn('%s vrrp_config_change_request', - self.__class__.__name__) + vrrp_router.logger.warning('%s vrrp_config_change_request', + self.__class__.__name__) if ev.priority is not None and vrrp_router.config.address_owner: vrrp_router.master_down_timer.cancel() self._master_down() diff --git a/ryu/tests/packet_data/bgp4/bgp4-keepalive b/ryu/tests/packet_data/bgp4/bgp4-keepalive deleted file mode 100644 index 78ebde2e..00000000 Binary files a/ryu/tests/packet_data/bgp4/bgp4-keepalive and /dev/null differ diff --git a/ryu/tests/packet_data/bgp4/bgp4-keepalive.pcap b/ryu/tests/packet_data/bgp4/bgp4-keepalive.pcap new file mode 100644 index 00000000..355e9dab Binary files /dev/null and b/ryu/tests/packet_data/bgp4/bgp4-keepalive.pcap differ diff --git a/ryu/tests/packet_data/bgp4/bgp4-open b/ryu/tests/packet_data/bgp4/bgp4-open deleted file mode 100644 index 701f2fbd..00000000 Binary files a/ryu/tests/packet_data/bgp4/bgp4-open and /dev/null differ diff --git a/ryu/tests/packet_data/bgp4/bgp4-open.pcap b/ryu/tests/packet_data/bgp4/bgp4-open.pcap new file mode 100644 index 00000000..1ef507bb Binary files /dev/null and b/ryu/tests/packet_data/bgp4/bgp4-open.pcap differ diff --git a/ryu/tests/packet_data/bgp4/bgp4-update b/ryu/tests/packet_data/bgp4/bgp4-update deleted file mode 100644 index 
8fbc238d..00000000 Binary files a/ryu/tests/packet_data/bgp4/bgp4-update and /dev/null differ diff --git a/ryu/tests/packet_data/bgp4/bgp4-update.pcap b/ryu/tests/packet_data/bgp4/bgp4-update.pcap new file mode 100644 index 00000000..37354938 Binary files /dev/null and b/ryu/tests/packet_data/bgp4/bgp4-update.pcap differ diff --git a/ryu/tests/packet_data/of13/4-12-ofp_flow_stats_reply.packet b/ryu/tests/packet_data/of13/4-12-ofp_flow_stats_reply.packet index 90ff2490..c7c1ac88 100644 Binary files a/ryu/tests/packet_data/of13/4-12-ofp_flow_stats_reply.packet and b/ryu/tests/packet_data/of13/4-12-ofp_flow_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of13/4-2-ofp_flow_mod.packet b/ryu/tests/packet_data/of13/4-2-ofp_flow_mod.packet index 5e2a9d69..0c2029bd 100644 Binary files a/ryu/tests/packet_data/of13/4-2-ofp_flow_mod.packet and b/ryu/tests/packet_data/of13/4-2-ofp_flow_mod.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_conjunction.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_conjunction.packet index 40a91016..ba029b2c 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_conjunction.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_conjunction.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_controller.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_controller.packet new file mode 100644 index 00000000..8fba9040 Binary files /dev/null and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_controller.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct.packet index 3d802d98..3aff2bdc 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_exec.packet 
b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_exec.packet index 88bdf52b..3f0b3431 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_exec.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_exec.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat.packet index 2d5ec7a8..e210a38c 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat_v6.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat_v6.packet index a3b94113..94b2aaa9 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat_v6.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_ct_nat_v6.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_fintimeout.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_fintimeout.packet new file mode 100644 index 00000000..78c3eab4 Binary files /dev/null and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_fintimeout.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_learn.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_learn.packet index 387e59ec..39c71e20 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_learn.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_learn.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_note.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_note.packet new file mode 100644 index 00000000..9a1491c9 Binary files /dev/null and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_note.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_resubmit.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_resubmit.packet index 9e8560cf..690b978b 100644 
Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_resubmit.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-action_resubmit.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_conj.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_conj.packet index 74a1c0ef..1f9c57cd 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_conj.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_conj.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_load_nx_register.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_load_nx_register.packet new file mode 100644 index 00000000..03ce7374 Binary files /dev/null and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_load_nx_register.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_move_nx_register.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_move_nx_register.packet index 536db74a..eb9cf8a6 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_move_nx_register.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_move_nx_register.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark.packet index a9e4cb8c..46ea0dc9 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark.packet differ diff --git a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet index 99393128..26db6b92 100644 Binary files a/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet and b/ryu/tests/packet_data/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-barrier_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-barrier_reply.packet new file mode 
100644 index 00000000..fc15112c Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-barrier_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-desc_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-desc_reply.packet new file mode 100644 index 00000000..261333c4 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-desc_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-desc_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-desc_request.packet new file mode 100644 index 00000000..11b39877 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-desc_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-error_msg_experimenter.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-error_msg_experimenter.packet new file mode 100644 index 00000000..3fba70c2 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-error_msg_experimenter.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter.packet new file mode 100644 index 00000000..c96393aa Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_reply.packet new file mode 100644 index 00000000..9575ae9b Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_request.packet new file mode 100644 index 00000000..739437cb Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-experimenter_request.packet differ diff --git 
a/ryu/tests/packet_data/of15/libofproto-OFP15-features_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-features_request.packet new file mode 100644 index 00000000..8d7159e5 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-features_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_reply.packet new file mode 100644 index 00000000..94129c1c Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_request.packet new file mode 100644 index 00000000..203af31f Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_monitor_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-flow_stats_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_stats_request.packet new file mode 100644 index 00000000..8481a1bb Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-flow_stats_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_reply.packet new file mode 100644 index 00000000..aa14b1e2 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_request.packet new file mode 100644 index 00000000..b8672b82 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-get_async_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-get_config_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-get_config_request.packet new file mode 100644 
index 00000000..15fc95f0 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-get_config_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-group_features_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-group_features_reply.packet new file mode 100644 index 00000000..14ddc120 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-group_features_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-group_stats_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-group_stats_reply.packet new file mode 100644 index 00000000..4853628d Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-group_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_reply.packet new file mode 100644 index 00000000..026199ae Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_request.packet new file mode 100644 index 00000000..3a008900 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_desc_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_reply.packet new file mode 100644 index 00000000..b7e5a935 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_request.packet new file mode 100644 index 00000000..d85918df Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_features_request.packet differ diff --git 
a/ryu/tests/packet_data/of15/libofproto-OFP15-meter_stats_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_stats_reply.packet new file mode 100644 index 00000000..2db08d65 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-meter_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-port_desc_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-port_desc_reply.packet new file mode 100644 index 00000000..c687e5cd Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-port_desc_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-port_mod.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-port_mod.packet new file mode 100644 index 00000000..1aab2c48 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-port_mod.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-port_stats_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-port_stats_reply.packet new file mode 100644 index 00000000..9d190d01 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-port_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-port_status.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-port_status.packet new file mode 100644 index 00000000..1d2d5305 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-port_status.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-queue_desc_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-queue_desc_reply.packet new file mode 100644 index 00000000..d8f8e318 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-queue_desc_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-queue_stats_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-queue_stats_reply.packet new file mode 100644 index 00000000..11744f90 Binary files /dev/null and 
b/ryu/tests/packet_data/of15/libofproto-OFP15-queue_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-set_async.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-set_async.packet new file mode 100644 index 00000000..9fbfd415 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-set_async.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-table_desc_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-table_desc_reply.packet new file mode 100644 index 00000000..4177a743 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-table_desc_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_reply.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_reply.packet new file mode 100644 index 00000000..6693467f Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_reply.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_request.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_request.packet new file mode 100644 index 00000000..aae25d76 Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-table_stats_request.packet differ diff --git a/ryu/tests/packet_data/of15/libofproto-OFP15-table_status.packet b/ryu/tests/packet_data/of15/libofproto-OFP15-table_status.packet new file mode 100644 index 00000000..3410d23c Binary files /dev/null and b/ryu/tests/packet_data/of15/libofproto-OFP15-table_status.packet differ diff --git a/ryu/tests/packet_data/pcap/big_endian.pcap b/ryu/tests/packet_data/pcap/big_endian.pcap new file mode 100644 index 00000000..1387f5dd Binary files /dev/null and b/ryu/tests/packet_data/pcap/big_endian.pcap differ diff --git a/ryu/tests/packet_data/pcap/little_endian.pcap b/ryu/tests/packet_data/pcap/little_endian.pcap new file mode 100644 index 00000000..11072bdd Binary files /dev/null and 
b/ryu/tests/packet_data/pcap/little_endian.pcap differ diff --git a/ryu/tests/packet_data_generator/src/x4.erl b/ryu/tests/packet_data_generator/src/x4.erl index 6098b456..a749fbc8 100644 --- a/ryu/tests/packet_data_generator/src/x4.erl +++ b/ryu/tests/packet_data_generator/src/x4.erl @@ -268,6 +268,9 @@ x() -> #ofp_action_set_mpls_ttl{mpls_ttl = 10}, #ofp_action_dec_nw_ttl{}, #ofp_action_set_nw_ttl{nw_ttl = 10}, + #ofp_action_experimenter{ + experimenter = 101, + data = <<0,1,2,3,4,5,6,7>>}, #ofp_action_set_queue{queue_id = 3}, #ofp_action_group{group_id = 99}, #ofp_action_output{port = 6,max_len = no_buffer}]}, @@ -449,7 +452,12 @@ x() -> #ofp_action_set_queue{queue_id = 3}, #ofp_action_group{group_id = 99}, #ofp_action_output{port = 6, - max_len = no_buffer}]}, + max_len = no_buffer}, + #ofp_action_experimenter{experimenter = 98765432, + data = <<"exp_data">>}, + #ofp_action_experimenter{experimenter = 8992, + data = <<"exp_data">>} + ]}, #ofp_instruction_apply_actions{ actions = [#ofp_action_set_field{ diff --git a/ryu/tests/packet_data_generator3/gen.py b/ryu/tests/packet_data_generator3/gen.py index 6a7dd71e..cb1dedf0 100644 --- a/ryu/tests/packet_data_generator3/gen.py +++ b/ryu/tests/packet_data_generator3/gen.py @@ -79,6 +79,14 @@ MESSAGES = [ 'cookie=0x123456789abcdef0/0xffffffffffffffff'] + STD_MATCH + ['actions=conjunction(0xabcdef,1/2)'])}, + {'name': 'match_load_nx_register', + 'versions': [4], + 'cmd': 'mod-flows', + 'args': ['table=3', + 'cookie=0x123456789abcdef0/0xffffffffffffffff', + 'reg0=0x1234', + 'reg5=0xabcd/0xffff', + 'actions=load:0xdeadbee->NXM_NX_REG0[4..31]']}, {'name': 'match_move_nx_register', 'versions': [4], 'cmd': 'mod-flows', @@ -122,6 +130,21 @@ MESSAGES = [ 'importance=39032'] + ['dl_type=0x86dd'] + ['actions=ct(commit,nat(dst=2001:1::1-2001:1::ffff)'])}, + {'name': 'action_note', + 'versions': [4], + 'cmd': 'add-flow', + 'args': (['priority=100'] + + ['actions=note:04.05.06.07.00.00'])}, + {'name': 'action_controller', + 
'versions': [4], + 'cmd': 'add-flow', + 'args': (['priority=100'] + + ['actions=controller(reason=packet_out,max_len=1024,id=1)'])}, + {'name': 'action_fintimeout', + 'versions': [4], + 'cmd': 'add-flow', + 'args': (['priority=100,tcp'] + + ['actions=fin_timeout(idle_timeout=30,hard_timeout=60)'])}, ] buf = [] @@ -154,7 +177,8 @@ class MyHandler(socketserver.BaseRequestHandler): hello.serialize() self.request.send(hello.buf) elif msg_type == desc.ofproto.OFPT_FLOW_MOD: - buf.append(data[:msg_len]) + # HACK: Clear xid into zero + buf.append(data[:4] + b'\x00\x00\x00\x00' + data[8:msg_len]) elif msg_type == desc.ofproto.OFPT_BARRIER_REQUEST: brep = desc.ofproto_parser.OFPBarrierReply(desc) brep.xid = xid diff --git a/ryu/tests/switch/run_mininet.py b/ryu/tests/switch/run_mininet.py index a5acfdea..176c6106 100755 --- a/ryu/tests/switch/run_mininet.py +++ b/ryu/tests/switch/run_mininet.py @@ -4,53 +4,48 @@ import sys from mininet.cli import CLI from mininet.net import Mininet -from mininet.link import Link from mininet.node import RemoteController from mininet.node import OVSSwitch from mininet.node import UserSwitch -from mininet.term import makeTerm from oslo_config import cfg from ryu import version +from ryu.ofproto.ofproto_common import OFP_TCP_PORT + if '__main__' == __name__: opts = [ cfg.StrOpt('switch', default='ovs', - help='test switch (ovs|ovs13|ovs14|cpqd)') + help='test switch [ovs|cpqd]'), + cfg.StrOpt('protocols', default='OpenFlow13', + help='"protocols" option for ovs-vsctl (e.g. OpenFlow13)') ] conf = cfg.ConfigOpts() conf.register_cli_opts(opts) conf(project='ryu', version='run_mininet.py %s' % version) conf(sys.argv[1:]) - switch_type = {'ovs': OVSSwitch, 'ovs13': OVSSwitch, - 'ovs14': OVSSwitch, 'cpqd': UserSwitch} - switch = switch_type.get(conf.switch) + switch_type = {'ovs': OVSSwitch, 'cpqd': UserSwitch} + switch = switch_type.get(conf.switch, None) if switch is None: raise ValueError('Invalid switch type. 
[%s]', conf.switch) net = Mininet(switch=switch, controller=RemoteController) - c0 = net.addController('c0') + c0 = net.addController('c0', port=OFP_TCP_PORT) s1 = net.addSwitch('s1') s2 = net.addSwitch('s2') - Link(s1, s2) - Link(s1, s2) - Link(s1, s2) + net.addLink(s1, s2) + net.addLink(s1, s2) + net.addLink(s1, s2) - net.build() - c0.start() - s1.start([c0]) - s2.start([c0]) + net.start() - if conf.switch in ['ovs', 'ovs13']: - s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow13') - s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow13') - elif conf.switch == 'ovs14': - s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow14') - s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow14') + if conf.switch == 'ovs': + s1.cmd('ovs-vsctl set Bridge s1 protocols=%s' % conf.protocols) + s2.cmd('ovs-vsctl set Bridge s2 protocols=%s' % conf.protocols) CLI(net) diff --git a/ryu/tests/unit/app/ofctl_rest_json/of10.json b/ryu/tests/unit/app/ofctl_rest_json/of10.json new file mode 100644 index 00000000..266eb80c --- /dev/null +++ b/ryu/tests/unit/app/ofctl_rest_json/of10.json @@ -0,0 +1,101 @@ +[ + { + "method": "GET", + "path": "/stats/switches" + }, + { + "method": "GET", + "path": "/stats/desc/1" + }, + { + "method": "GET", + "path": "/stats/flow/1" + }, + { + "method": "POST", + "path": "/stats/flow/1" + }, + { + "method": "GET", + "path": "/stats/aggregateflow/1" + }, + { + "method": "POST", + "path": "/stats/aggregateflow/1" + }, + { + "method": "GET", + "path": "/stats/port/1" + }, + { + "method": "GET", + "path": "/stats/port/1/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1" + }, + { + "method": "GET", + "path": "/stats/queue/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1/1" + }, + { + "method": "GET", + "path": "/stats/table/1" + }, + { + "method": "POST", + "path": "/stats/flowentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify", + "body": 
{ + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "DELETE", + "path": "/stats/flowentry/clear/1" + }, + { + "method": "POST", + "path": "/stats/portdesc/modify", + "body": { + "dpid": 1, + "port_no": 1 + } + } +] diff --git a/ryu/tests/unit/app/ofctl_rest_json/of12.json b/ryu/tests/unit/app/ofctl_rest_json/of12.json new file mode 100644 index 00000000..89a81919 --- /dev/null +++ b/ryu/tests/unit/app/ofctl_rest_json/of12.json @@ -0,0 +1,150 @@ +[ + { + "method": "GET", + "path": "/stats/switches" + }, + { + "method": "GET", + "path": "/stats/desc/1" + }, + { + "method": "GET", + "path": "/stats/flow/1" + }, + { + "method": "POST", + "path": "/stats/flow/1" + }, + { + "method": "GET", + "path": "/stats/aggregateflow/1" + }, + { + "method": "POST", + "path": "/stats/aggregateflow/1" + }, + { + "method": "GET", + "path": "/stats/port/1" + }, + { + "method": "GET", + "path": "/stats/port/1/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1" + }, + { + "method": "GET", + "path": "/stats/queue/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1/1" + }, + { + "method": "GET", + "path": "/stats/queueconfig/1" + }, + { + "method": "GET", + "path": "/stats/queueconfig/1/1" + }, + { + "method": "GET", + "path": "/stats/group/1" + }, + { + "method": "GET", + "path": "/stats/group/1/1" + }, + { + "method": "GET", + "path": "/stats/groupdesc/1" + }, + { + "method": "GET", + "path": "/stats/groupfeatures/1" + }, + { + "method": "GET", + "path": "/stats/table/1" + }, + { + "method": "POST", + "path": "/stats/flowentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify", + "body": { + 
"dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "DELETE", + "path": "/stats/flowentry/clear/1" + }, + { + "method": "POST", + "path": "/stats/groupentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/portdesc/modify", + "body": { + "dpid": 1, + "port_no": 1 + } + }, + { + "method": "POST", + "path": "/stats/experimenter/1" + } +] diff --git a/ryu/tests/unit/app/ofctl_rest_json/of13.json b/ryu/tests/unit/app/ofctl_rest_json/of13.json new file mode 100644 index 00000000..d515f628 --- /dev/null +++ b/ryu/tests/unit/app/ofctl_rest_json/of13.json @@ -0,0 +1,191 @@ +[ + { + "method": "GET", + "path": "/stats/switches" + }, + { + "method": "GET", + "path": "/stats/desc/1" + }, + { + "method": "GET", + "path": "/stats/flow/1" + }, + { + "method": "POST", + "path": "/stats/flow/1" + }, + { + "method": "GET", + "path": "/stats/aggregateflow/1" + }, + { + "method": "POST", + "path": "/stats/aggregateflow/1" + }, + { + "method": "GET", + "path": "/stats/port/1" + }, + { + "method": "GET", + "path": "/stats/port/1/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1" + }, + { + "method": "GET", + "path": "/stats/queue/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1/1" + }, + { + "method": "GET", + "path": "/stats/queueconfig/1" + }, + { + "method": "GET", + "path": "/stats/queueconfig/1/1" + }, + { + "method": "GET", + "path": "/stats/group/1" + }, + { + "method": "GET", + "path": "/stats/group/1/1" + }, + 
{ + "method": "GET", + "path": "/stats/groupdesc/1" + }, + { + "method": "GET", + "path": "/stats/groupfeatures/1" + }, + { + "method": "GET", + "path": "/stats/meter/1" + }, + { + "method": "GET", + "path": "/stats/meter/1/1" + }, + { + "method": "GET", + "path": "/stats/meterconfig/1" + }, + { + "method": "GET", + "path": "/stats/meterconfig/1/1" + }, + { + "method": "GET", + "path": "/stats/meterfeatures/1" + }, + { + "method": "GET", + "path": "/stats/table/1" + }, + { + "method": "POST", + "path": "/stats/flowentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "DELETE", + "path": "/stats/flowentry/clear/1" + }, + { + "method": "POST", + "path": "/stats/groupentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/portdesc/modify", + "body": { + "dpid": 1, + "port_no": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/experimenter/1" + } +] diff --git a/ryu/tests/unit/app/ofctl_rest_json/of14.json b/ryu/tests/unit/app/ofctl_rest_json/of14.json new file mode 100644 index 00000000..0cc4eb6c --- /dev/null +++ b/ryu/tests/unit/app/ofctl_rest_json/of14.json @@ -0,0 +1,195 @@ +[ + 
{ + "method": "GET", + "path": "/stats/switches" + }, + { + "method": "GET", + "path": "/stats/desc/1" + }, + { + "method": "GET", + "path": "/stats/flow/1" + }, + { + "method": "POST", + "path": "/stats/flow/1" + }, + { + "method": "GET", + "path": "/stats/aggregateflow/1" + }, + { + "method": "POST", + "path": "/stats/aggregateflow/1" + }, + { + "method": "GET", + "path": "/stats/port/1" + }, + { + "method": "GET", + "path": "/stats/port/1/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1" + }, + { + "method": "GET", + "path": "/stats/queue/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1/1/1" + }, + { + "method": "GET", + "path": "/stats/group/1" + }, + { + "method": "GET", + "path": "/stats/group/1/1" + }, + { + "method": "GET", + "path": "/stats/groupdesc/1" + }, + { + "method": "GET", + "path": "/stats/groupfeatures/1" + }, + { + "method": "GET", + "path": "/stats/meter/1" + }, + { + "method": "GET", + "path": "/stats/meter/1/1" + }, + { + "method": "GET", + "path": "/stats/meterconfig/1" + }, + { + "method": "GET", + "path": "/stats/meterconfig/1/1" + }, + { + "method": "GET", + "path": "/stats/meterfeatures/1" + }, + { + "method": "GET", + "path": "/stats/table/1" + }, + { + "method": "POST", + "path": "/stats/flowentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "DELETE", + "path": "/stats/flowentry/clear/1" + 
}, + { + "method": "POST", + "path": "/stats/groupentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/portdesc/modify", + "body": { + "dpid": 1, + "port_no": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/experimenter/1" + } +] diff --git a/ryu/tests/unit/app/ofctl_rest_json/of15.json b/ryu/tests/unit/app/ofctl_rest_json/of15.json new file mode 100644 index 00000000..a7569223 --- /dev/null +++ b/ryu/tests/unit/app/ofctl_rest_json/of15.json @@ -0,0 +1,203 @@ +[ + { + "method": "GET", + "path": "/stats/switches" + }, + { + "method": "GET", + "path": "/stats/desc/1" + }, + { + "method": "GET", + "path": "/stats/flow/1" + }, + { + "method": "POST", + "path": "/stats/flow/1" + }, + { + "method": "GET", + "path": "/stats/aggregateflow/1" + }, + { + "method": "POST", + "path": "/stats/aggregateflow/1" + }, + { + "method": "GET", + "path": "/stats/port/1" + }, + { + "method": "GET", + "path": "/stats/port/1/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1" + }, + { + "method": "GET", + "path": "/stats/portdesc/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1" + }, + { + "method": "GET", + "path": "/stats/queue/1/1/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1/1" + }, + { + "method": "GET", + "path": "/stats/queuedesc/1/1/1" + }, + { + "method": "GET", + "path": "/stats/group/1" + }, + { + "method": "GET", + "path": 
"/stats/group/1/1" + }, + { + "method": "GET", + "path": "/stats/groupdesc/1" + }, + { + "method": "GET", + "path": "/stats/groupdesc/1/1" + }, + { + "method": "GET", + "path": "/stats/groupfeatures/1" + }, + { + "method": "GET", + "path": "/stats/meter/1" + }, + { + "method": "GET", + "path": "/stats/meter/1/1" + }, + { + "method": "GET", + "path": "/stats/meterdesc/1" + }, + { + "method": "GET", + "path": "/stats/meterdesc/1/1" + }, + { + "method": "GET", + "path": "/stats/meterfeatures/1" + }, + { + "method": "GET", + "path": "/stats/table/1" + }, + { + "method": "POST", + "path": "/stats/flowentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/modify_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/flowentry/delete_strict", + "body": { + "dpid": 1 + } + }, + { + "method": "DELETE", + "path": "/stats/flowentry/clear/1" + }, + { + "method": "POST", + "path": "/stats/groupentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/groupentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/portdesc/modify", + "body": { + "dpid": 1, + "port_no": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/add", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/modify", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/meterentry/delete", + "body": { + "dpid": 1 + } + }, + { + "method": "POST", + "path": "/stats/experimenter/1" + } +] diff --git a/ryu/tests/unit/app/test_ofctl_rest.py b/ryu/tests/unit/app/test_ofctl_rest.py new file mode 100644 index 00000000..095924ad --- /dev/null +++ 
b/ryu/tests/unit/app/test_ofctl_rest.py @@ -0,0 +1,137 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import functools +import json +import logging +from nose.tools import eq_ +import os +import sys +import unittest +from webob.request import Request +try: + import mock # Python 2 +except ImportError: + from unittest import mock # Python 3 + +from ryu.app import ofctl_rest +from ryu.app.wsgi import WSGIApplication +from ryu.controller.dpset import DPSet +from ryu.ofproto import ofproto_protocol +from ryu.ofproto import ofproto_v1_0 +from ryu.ofproto import ofproto_v1_2 +from ryu.ofproto import ofproto_v1_3 +from ryu.ofproto import ofproto_v1_4 +from ryu.ofproto import ofproto_v1_5 +from ryu.tests import test_lib + + +LOG = logging.getLogger(__name__) + + +class DummyDatapath(ofproto_protocol.ProtocolDesc): + + def __init__(self, version): + super(DummyDatapath, self).__init__(version) + self.id = 1 + _kw = {'port_no': 1, 'hw_addr': 'aa:bb:cc:dd:ee:ff', + 'name': 's1-eth1', 'config': 1, 'state': 1} + # for OpenFlow 1.0 + if version in [ofproto_v1_0.OFP_VERSION]: + _kw.update( + {'curr': 2112, 'advertised': 0, 'supported': 0, 'peer': 0}) + port_info = self.ofproto_parser.OFPPhyPort(**_kw) + # for OpenFlow 1.2 or 1.3 + elif version in [ofproto_v1_2.OFP_VERSION, ofproto_v1_3.OFP_VERSION]: + _kw.update( + {'curr': 2112, 'advertised': 0, 'supported': 0, 
'peer': 0, + 'curr_speed': 10000000, 'max_speed': 0}) + port_info = self.ofproto_parser.OFPPort(**_kw) + # for OpenFlow 1.4+ + else: + _kw.update({'properties': []}) + port_info = self.ofproto_parser.OFPPort(**_kw) + self.ports = {1: port_info} + + +class Test_ofctl_rest(unittest.TestCase): + + def _test(self, name, dp, method, path, body): + print('processing %s ...' % name) + + dpset = DPSet() + dpset._register(dp) + wsgi = WSGIApplication() + contexts = { + 'dpset': dpset, + 'wsgi': wsgi, + } + ofctl_rest.RestStatsApi(**contexts) + + req = Request.blank(path) + req.body = json.dumps(body).encode('utf-8') + req.method = method + + with mock.patch('ryu.lib.ofctl_utils.send_stats_request'),\ + mock.patch('ryu.lib.ofctl_utils.send_msg'): + res = req.get_response(wsgi) + eq_(res.status, '200 OK') + + +def _add_tests(): + _ofp_vers = { + 'of10': ofproto_v1_0.OFP_VERSION, + 'of12': ofproto_v1_2.OFP_VERSION, + 'of13': ofproto_v1_3.OFP_VERSION, + 'of14': ofproto_v1_4.OFP_VERSION, + 'of15': ofproto_v1_5.OFP_VERSION, + } + + this_dir = os.path.dirname(sys.modules[__name__].__file__) + ofctl_rest_json_dir = os.path.join(this_dir, 'ofctl_rest_json/') + + for ofp_ver in _ofp_vers.keys(): + # read a json file + json_path = os.path.join(ofctl_rest_json_dir, ofp_ver + '.json') + if os.path.exists(json_path): + _test_cases = json.load(open(json_path)) + else: + print("Skip to load test cases for %s" % ofp_ver) + continue + + # add test + for test in _test_cases: + method = test['method'] + path = test['path'] + body = test.get('body', {}) + + name = 'test_ofctl_rest_' + method + '_' + ofp_ver + '_' + path + print('adding %s ...' 
% name) + f = functools.partial( + Test_ofctl_rest._test, + name=name, + dp=DummyDatapath(_ofp_vers[ofp_ver]), + method=test['method'], + path=test['path'], + body=body + ) + test_lib.add_method(Test_ofctl_rest, name, f) + +_add_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/ryu/tests/unit/controller/test_controller.py b/ryu/tests/unit/controller/test_controller.py index 0eeb30df..27a2bce6 100644 --- a/ryu/tests/unit/controller/test_controller.py +++ b/ryu/tests/unit/controller/test_controller.py @@ -72,7 +72,7 @@ class Test_Datapath(unittest.TestCase): self.assertTrue(issubclass(msg.category, UserWarning)) def test_ports_accessibility_v13(self): - self._test_ports_accessibility(ofproto_v1_3_parser, 2) + self._test_ports_accessibility(ofproto_v1_3_parser, 0) def test_ports_accessibility_v12(self): self._test_ports_accessibility(ofproto_v1_2_parser, 0) diff --git a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_group_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_group_stats_request.packet.json new file mode 100644 index 00000000..7646a77a --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_group_stats_request.packet.json @@ -0,0 +1,3 @@ +{ + "group_id": 1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_port_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_port_stats_request.packet.json new file mode 100644 index 00000000..7d56f29d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_port_stats_request.packet.json @@ -0,0 +1,3 @@ +{ + "port": 7 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json new file mode 100644 index 00000000..e116fdc1 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json @@ -0,0 +1,3 @@ +{ + "port": null +} diff --git 
a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json new file mode 100644 index 00000000..7d56f29d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json @@ -0,0 +1,3 @@ +{ + "port": 7 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json new file mode 100644 index 00000000..78077510 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json @@ -0,0 +1,4 @@ +{ + "port": 7, + "queue_id":1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json new file mode 100644 index 00000000..ee465693 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json @@ -0,0 +1,3 @@ +{ + "queue_id":1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/4-12-ofp_flow_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/4-12-ofp_flow_stats_reply.packet.json index 763586f9..f9b8e68f 100644 --- a/ryu/tests/unit/lib/ofctl_json/of13/4-12-ofp_flow_stats_reply.packet.json +++ b/ryu/tests/unit/lib/ofctl_json/of13/4-12-ofp_flow_stats_reply.packet.json @@ -74,7 +74,9 @@ "SET_NW_TTL:10", "SET_QUEUE:3", "GROUP:99", - "OUTPUT:6" + "OUTPUT:6", + "EXPERIMENTER: {experimenter:98765432, data:ZXhwX2RhdGE=}", + "NX_UNKNOWN: {subtype: 25976, data: cF9kYXRh}" ] }, "SET_FIELD: {eth_src:01:02:03:04:05:06}", diff --git a/ryu/tests/unit/lib/ofctl_json/of13/4-2-ofp_flow_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/4-2-ofp_flow_mod.packet.json index 636bfb8c..fbd5c214 100644 --- a/ryu/tests/unit/lib/ofctl_json/of13/4-2-ofp_flow_mod.packet.json +++ 
b/ryu/tests/unit/lib/ofctl_json/of13/4-2-ofp_flow_mod.packet.json @@ -61,6 +61,12 @@ "type": "SET_NW_TTL", "nw_ttl": 10 }, + { + "type": "EXPERIMENTER", + "experimenter": 101, + "data": "AAECAwQFBgc=", + "data_type": "base64" + }, { "type": "SET_QUEUE", "queue_id": 3 diff --git a/ryu/tests/unit/lib/ofctl_json/of13/4-48-ofp_meter_config_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/4-48-ofp_meter_config_reply.packet.json index 799bde48..d5efcd97 100644 --- a/ryu/tests/unit/lib/ofctl_json/of13/4-48-ofp_meter_config_reply.packet.json +++ b/ryu/tests/unit/lib/ofctl_json/of13/4-48-ofp_meter_config_reply.packet.json @@ -9,9 +9,9 @@ } ], "flags": [ - "STATS", "PKTPS", - "BURST" + "BURST", + "STATS" ], "meter_id": 100 } diff --git a/ryu/tests/unit/lib/ofctl_json/of13/4-52-ofp_meter_features_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/4-52-ofp_meter_features_reply.packet.json index 025e6ccf..24dac7d4 100644 --- a/ryu/tests/unit/lib/ofctl_json/of13/4-52-ofp_meter_features_reply.packet.json +++ b/ryu/tests/unit/lib/ofctl_json/of13/4-52-ofp_meter_features_reply.packet.json @@ -6,10 +6,10 @@ "DSCP_REMARK" ], "capabilities": [ - "STATS", "KBPS", "PKTPS", - "BURST" + "BURST", + "STATS" ], "max_bands": 255, "max_color": 0, diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_group_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_group_stats_request.packet.json new file mode 100644 index 00000000..7646a77a --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_group_stats_request.packet.json @@ -0,0 +1,3 @@ +{ + "group_id": 1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_config_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_config_request.packet.json new file mode 100644 index 00000000..3ad622e0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_config_request.packet.json @@ -0,0 +1,3 @@ +{ + "meter_id": 1 +} diff --git 
a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_stats_request.packet.json new file mode 100644 index 00000000..3ad622e0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_meter_stats_request.packet.json @@ -0,0 +1,3 @@ +{ + "meter_id": 1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_port_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_port_stats_request.packet.json new file mode 100644 index 00000000..7d56f29d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_port_stats_request.packet.json @@ -0,0 +1,3 @@ +{ + "port": 7 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json new file mode 100644 index 00000000..e116fdc1 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json @@ -0,0 +1,3 @@ +{ + "port": null +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json new file mode 100644 index 00000000..7d56f29d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json @@ -0,0 +1,3 @@ +{ + "port": 7 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json new file mode 100644 index 00000000..78077510 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json @@ -0,0 +1,4 @@ +{ + "port": 7, + "queue_id":1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json 
new file mode 100644 index 00000000..ee465693 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json @@ -0,0 +1,3 @@ +{ + "queue_id":1 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-0-ofp_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-0-ofp_desc_reply.packet.json new file mode 100644 index 00000000..8fc54b74 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-0-ofp_desc_reply.packet.json @@ -0,0 +1,9 @@ +{ + "1": { + "dp_desc": "dp", + "hw_desc": "hw", + "mfr_desc": "mfr", + "serial_num": "serial", + "sw_desc": "sw" + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-11-ofp_flow_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-11-ofp_flow_stats_request.packet.json new file mode 100644 index 00000000..a42dfef0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-11-ofp_flow_stats_request.packet.json @@ -0,0 +1,11 @@ +{ + "flow": { + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "match": {}, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 0 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-12-ofp_flow_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-12-ofp_flow_stats_reply.packet.json new file mode 100644 index 00000000..134c3f12 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-12-ofp_flow_stats_reply.packet.json @@ -0,0 +1,197 @@ +{ + "1": [ + { + "byte_count": 0, + "cookie": 0, + "duration_nsec": 115277000, + "duration_sec": 358, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [], + "length": 0, + "match": {}, + "packet_count": 0, + "priority": 65535, + "table_id": 0 + }, + { + "byte_count": 0, + "cookie": 0, + "duration_nsec": 115055000, + "duration_sec": 358, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [ + { + "actions": [ + { + "max_len": 0, + "port": 4294967290, + "type": "OUTPUT" + } + ], + "type": "APPLY_ACTIONS" + } + 
], + "length": 0, + "match": { + "eth_type": 2054 + }, + "packet_count": 0, + "priority": 65534, + "table_id": 0 + }, + { + "byte_count": 238, + "cookie": 0, + "duration_nsec": 511582000, + "duration_sec": 316220, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [ + { + "table_id": 1, + "type": "GOTO_TABLE" + } + ], + "length": 0, + "match": { + "eth_src": "f2:0b:a4:7d:f8:ea", + "in_port": 6 + }, + "packet_count": 3, + "priority": 123, + "table_id": 0 + }, + { + "byte_count": 98, + "cookie": 0, + "duration_nsec": 980901000, + "duration_sec": 313499, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [ + { + "actions": [ + { + "field": "vlan_vid", + "mask": null, + "type": "SET_FIELD", + "value": 258 + }, + { + "type": "COPY_TTL_OUT" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "POP_PBB" + }, + { + "ethertype": 4660, + "type": "PUSH_PBB" + }, + { + "ethertype": 39030, + "type": "POP_MPLS" + }, + { + "ethertype": 34887, + "type": "PUSH_MPLS" + }, + { + "type": "POP_VLAN" + }, + { + "ethertype": 33024, + "type": "PUSH_VLAN" + }, + { + "type": "DEC_MPLS_TTL" + }, + { + "mpls_ttl": 10, + "type": "SET_MPLS_TTL" + }, + { + "type": "DEC_NW_TTL" + }, + { + "nw_ttl": 10, + "type": "SET_NW_TTL" + }, + { + "queue_id": 3, + "type": "SET_QUEUE" + }, + { + "group_id": 99, + "type": "GROUP" + }, + { + "max_len": 65535, + "port": 6, + "type": "OUTPUT" + }, + { + "data": "ZXhwX2RhdGE=", + "experimenter": 98765432, + "type": "EXPERIMENTER" + }, + { + "data": "cF9kYXRh", + "experimenter": 8992, + "subtype": 25976, + "type": "EXPERIMENTER" + } + ], + "type": "WRITE_ACTIONS" + }, + { + "actions": [ + { + "field": "eth_src", + "mask": null, + "type": "SET_FIELD", + "value": "01:02:03:04:05:06" + }, + { + "field": "pbb_uca", + "mask": null, + "type": "SET_FIELD", + "value": 1 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "actions": [ + { + "max_len": 65535, + 
"port": 4294967293, + "type": "OUTPUT" + } + ], + "type": "WRITE_ACTIONS" + } + ], + "length": 0, + "match": {}, + "packet_count": 1, + "priority": 0, + "table_id": 0 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-16-ofp_experimenter.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-16-ofp_experimenter.packet.json new file mode 100644 index 00000000..c655f8f0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-16-ofp_experimenter.packet.json @@ -0,0 +1,8 @@ +{ + "exp": { + "data": "bmF6bw==", + "data_type": "base64", + "exp_type": 123456789, + "experimenter": 98765432 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-2-ofp_flow_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-2-ofp_flow_mod.packet.json new file mode 100644 index 00000000..72054e33 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-2-ofp_flow_mod.packet.json @@ -0,0 +1,103 @@ +{ + "cmd": 0, + "flow": { + "instructions": [ + { + "actions": [ + { + "field": "vlan_vid", + "type": "SET_FIELD", + "value": 258 + }, + { + "type": "COPY_TTL_OUT" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "POP_PBB" + }, + { + "ethertype": 4660, + "type": "PUSH_PBB" + }, + { + "ethertype": 39030, + "type": "POP_MPLS" + }, + { + "ethertype": 34887, + "type": "PUSH_MPLS" + }, + { + "type": "POP_VLAN" + }, + { + "ethertype": 33024, + "type": "PUSH_VLAN" + }, + { + "type": "DEC_MPLS_TTL" + }, + { + "mpls_ttl": 10, + "type": "SET_MPLS_TTL" + }, + { + "type": "DEC_NW_TTL" + }, + { + "nw_ttl": 10, + "type": "SET_NW_TTL" + }, + { + "data": "AAECAwQFBgc=", + "data_type": "base64", + "experimenter": 101, + "type": "EXPERIMENTER" + }, + { + "queue_id": 3, + "type": "SET_QUEUE" + }, + { + "group_id": 99, + "type": "GROUP" + }, + { + "max_len": 65535, + "port": 6, + "type": "OUTPUT" + } + ], + "type": "WRITE_ACTIONS" + }, + { + "actions": [ + { + "field": "eth_src", + "type": "SET_FIELD", + "value": "01:02:03:04:05:06" + }, + { + "field": "pbb_uca", + "type": 
"SET_FIELD", + "value": 1 + } + ], + "type": "APPLY_ACTIONS" + } + ], + "buffer_id": 65535, + "importance": 0, + "match": { + "eth_dst": "f2:0b:a4:7d:f8:ea" + }, + "priority": 123, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-21-ofp_group_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-21-ofp_group_mod.packet.json new file mode 100644 index 00000000..fa48425a --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-21-ofp_group_mod.packet.json @@ -0,0 +1,21 @@ +{ + "cmd": 0, + "group": { + "buckets": [ + { + "actions": [ + { + "max_len": 65535, + "port": 2, + "type": "OUTPUT" + } + ], + "watch_group": 1, + "watch_port": 1, + "weight": 1 + } + ], + "group_id": 1, + "type": "ALL" + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-22-ofp_port_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-22-ofp_port_mod.packet.json new file mode 100644 index 00000000..980456eb --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-22-ofp_port_mod.packet.json @@ -0,0 +1,50 @@ +{ + "port_config": { + "config": 0, + "hw_addr": "00:11:00:00:11:11", + "mask": 0, + "port_no": 1, + "properties": [ + { + "advertise": 4096, + "length": 8, + "type": 0 + }, + { + "configure": 3, + "fl_offset": 2000, + "freq_lmda": 1500, + "grid_span": 3000, + "length": 24, + "tx_pwr": 300, + "type": 1 + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": 65535 + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": 65535 + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": 65535 + } + ] + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-25-ofp_aggregate_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-25-ofp_aggregate_stats_request.packet.json new file mode 100644 index 00000000..845af65b --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-25-ofp_aggregate_stats_request.packet.json @@ -0,0 +1,11 @@ +{ + "flow": { 
+ "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "match": {}, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 255 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-26-ofp_aggregate_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-26-ofp_aggregate_stats_reply.packet.json new file mode 100644 index 00000000..a4b23d12 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-26-ofp_aggregate_stats_reply.packet.json @@ -0,0 +1,9 @@ +{ + "1": [ + { + "byte_count": 574, + "flow_count": 6, + "packet_count": 7 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-28-ofp_table_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-28-ofp_table_stats_reply.packet.json new file mode 100644 index 00000000..95b2c495 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-28-ofp_table_stats_reply.packet.json @@ -0,0 +1,16 @@ +{ + "1": [ + { + "active_count": 4, + "lookup_count": 4, + "matched_count": 4, + "table_id": 0 + }, + { + "active_count": 4, + "lookup_count": 4, + "matched_count": 4, + "table_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-30-ofp_port_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-30-ofp_port_stats_reply.packet.json new file mode 100644 index 00000000..f9f1d86f --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-30-ofp_port_stats_reply.packet.json @@ -0,0 +1,85 @@ +{ + "1": [ + { + "duration_nsec": 0, + "duration_sec": 0, + "port_no": 7, + "properties": [ + { + "collisions": 0, + "rx_crc_err": 0, + "rx_frame_err": 0, + "rx_over_err": 0, + "type": "ETHERNET" + }, + { + "bias_current": 300, + "flags": 3, + "rx_freq_lmda": 1500, + "rx_grid_span": 500, + "rx_offset": 700, + "rx_pwr": 2000, + "temperature": 273, + "tx_freq_lmda": 1500, + "tx_grid_span": 500, + "tx_offset": 700, + "tx_pwr": 2000, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + 
"experimenter": 101, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "type": "EXPERIMENTER" + } + ], + "rx_bytes": 0, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 0, + "tx_bytes": 336, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 4 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "port_no": 6, + "properties": [ + { + "collisions": 0, + "rx_crc_err": 0, + "rx_frame_err": 0, + "rx_over_err": 0, + "type": "ETHERNET" + } + ], + "rx_bytes": 336, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 4, + "tx_bytes": 336, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 4 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-32-ofp_group_features_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-32-ofp_group_features_reply.packet.json new file mode 100644 index 00000000..2a29ac2d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-32-ofp_group_features_reply.packet.json @@ -0,0 +1,104 @@ +{ + "1": [ + { + "actions": [ + { + "ALL": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "SELECT": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "INDIRECT": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "FF": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + } + ], + "capabilities": 
[ + "SELECT_WEIGHT", + "CHAINING" + ], + "max_groups": [ + { + "ALL": 16777216 + }, + { + "SELECT": 16777216 + }, + { + "INDIRECT": 16777216 + }, + { + "FF": 16777216 + } + ], + "types": [ + "ALL", + "SELECT", + "INDIRECT", + "FF" + ] + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-34-ofp_group_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-34-ofp_group_desc_reply.packet.json new file mode 100644 index 00000000..df0ef359 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-34-ofp_group_desc_reply.packet.json @@ -0,0 +1,24 @@ +{ + "1": [ + { + "buckets": [ + { + "actions": [ + { + "max_len": 65535, + "port": 2, + "type": "OUTPUT" + } + ], + "len": 32, + "watch_group": 1, + "watch_port": 1, + "weight": 1 + } + ], + "group_id": 1, + "length": 40, + "type": "ALL" + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-36-ofp_queue_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-36-ofp_queue_stats_reply.packet.json new file mode 100644 index 00000000..a6d246d0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-36-ofp_queue_stats_reply.packet.json @@ -0,0 +1,64 @@ +{ + "1": [ + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 104, + "port_no": 7, + "properties": [ + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "queue_id": 1, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 48, + "port_no": 6, + "properties": [], + "queue_id": 1, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 48, + "port_no": 7, + "properties": [], + "queue_id": 2, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + } 
+ ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-43-ofp_meter_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-43-ofp_meter_mod.packet.json new file mode 100644 index 00000000..b78ea6be --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-43-ofp_meter_mod.packet.json @@ -0,0 +1,31 @@ +{ + "cmd": 0, + "meter": { + "flags": [ + "PKTPS", + "BURST", + "STATS" + ], + "meter_id": 100, + "bands": [ + { + "burst_size": 10, + "rate": 1000, + "type": "DROP" + }, + { + "burst_size": 10, + "prec_level": 1, + "rate": 1000, + "type": "DSCP_REMARK" + }, + { + "burst_size": 10, + "experimenter": 999, + "len": 16, + "rate": 1000, + "type": "EXPERIMENTER" + } + ] + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-46-ofp_meter_config_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-46-ofp_meter_config_reply.packet.json new file mode 100644 index 00000000..d5efcd97 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-46-ofp_meter_config_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "bands": [ + { + "burst_size": 10, + "rate": 1000, + "type": "DROP" + } + ], + "flags": [ + "PKTPS", + "BURST", + "STATS" + ], + "meter_id": 100 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-48-ofp_meter_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-48-ofp_meter_stats_reply.packet.json new file mode 100644 index 00000000..f21ec519 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-48-ofp_meter_stats_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "band_stats": [ + { + "byte_band_count": 0, + "packet_band_count": 0 + } + ], + "byte_in_count": 0, + "duration_nsec": 480000, + "duration_sec": 0, + "flow_count": 0, + "len": 56, + "meter_id": 100, + "packet_in_count": 0 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-50-ofp_meter_features_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-50-ofp_meter_features_reply.packet.json new file mode 100644 index 00000000..24dac7d4 --- /dev/null +++ 
b/ryu/tests/unit/lib/ofctl_json/of14/5-50-ofp_meter_features_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "band_types": [ + "DROP", + "DSCP_REMARK" + ], + "capabilities": [ + "KBPS", + "PKTPS", + "BURST", + "STATS" + ], + "max_bands": 255, + "max_color": 0, + "max_meter": 16777216 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-52-ofp_port_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-52-ofp_port_desc_reply.packet.json new file mode 100644 index 00000000..c2f748a8 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-52-ofp_port_desc_reply.packet.json @@ -0,0 +1,83 @@ +{ + "1": [ + { + "config": 0, + "hw_addr": "f2:0b:a4:d0:3f:70", + "length": 168, + "name": "Port7", + "port_no": 7, + "properties": [ + { + "advertised": 10240, + "curr": 10248, + "curr_speed": 5000, + "length": 32, + "max_speed": 5000, + "peer": 10248, + "supported": 10248, + "type": "ETHERNET" + }, + { + "length": 40, + "rx_grid_freq_lmda": 1500, + "rx_max_freq_lmda": 2000, + "rx_min_freq_lmda": 1000, + "supported": 1, + "tx_grid_freq_lmda": 1500, + "tx_max_freq_lmda": 2000, + "tx_min_freq_lmda": 1000, + "tx_pwr_max": 2000, + "tx_pwr_min": 1000, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "state": 4 + }, + { + "config": 0, + "hw_addr": "f2:0b:a4:7d:f8:ea", + "length": 72, + "name": "Port6", + "port_no": 6, + "properties": [ + { + "advertised": 10240, + "curr": 10248, + "curr_speed": 5000, + "length": 32, + "max_speed": 5000, + "peer": 10248, + "supported": 10248, + "type": "ETHERNET" + } + ], + "state": 4 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-54-ofp_table_features_reply.packet.json 
b/ryu/tests/unit/lib/ofctl_json/of14/5-54-ofp_table_features_reply.packet.json new file mode 100644 index 00000000..ded5a996 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-54-ofp_table_features_reply.packet.json @@ -0,0 +1,11928 @@ +{ + "1": [ + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "\u79c1\u306e\u30c6\u30fc\u30d6\u30eb", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 
197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + 
"length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + 
}, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, 
+ { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + }, + { + "type": "EXPERIMENTER" + }, + { + "type": "EXPERIMENTER" + }, + { + "type": "EXPERIMENTER" + } + ], + "table_id": 0 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x01", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 
83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + 
"type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, 
+ "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + 
"hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": 
"ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 1 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x02", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + 
"type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, 
+ "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + 
"length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { 
+ "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + 
"length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 2 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x03", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, 
+ 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + 
"hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + 
{ + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + 
{ + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 3 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x04", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 
83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + 
"type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, 
+ "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + 
"hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": 
"ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 4 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x05", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + 
"type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + 
}, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" 
+ }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, 
+ "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": 
"pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 
0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 5 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x06", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 
183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { 
+ "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 
0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 
0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + 
"hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 6 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x07", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 
90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + 
}, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + 
"length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + 
"hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" 
+ }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 
0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 7 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x08", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + 
{ + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 
16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + 
}, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, 
+ "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": 
[ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + 
"type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 8 + }, + { + "config": 0, + "max_entries": 16777216, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "Flow Table 0x09", + "properties": [ + { + "instruction_ids": [ + { + "len": 4, + "type": 1 + }, + { + "len": 4, + "type": 2 + }, + { + "len": 4, + "type": 3 + }, + { + "len": 4, + "type": 4 + }, + { + "len": 4, + "type": 5 + }, + { + "len": 4, + "type": 6 + } + ], + "type": "INSTRUCTIONS" + }, + { + "table_ids": [ + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154, + 155, + 156, + 157, + 158, + 159, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 174, + 175, + 176, + 177, + 178, + 179, + 180, + 181, + 182, + 183, + 184, + 185, + 186, + 187, + 188, + 189, + 190, + 191, + 192, + 193, + 194, + 
195, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 210, + 211, + 212, + 213, + 214, + 215, + 216, + 217, + 218, + 219, + 220, + 221, + 222, + 223, + 224, + 225, + 226, + 227, + 228, + 229, + 230, + 231, + 232, + 233, + 234, + 235, + 236, + 237, + 238, + 239, + 240, + 241, + 242, + 243, + 244, + 245, + 246, + 247, + 248, + 249, + 250, + 251, + 252, + 253, + 254 + ], + "type": "NEXT_TABLES" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "WRITE_ACTIONS" + }, + { + "action_ids": [ + { + "len": 4, + "type": 0 + }, + { + "len": 4, + "type": 22 + }, + { + "len": 4, + "type": 21 + }, + { + "len": 4, + "type": 15 + }, + { + "len": 4, + "type": 16 + }, + { + "len": 4, + "type": 23 + }, + { + "len": 4, + "type": 24 + }, + { + "len": 4, + "type": 11 + }, + { + "len": 4, + "type": 12 + }, + { + "len": 4, + "type": 17 + }, + { + "len": 4, + "type": 18 + }, + { + "len": 4, + "type": 19 + }, + { + "len": 4, + "type": 20 + }, + { + "len": 4, + "type": 26 + }, + { + "len": 4, + "type": 27 + }, + { + "len": 4, + "type": 25 + } + ], + "type": "APPLY_ACTIONS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + 
"length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "MATCH" + }, + { + "oxm_ids": [ + { + 
"hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": 
"ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WILDCARDS" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": 
"ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "WRITE_SETFIELD" + }, + { + "oxm_ids": [ + { + "hasmask": 0, + "length": 0, + "type": "in_port" + }, + { + "hasmask": 0, + "length": 0, + "type": "metadata" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "eth_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_vid" + }, + { + "hasmask": 0, + "length": 0, + "type": "vlan_pcp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_dscp" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_ecn" + }, + { + "hasmask": 0, + "length": 0, + "type": "ip_proto" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv4_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "tcp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "udp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "sctp_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv4_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_op" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_spa" + }, + { + "hasmask": 0, + 
"length": 0, + "type": "arp_tpa" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_sha" + }, + { + "hasmask": 0, + "length": 0, + "type": "arp_tha" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_src" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_dst" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_flabel" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_type" + }, + { + "hasmask": 0, + "length": 0, + "type": "icmpv6_code" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_target" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_sll" + }, + { + "hasmask": 0, + "length": 0, + "type": "ipv6_nd_tll" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_label" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_tc" + }, + { + "hasmask": 0, + "length": 0, + "type": "mpls_bos" + }, + { + "hasmask": 0, + "length": 0, + "type": "pbb_isid" + } + ], + "type": "APPLY_SETFIELD" + } + ], + "table_id": 9 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-56-ofp_group_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-56-ofp_group_stats_reply.packet.json new file mode 100644 index 00000000..1c6f9039 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-56-ofp_group_stats_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "bucket_stats": [ + { + "byte_count": 2345, + "packet_count": 234 + } + ], + "byte_count": 12345, + "duration_nsec": 609036000, + "duration_sec": 9, + "group_id": 1, + "length": 56, + "packet_count": 123, + "ref_count": 2 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of14/5-63-ofp_queue_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-63-ofp_queue_desc_request.packet.json new file mode 100644 index 00000000..fc863813 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-63-ofp_queue_desc_request.packet.json @@ -0,0 +1,4 @@ +{ + "port_no": 7, + "queue_id": 4294967295 +} diff --git 
a/ryu/tests/unit/lib/ofctl_json/of14/5-64-ofp_queue_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of14/5-64-ofp_queue_desc_reply.packet.json new file mode 100644 index 00000000..ca6e87f2 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of14/5-64-ofp_queue_desc_reply.packet.json @@ -0,0 +1,61 @@ +{ + "1": [ + { + "len": 32, + "port_no": 7, + "properties": [ + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + } + ], + "queue_id": 0 + }, + { + "len": 88, + "port_no": 8, + "properties": [ + { + "length": 8, + "rate": 300, + "type": "MIN_RATE" + }, + { + "length": 8, + "rate": 900, + "type": "MAX_RATE" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "queue_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json new file mode 100644 index 00000000..0b5e04be --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json @@ -0,0 +1,48 @@ +{ + "1": [ + { + "cookie": 1234605616436508552, + "flags": 1, + "hard_timeout": 255, + "idle_timeout": 255, + "importance": 43690, + "instructions": [ + { + "table_id": 2, + "type": "GOTO_TABLE" + }, + { + "actions": [ + { + "meter_id": 2, + "type": "METER" + } + ], + "type": "WRITE_ACTIONS" + }, + { + "actions": [ + { + "type": "COPY_FIELD", + "n_bits": 32, + "src_offset": 1, + "dst_offset": 2, + "src_oxm_id": "eth_src", + "dst_oxm_id": "eth_dst" + } + ], + "type": "APPLY_ACTIONS" + } + ], + "length": 64, + "match": { + "in_port": 1 + }, + "priority": 5, + "stats": { + "flow_count": 1 + }, + "table_id": 1 + } + ] +} diff 
--git a/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json new file mode 100644 index 00000000..f59aa0b2 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json @@ -0,0 +1,13 @@ +{ + "flow": { + "cookie": 1234605616436508552, + "cookie_mask": 18446744073709551615, + "flags": 0, + "match": { + "in_port": 1 + }, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_mod.packet.json new file mode 100644 index 00000000..cc3ba488 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/lib-ofctl-OFP15-flow_mod.packet.json @@ -0,0 +1,115 @@ +{ + "cmd": 0, + "flow": { + "instructions": [ + { + "actions": [ + { + "field": "vlan_vid", + "type": "SET_FIELD", + "value": 258 + }, + { + "type": "COPY_TTL_OUT" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": "POP_PBB" + }, + { + "ethertype": 4660, + "type": "PUSH_PBB" + }, + { + "ethertype": 39030, + "type": "POP_MPLS" + }, + { + "ethertype": 34887, + "type": "PUSH_MPLS" + }, + { + "type": "POP_VLAN" + }, + { + "ethertype": 33024, + "type": "PUSH_VLAN" + }, + { + "type": "DEC_MPLS_TTL" + }, + { + "mpls_ttl": 10, + "type": "SET_MPLS_TTL" + }, + { + "type": "DEC_NW_TTL" + }, + { + "nw_ttl": 10, + "type": "SET_NW_TTL" + }, + { + "data": "AAECAwQFBgc=", + "data_type": "base64", + "experimenter": 101, + "type": "EXPERIMENTER" + }, + { + "queue_id": 3, + "type": "SET_QUEUE" + }, + { + "meter_id": 2, + "type": "METER" + }, + { + "group_id": 99, + "type": "GROUP" + }, + { + "max_len": 65535, + "port": 6, + "type": "OUTPUT" + } + ], + "type": "WRITE_ACTIONS" + }, + { + "actions": [ + { + "field": "eth_src", + "type": "SET_FIELD", + "value": "01:02:03:04:05:06" + }, + { + "field": 
"pbb_uca", + "type": "SET_FIELD", + "value": 1 + }, + { + "type": "COPY_FIELD", + "n_bits": 32, + "src_offset": 1, + "dst_offset": 2, + "src_oxm_id": "eth_src", + "dst_oxm_id": "eth_dst" + } + ], + "type": "APPLY_ACTIONS" + } + ], + "buffer_id": 65535, + "importance": 0, + "match": { + "eth_dst": "f2:0b:a4:7d:f8:ea" + }, + "priority": 123, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_reply.packet.json new file mode 100644 index 00000000..ac4f93c4 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_reply.packet.json @@ -0,0 +1,10 @@ +{ + "1": [ + { + "length": 16, + "stats": { + "flow_count": 1 + } + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_request.packet.json new file mode 100644 index 00000000..845af65b --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-aggregate_stats_request.packet.json @@ -0,0 +1,11 @@ +{ + "flow": { + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "match": {}, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 255 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-desc_reply.packet.json new file mode 100644 index 00000000..8fc54b74 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-desc_reply.packet.json @@ -0,0 +1,9 @@ +{ + "1": { + "dp_desc": "dp", + "hw_desc": "hw", + "mfr_desc": "mfr", + "serial_num": "serial", + "sw_desc": "sw" + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-experimenter.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-experimenter.packet.json new file mode 100644 index 00000000..c655f8f0 --- /dev/null +++ 
b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-experimenter.packet.json @@ -0,0 +1,8 @@ +{ + "exp": { + "data": "bmF6bw==", + "data_type": "base64", + "exp_type": 123456789, + "experimenter": 98765432 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_reply.packet.json new file mode 100644 index 00000000..4e12f857 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_reply.packet.json @@ -0,0 +1,26 @@ +{ + "1": [ + { + "cookie": 1234605616436508552, + "flags": 1, + "hard_timeout": 255, + "idle_timeout": 255, + "importance": 43690, + "instructions": [ + { + "table_id": 2, + "type": "GOTO_TABLE" + } + ], + "length": 64, + "match": { + "in_port": 1 + }, + "priority": 5, + "stats": { + "flow_count": 1 + }, + "table_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_request.packet.json new file mode 100644 index 00000000..f59aa0b2 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_desc_request.packet.json @@ -0,0 +1,13 @@ +{ + "flow": { + "cookie": 1234605616436508552, + "cookie_mask": 18446744073709551615, + "flags": 0, + "match": { + "in_port": 1 + }, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json new file mode 100644 index 00000000..72054e33 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json @@ -0,0 +1,103 @@ +{ + "cmd": 0, + "flow": { + "instructions": [ + { + "actions": [ + { + "field": "vlan_vid", + "type": "SET_FIELD", + "value": 258 + }, + { + "type": "COPY_TTL_OUT" + }, + { + "type": "COPY_TTL_IN" + }, + { + "type": 
"COPY_TTL_IN" + }, + { + "type": "POP_PBB" + }, + { + "ethertype": 4660, + "type": "PUSH_PBB" + }, + { + "ethertype": 39030, + "type": "POP_MPLS" + }, + { + "ethertype": 34887, + "type": "PUSH_MPLS" + }, + { + "type": "POP_VLAN" + }, + { + "ethertype": 33024, + "type": "PUSH_VLAN" + }, + { + "type": "DEC_MPLS_TTL" + }, + { + "mpls_ttl": 10, + "type": "SET_MPLS_TTL" + }, + { + "type": "DEC_NW_TTL" + }, + { + "nw_ttl": 10, + "type": "SET_NW_TTL" + }, + { + "data": "AAECAwQFBgc=", + "data_type": "base64", + "experimenter": 101, + "type": "EXPERIMENTER" + }, + { + "queue_id": 3, + "type": "SET_QUEUE" + }, + { + "group_id": 99, + "type": "GROUP" + }, + { + "max_len": 65535, + "port": 6, + "type": "OUTPUT" + } + ], + "type": "WRITE_ACTIONS" + }, + { + "actions": [ + { + "field": "eth_src", + "type": "SET_FIELD", + "value": "01:02:03:04:05:06" + }, + { + "field": "pbb_uca", + "type": "SET_FIELD", + "value": 1 + } + ], + "type": "APPLY_ACTIONS" + } + ], + "buffer_id": 65535, + "importance": 0, + "match": { + "eth_dst": "f2:0b:a4:7d:f8:ea" + }, + "priority": 123, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_reply.packet.json new file mode 100644 index 00000000..385f256b --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_reply.packet.json @@ -0,0 +1,16 @@ +{ + "1": [ + { + "length": 40, + "match": { + "in_port": 1 + }, + "priority": 1, + "reason": 0, + "stats": { + "flow_count": 1 + }, + "table_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_request.packet.json new file mode 100644 index 00000000..a42dfef0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-flow_stats_request.packet.json @@ -0,0 +1,11 @@ +{ + "flow": { + "cookie": 0, + "cookie_mask": 0, + 
"flags": 0, + "match": {}, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 0 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_reply.packet.json new file mode 100644 index 00000000..b6b75020 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_reply.packet.json @@ -0,0 +1,27 @@ +{ + "1": [ + { + "buckets": [ + { + "actions": [ + { + "max_len": 65509, + "port": 1, + "type": "OUTPUT" + } + ], + "bucket_id": 65535, + "properties": [ + { + "type": "WEIGHT", + "weight": 65535 + } + ] + } + ], + "group_id": 1, + "properties": [], + "type": "SELECT" + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_request.packet.json new file mode 100644 index 00000000..f25aaff4 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_desc_request.packet.json @@ -0,0 +1,3 @@ +{ + "group_id": 52651 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_features_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_features_reply.packet.json new file mode 100644 index 00000000..2a29ac2d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_features_reply.packet.json @@ -0,0 +1,104 @@ +{ + "1": [ + { + "actions": [ + { + "ALL": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "SELECT": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "INDIRECT": [ + "OUTPUT", + 
"COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + }, + { + "FF": [ + "OUTPUT", + "COPY_TTL_OUT", + "COPY_TTL_IN", + "SET_MPLS_TTL", + "DEC_MPLS_TTL", + "PUSH_VLAN", + "POP_VLAN", + "PUSH_MPLS", + "POP_MPLS", + "SET_QUEUE", + "GROUP", + "SET_NW_TTL", + "DEC_NW_TTL", + "SET_FIELD" + ] + } + ], + "capabilities": [ + "SELECT_WEIGHT", + "CHAINING" + ], + "max_groups": [ + { + "ALL": 16777216 + }, + { + "SELECT": 16777216 + }, + { + "INDIRECT": 16777216 + }, + { + "FF": 16777216 + } + ], + "types": [ + "ALL", + "SELECT", + "INDIRECT", + "FF" + ] + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_mod.packet.json new file mode 100644 index 00000000..eca861ea --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_mod.packet.json @@ -0,0 +1,42 @@ +{ + "cmd": 3, + "group": { + "buckets": [ + { + "actions": [ + { + "len": 8, + "type": "POP_VLAN" + }, + { + "field": "ipv4_dst", + "type": "SET_FIELD", + "value": "192.168.2.9" + } + ], + "bucket_id": 305419896, + "properties": [ + { + "length": 8, + "type": "WEIGHT", + "weight": 52428 + }, + { + "length": 8, + "type": "WATCH_PORT", + "watch": 56797 + }, + { + "length": 8, + "type": "WATCH_GROUP", + "watch": 4008636142 + } + ] + } + ], + "command_bucket_id": 3149642683, + "group_id": 2863311530, + "properties": [], + "type": "SELECT" + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_stats_reply.packet.json new file mode 100644 index 00000000..1c6f9039 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-group_stats_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "bucket_stats": [ + { + "byte_count": 2345, + 
"packet_count": 234 + } + ], + "byte_count": 12345, + "duration_nsec": 609036000, + "duration_sec": 9, + "group_id": 1, + "length": 56, + "packet_count": 123, + "ref_count": 2 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_desc_reply.packet.json new file mode 100644 index 00000000..d5efcd97 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_desc_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "bands": [ + { + "burst_size": 10, + "rate": 1000, + "type": "DROP" + } + ], + "flags": [ + "PKTPS", + "BURST", + "STATS" + ], + "meter_id": 100 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_features_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_features_reply.packet.json new file mode 100644 index 00000000..24dac7d4 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_features_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "band_types": [ + "DROP", + "DSCP_REMARK" + ], + "capabilities": [ + "KBPS", + "PKTPS", + "BURST", + "STATS" + ], + "max_bands": 255, + "max_color": 0, + "max_meter": 16777216 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_mod.packet.json new file mode 100644 index 00000000..1d881209 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_mod.packet.json @@ -0,0 +1,24 @@ +{ + "cmd": 0, + "meter": { + "bands": [ + { + "burst_size": 10, + "rate": 1000, + "type": "DROP" + }, + { + "burst_size": 10, + "prec_level": 1, + "rate": 1000, + "type": "DSCP_REMARK" + } + ], + "flags": [ + "PKTPS", + "BURST", + "STATS" + ], + "meter_id": 100 + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_stats_reply.packet.json 
b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_stats_reply.packet.json new file mode 100644 index 00000000..e7f9722b --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-meter_stats_reply.packet.json @@ -0,0 +1,19 @@ +{ + "1": [ + { + "band_stats": [ + { + "byte_band_count": 0, + "packet_band_count": 0 + } + ], + "byte_in_count": 0, + "duration_nsec": 480000, + "duration_sec": 0, + "len": 56, + "meter_id": 100, + "packet_in_count": 0, + "ref_count": 0 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_reply.packet.json new file mode 100644 index 00000000..c2f748a8 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_reply.packet.json @@ -0,0 +1,83 @@ +{ + "1": [ + { + "config": 0, + "hw_addr": "f2:0b:a4:d0:3f:70", + "length": 168, + "name": "Port7", + "port_no": 7, + "properties": [ + { + "advertised": 10240, + "curr": 10248, + "curr_speed": 5000, + "length": 32, + "max_speed": 5000, + "peer": 10248, + "supported": 10248, + "type": "ETHERNET" + }, + { + "length": 40, + "rx_grid_freq_lmda": 1500, + "rx_max_freq_lmda": 2000, + "rx_min_freq_lmda": 1000, + "supported": 1, + "tx_grid_freq_lmda": 1500, + "tx_max_freq_lmda": 2000, + "tx_min_freq_lmda": 1000, + "tx_pwr_max": 2000, + "tx_pwr_min": 1000, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "state": 4 + }, + { + "config": 0, + "hw_addr": "f2:0b:a4:7d:f8:ea", + "length": 72, + "name": "Port6", + "port_no": 6, + "properties": [ + { + "advertised": 10240, + "curr": 10248, + "curr_speed": 5000, + "length": 32, + "max_speed": 5000, + 
"peer": 10248, + "supported": 10248, + "type": "ETHERNET" + } + ], + "state": 4 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_request.packet.json new file mode 100644 index 00000000..d0519e83 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_desc_request.packet.json @@ -0,0 +1,3 @@ +{ + "port_no": 48346 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_mod.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_mod.packet.json new file mode 100644 index 00000000..be9de69d --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_mod.packet.json @@ -0,0 +1,50 @@ +{ + "port_config": { + "config": 0, + "hw_addr": "00:11:00:00:11:11", + "mask": 0, + "port_no": 1, + "properties": [ + { + "advertise": 4096, + "length": 8, + "type": "ETHERNET" + }, + { + "configure": 3, + "fl_offset": 2000, + "freq_lmda": 1500, + "grid_span": 3000, + "length": 24, + "tx_pwr": 300, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ] + } +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_stats_reply.packet.json new file mode 100644 index 00000000..821c9b72 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-port_stats_reply.packet.json @@ -0,0 +1,93 @@ +{ + "1": [ + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 224, + "port_no": 7, + "properties": [ + { + "collisions": 0, + "length": 40, + "rx_crc_err": 0, + "rx_frame_err": 0, + "rx_over_err": 0, + 
"type": "ETHERNET" + }, + { + "bias_current": 300, + "flags": 3, + "length": 44, + "rx_freq_lmda": 1500, + "rx_grid_span": 500, + "rx_offset": 700, + "rx_pwr": 2000, + "temperature": 273, + "tx_freq_lmda": 1500, + "tx_grid_span": 500, + "tx_offset": 700, + "tx_pwr": 2000, + "type": "OPTICAL" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "rx_bytes": 0, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 0, + "tx_bytes": 336, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 4 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 120, + "port_no": 6, + "properties": [ + { + "collisions": 0, + "length": 40, + "rx_crc_err": 0, + "rx_frame_err": 0, + "rx_over_err": 0, + "type": "ETHERNET" + } + ], + "rx_bytes": 336, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 4, + "tx_bytes": 336, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 4 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_reply.packet.json new file mode 100644 index 00000000..71ca0b71 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_reply.packet.json @@ -0,0 +1,61 @@ +{ + "1": [ + { + "len": 32, + "port_no": 7, + "properties": [ + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + } + ], + "queue_id": 0 + }, + { + "len": 88, + "port_no": 8, + "properties": [ + { + "length": 8, + "rate": 300, + "type": "MIN_RATE" + }, + { + "length": 8, + "rate": 900, + "type": "MAX_RATE" + }, + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 
+ ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "queue_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_request.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_request.packet.json new file mode 100644 index 00000000..9765cf31 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_desc_request.packet.json @@ -0,0 +1,4 @@ +{ + "port_no": 52651, + "queue_id": 57020 +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_stats_reply.packet.json new file mode 100644 index 00000000..a6d246d0 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-queue_stats_reply.packet.json @@ -0,0 +1,64 @@ +{ + "1": [ + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 104, + "port_no": 7, + "properties": [ + { + "data": [], + "exp_type": 0, + "experimenter": 101, + "length": 12, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1 + ], + "exp_type": 1, + "experimenter": 101, + "length": 16, + "type": "EXPERIMENTER" + }, + { + "data": [ + 1, + 2 + ], + "exp_type": 2, + "experimenter": 101, + "length": 20, + "type": "EXPERIMENTER" + } + ], + "queue_id": 1, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 48, + "port_no": 6, + "properties": [], + "queue_id": 1, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + }, + { + "duration_nsec": 0, + "duration_sec": 0, + "length": 48, + "port_no": 7, + "properties": [], + "queue_id": 2, + "tx_bytes": 0, + "tx_errors": 0, + "tx_packets": 0 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_features_reply.packet.json 
b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_features_reply.packet.json new file mode 100644 index 00000000..e16efb76 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_features_reply.packet.json @@ -0,0 +1,25 @@ +{ + "1": [ + { + "capabilities": 4, + "command": 1, + "features": 1, + "length": 80, + "max_entries": 255, + "metadata_match": 18446744073709551615, + "metadata_write": 18446744073709551615, + "name": "table1", + "properties": [ + { + "oxm_values": [ + { + "eth_src": "aa:bb:cc:dd:ee:ff" + } + ], + "type": "PACKET_TYPES" + } + ], + "table_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_stats_reply.packet.json b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_stats_reply.packet.json new file mode 100644 index 00000000..95b2c495 --- /dev/null +++ b/ryu/tests/unit/lib/ofctl_json/of15/libofproto-OFP15-table_stats_reply.packet.json @@ -0,0 +1,16 @@ +{ + "1": [ + { + "active_count": 4, + "lookup_count": 4, + "matched_count": 4, + "table_id": 0 + }, + { + "active_count": 4, + "lookup_count": 4, + "matched_count": 4, + "table_id": 1 + } + ] +} diff --git a/ryu/tests/unit/lib/test_import_module.py b/ryu/tests/unit/lib/test_import_module.py index 71ff984b..25264c36 100644 --- a/ryu/tests/unit/lib/test_import_module.py +++ b/ryu/tests/unit/lib/test_import_module.py @@ -18,18 +18,12 @@ import unittest from nose.tools import eq_ from ryu.utils import import_module -import ryu.tests.unit.lib.test_mod.fuga.mod class Test_import_module(unittest.TestCase): - """ Test case for ryu.utils.import_module """ - - def setUp(self): - pass - - def tearDown(self): - pass + Test case for ryu.utils.import_module + """ @staticmethod def _my_import(name): @@ -40,32 +34,34 @@ class Test_import_module(unittest.TestCase): return mod def test_import_module_with_same_basename(self): - fuga = import_module('ryu.tests.unit.lib.test_mod.fuga.mod') - eq_("this is fuga", fuga.name) - hoge = 
import_module('ryu.tests.unit.lib.test_mod.hoge.mod') - eq_("this is hoge", hoge.name) + aaa = import_module('ryu.tests.unit.lib.test_mod.aaa.mod') + eq_("this is aaa", aaa.name) + bbb = import_module('ryu.tests.unit.lib.test_mod.bbb.mod') + eq_("this is bbb", bbb.name) def test_import_module_by_filename(self): - fuga = import_module('./lib/test_mod/fuga/mod.py') - eq_("this is fuga", fuga.name) - hoge = import_module('./lib/test_mod/hoge/mod.py') - eq_("this is hoge", hoge.name) + ccc = import_module('./lib/test_mod/ccc/mod.py') + eq_("this is ccc", ccc.name) + ddd = import_module('./lib/test_mod/ddd/mod.py') + # Note: When importing a module by filename, if module file name + # is duplicated, import_module returns a module instance which is + # imported before. + eq_("this is ccc", ddd.name) def test_import_same_module1(self): - fuga1 = import_module('./lib/test_mod/fuga/mod.py') - eq_("this is fuga", fuga1.name) - eq_(ryu.tests.unit.lib.test_mod.fuga.mod, fuga1) + from ryu.tests.unit.lib.test_mod import eee as eee1 + eq_("this is eee", eee1.name) + eee2 = import_module('./lib/test_mod/eee.py') + eq_("this is eee", eee2.name) def test_import_same_module2(self): - fuga1 = import_module('./lib/test_mod/fuga/mod.py') - eq_("this is fuga", fuga1.name) - fuga2 = import_module('ryu.tests.unit.lib.test_mod.fuga.mod') - eq_("this is fuga", fuga2.name) - eq_(fuga1, fuga2) + fff1 = import_module('./lib/test_mod/fff.py') + eq_("this is fff", fff1.name) + fff2 = import_module('ryu.tests.unit.lib.test_mod.fff') + eq_("this is fff", fff2.name) def test_import_same_module3(self): - fuga1 = import_module('./lib/test_mod/fuga/mod.py') - eq_("this is fuga", fuga1.name) - fuga3 = self._my_import('ryu.tests.unit.lib.test_mod.fuga.mod') - eq_("this is fuga", fuga3.name) - eq_(fuga1, fuga3) + ggg1 = import_module('./lib/test_mod/ggg.py') + eq_("this is ggg", ggg1.name) + ggg2 = self._my_import('ryu.tests.unit.lib.test_mod.ggg') + eq_("this is ggg", ggg2.name) diff --git 
a/ryu/tests/unit/lib/test_mod/fuga/__init__.py b/ryu/tests/unit/lib/test_mod/aaa/__init__.py similarity index 100% rename from ryu/tests/unit/lib/test_mod/fuga/__init__.py rename to ryu/tests/unit/lib/test_mod/aaa/__init__.py diff --git a/ryu/tests/unit/lib/test_mod/fuga/mod.py b/ryu/tests/unit/lib/test_mod/aaa/mod.py similarity index 96% rename from ryu/tests/unit/lib/test_mod/fuga/mod.py rename to ryu/tests/unit/lib/test_mod/aaa/mod.py index 551cb6c0..a5eff136 100644 --- a/ryu/tests/unit/lib/test_mod/fuga/mod.py +++ b/ryu/tests/unit/lib/test_mod/aaa/mod.py @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -name = "this is fuga" +name = "this is aaa" diff --git a/ryu/tests/unit/lib/test_mod/hoge/__init__.py b/ryu/tests/unit/lib/test_mod/bbb/__init__.py similarity index 100% rename from ryu/tests/unit/lib/test_mod/hoge/__init__.py rename to ryu/tests/unit/lib/test_mod/bbb/__init__.py diff --git a/ryu/tests/unit/lib/test_mod/hoge/mod.py b/ryu/tests/unit/lib/test_mod/bbb/mod.py similarity index 96% rename from ryu/tests/unit/lib/test_mod/hoge/mod.py rename to ryu/tests/unit/lib/test_mod/bbb/mod.py index 2f363d0b..397c8554 100644 --- a/ryu/tests/unit/lib/test_mod/hoge/mod.py +++ b/ryu/tests/unit/lib/test_mod/bbb/mod.py @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -name = "this is hoge" +name = "this is bbb" diff --git a/ryu/tests/unit/lib/test_mod/ccc/__init__.py b/ryu/tests/unit/lib/test_mod/ccc/__init__.py new file mode 100644 index 00000000..ce07156d --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/ccc/__init__.py @@ -0,0 +1,14 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ryu/contrib/ovs/timeval.py b/ryu/tests/unit/lib/test_mod/ccc/mod.py similarity index 55% rename from ryu/contrib/ovs/timeval.py rename to ryu/tests/unit/lib/test_mod/ccc/mod.py index ba0e54e9..5be826cf 100644 --- a/ryu/contrib/ovs/timeval.py +++ b/ryu/tests/unit/lib/test_mod/ccc/mod.py @@ -1,26 +1,16 @@ -# Copyright (c) 2009, 2010 Nicira, Inc. +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import time - - -def msec(): - """Returns the current time, as the amount of time since the epoch, in - milliseconds, as a float.""" - return time.time() * 1000.0 - - -def postfork(): - # Just a stub for now - pass +name = "this is ccc" diff --git a/ryu/tests/unit/lib/test_mod/ddd/__init__.py b/ryu/tests/unit/lib/test_mod/ddd/__init__.py new file mode 100644 index 00000000..ce07156d --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/ddd/__init__.py @@ -0,0 +1,14 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/ryu/tests/unit/lib/test_mod/ddd/mod.py b/ryu/tests/unit/lib/test_mod/ddd/mod.py new file mode 100644 index 00000000..31d31d53 --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/ddd/mod.py @@ -0,0 +1,16 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name = "this is ddd" diff --git a/ryu/tests/unit/lib/test_mod/eee.py b/ryu/tests/unit/lib/test_mod/eee.py new file mode 100644 index 00000000..ed21a8aa --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/eee.py @@ -0,0 +1,16 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name = "this is eee" diff --git a/ryu/tests/unit/lib/test_mod/fff.py b/ryu/tests/unit/lib/test_mod/fff.py new file mode 100644 index 00000000..5a565e38 --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/fff.py @@ -0,0 +1,16 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name = "this is fff" diff --git a/ryu/tests/unit/lib/test_mod/ggg.py b/ryu/tests/unit/lib/test_mod/ggg.py new file mode 100644 index 00000000..29555e56 --- /dev/null +++ b/ryu/tests/unit/lib/test_mod/ggg.py @@ -0,0 +1,16 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name = "this is ggg" diff --git a/ryu/tests/unit/lib/test_ofctl.py b/ryu/tests/unit/lib/test_ofctl.py index a8ca01f2..862ce1d3 100644 --- a/ryu/tests/unit/lib/test_ofctl.py +++ b/ryu/tests/unit/lib/test_ofctl.py @@ -24,6 +24,8 @@ import unittest from ryu.lib import ofctl_v1_0 from ryu.lib import ofctl_v1_2 from ryu.lib import ofctl_v1_3 +from ryu.lib import ofctl_v1_4 +from ryu.lib import ofctl_v1_5 from ryu.ofproto import ofproto_parser from ryu.ofproto.ofproto_protocol import ProtocolDesc from ryu.tests import test_lib @@ -32,6 +34,7 @@ LOG = logging.getLogger(__name__) class DummyDatapath(ProtocolDesc): + def __init__(self, version): super(DummyDatapath, self).__init__(version) self.id = 1 # XXX @@ -73,7 +76,8 @@ class Test_ofctl(unittest.TestCase): # expected message <--> sent message request.serialize() try: - eq_(request.to_jsondict(), dp.request_msg.to_jsondict()) + eq_(json.dumps(request.to_jsondict(), sort_keys=True), + json.dumps(dp.request_msg.to_jsondict(), sort_keys=True)) except AssertionError as e: # For debugging json.dump(dp.request_msg.to_jsondict(), @@ -83,7 +87,9 @@ class Test_ofctl(unittest.TestCase): # expected output <--> return of ofctl def _remove(d, names): - f = lambda x: _remove(x, names) + def f(x): + return _remove(x, names) + if isinstance(d, list): return list(map(f, d)) if isinstance(d, dict): @@ -95,9 +101,11 @@ class Test_ofctl(unittest.TestCase): return d2 return d + expected = 
_remove(expected, ['len', 'length']) + output = _remove(output, ['len', 'length']) try: - eq_(_remove(expected, ['len', 'length']), - _remove(output, ['len', 'length'])) + eq_(json.dumps(expected, sort_keys=True), + json.dumps(output, sort_keys=True)) except AssertionError as e: # For debugging json.dump(output, open('/tmp/' + name + '_reply.json', 'w'), @@ -109,7 +117,9 @@ def _add_tests(): _ofp_vers = { 'of10': 0x01, 'of12': 0x03, - 'of13': 0x04 + 'of13': 0x04, + 'of14': 0x05, + 'of15': 0x06, } _test_cases = { @@ -131,11 +141,31 @@ def _add_tests(): 'request': '3-37-ofp_queue_stats_request.packet.json', 'reply': '3-38-ofp_queue_stats_reply.packet.json' }, + { + 'method': ofctl_v1_2.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet1.json', + 'reply': '3-38-ofp_queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_2.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet2.json', + 'reply': '3-38-ofp_queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_2.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet3.json', + 'reply': '3-38-ofp_queue_stats_reply.packet.json' + }, { 'method': ofctl_v1_2.get_queue_config, 'request': '3-35-ofp_queue_get_config_request.packet.json', 'reply': '3-36-ofp_queue_get_config_reply.packet.json' }, + { + 'method': ofctl_v1_2.get_queue_config, + 'request': 'lib-ofctl-ofp_queue_get_config_request.packet.json', + 'reply': '3-36-ofp_queue_get_config_reply.packet.json' + }, { 'method': ofctl_v1_2.get_flow_stats, 'request': '3-11-ofp_flow_stats_request.packet.json', @@ -156,11 +186,21 @@ def _add_tests(): 'request': '3-29-ofp_port_stats_request.packet.json', 'reply': '3-30-ofp_port_stats_reply.packet.json' }, + { + 'method': ofctl_v1_2.get_port_stats, + 'request': 'lib-ofctl-ofp_port_stats_request.packet.json', + 'reply': '3-30-ofp_port_stats_reply.packet.json' + }, { 'method': ofctl_v1_2.get_group_stats, 'request': '3-61-ofp_group_stats_request.packet.json', 'reply': 
'3-62-ofp_group_stats_reply.packet.json' }, + { + 'method': ofctl_v1_2.get_group_stats, + 'request': 'lib-ofctl-ofp_group_stats_request.packet.json', + 'reply': '3-62-ofp_group_stats_reply.packet.json' + }, { 'method': ofctl_v1_2.get_group_features, 'request': '3-31-ofp_group_features_stats_request.packet.json', @@ -210,11 +250,31 @@ def _add_tests(): 'request': '4-37-ofp_queue_stats_request.packet.json', 'reply': '4-38-ofp_queue_stats_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet1.json', + 'reply': '4-38-ofp_queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_3.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet2.json', + 'reply': '4-38-ofp_queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_3.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet3.json', + 'reply': '4-38-ofp_queue_stats_reply.packet.json' + }, { 'method': ofctl_v1_3.get_queue_config, 'request': '4-35-ofp_queue_get_config_request.packet.json', 'reply': '4-36-ofp_queue_get_config_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_queue_config, + 'request': 'lib-ofctl-ofp_queue_get_config_request.packet.json', + 'reply': '4-36-ofp_queue_get_config_reply.packet.json' + }, { 'method': ofctl_v1_3.get_flow_stats, 'request': '4-11-ofp_flow_stats_request.packet.json', @@ -240,11 +300,21 @@ def _add_tests(): 'request': '4-29-ofp_port_stats_request.packet.json', 'reply': '4-30-ofp_port_stats_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_port_stats, + 'request': 'lib-ofctl-ofp_port_stats_request.packet.json', + 'reply': '4-30-ofp_port_stats_reply.packet.json' + }, { 'method': ofctl_v1_3.get_meter_stats, 'request': '4-49-ofp_meter_stats_request.packet.json', 'reply': '4-50-ofp_meter_stats_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_meter_stats, + 'request': 'lib-ofctl-ofp_meter_stats_request.packet.json', + 'reply': '4-50-ofp_meter_stats_reply.packet.json' + }, { 
'method': ofctl_v1_3.get_meter_features, 'request': '4-51-ofp_meter_features_request.packet.json', @@ -255,11 +325,21 @@ def _add_tests(): 'request': '4-47-ofp_meter_config_request.packet.json', 'reply': '4-48-ofp_meter_config_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_meter_config, + 'request': 'lib-ofctl-ofp_meter_config_request.packet.json', + 'reply': '4-48-ofp_meter_config_reply.packet.json' + }, { 'method': ofctl_v1_3.get_group_stats, 'request': '4-57-ofp_group_stats_request.packet.json', 'reply': '4-58-ofp_group_stats_reply.packet.json' }, + { + 'method': ofctl_v1_3.get_group_stats, + 'request': 'lib-ofctl-ofp_group_stats_request.packet.json', + 'reply': '4-58-ofp_group_stats_reply.packet.json' + }, { 'method': ofctl_v1_3.get_group_features, 'request': '4-31-ofp_group_features_request.packet.json', @@ -300,7 +380,226 @@ def _add_tests(): 'request': '4-16-ofp_experimenter.packet.json', 'reply': None }, - ] + ], + 'of14': [ + { + 'method': ofctl_v1_4.get_desc_stats, + 'request': '5-24-ofp_desc_request.packet.json', + 'reply': '5-0-ofp_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_queue_stats, + 'request': '5-35-ofp_queue_stats_request.packet.json', + 'reply': '5-36-ofp_queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_queue_desc, + 'request': '5-63-ofp_queue_desc_request.packet.json', + 'reply': '5-64-ofp_queue_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_flow_stats, + 'request': '5-11-ofp_flow_stats_request.packet.json', + 'reply': '5-12-ofp_flow_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_aggregate_flow_stats, + 'request': '5-25-ofp_aggregate_stats_request.packet.json', + 'reply': '5-26-ofp_aggregate_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_table_stats, + 'request': '5-27-ofp_table_stats_request.packet.json', + 'reply': '5-28-ofp_table_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_table_features, + 'request': 
'lib-ofctl-ofp_table_features_request.packet.json', + 'reply': '5-54-ofp_table_features_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_port_stats, + 'request': '5-29-ofp_port_stats_request.packet.json', + 'reply': '5-30-ofp_port_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_meter_stats, + 'request': '5-47-ofp_meter_stats_request.packet.json', + 'reply': '5-48-ofp_meter_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_meter_features, + 'request': '5-49-ofp_meter_features_request.packet.json', + 'reply': '5-50-ofp_meter_features_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_meter_config, + 'request': '5-45-ofp_meter_config_request.packet.json', + 'reply': '5-46-ofp_meter_config_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_group_stats, + 'request': '5-55-ofp_group_stats_request.packet.json', + 'reply': '5-56-ofp_group_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_group_features, + 'request': '5-31-ofp_group_features_request.packet.json', + 'reply': '5-32-ofp_group_features_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_group_desc, + 'request': '5-33-ofp_group_desc_request.packet.json', + 'reply': '5-34-ofp_group_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_4.get_port_desc, + 'request': '5-51-ofp_port_desc_request.packet.json', + 'reply': '5-52-ofp_port_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_4.mod_flow_entry, + 'request': '5-2-ofp_flow_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_4.mod_meter_entry, + 'request': '5-43-ofp_meter_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_4.mod_group_entry, + 'request': '5-21-ofp_group_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_4.mod_port_behavior, + 'request': '5-22-ofp_port_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_4.send_experimenter, + 'request': '5-16-ofp_experimenter.packet.json', + 'reply': None + }, + ], + 'of15': [ + { + 'method': 
ofctl_v1_5.get_desc_stats, + 'request': 'libofproto-OFP15-desc_request.packet.json', + 'reply': 'libofproto-OFP15-desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_queue_stats, + 'request': 'lib-ofctl-ofp_queue_stats_request.packet.json', + 'reply': 'libofproto-OFP15-queue_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_queue_desc, + 'request': 'libofproto-OFP15-queue_desc_request.packet.json', + 'reply': 'libofproto-OFP15-queue_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_flow_stats, + 'request': 'libofproto-OFP15-flow_stats_request.packet.json', + 'reply': 'libofproto-OFP15-flow_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_flow_desc_stats, + 'request': 'libofproto-OFP15-flow_desc_request.packet.json', + 'reply': 'libofproto-OFP15-flow_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_flow_desc_stats, + 'request': 'lib-ofctl-OFP15-flow_desc_request.packet.json', + 'reply': 'lib-ofctl-OFP15-flow_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_aggregate_flow_stats, + 'request': 'libofproto-OFP15-aggregate_stats_request.packet.json', + 'reply': 'libofproto-OFP15-aggregate_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_table_stats, + 'request': 'libofproto-OFP15-table_stats_request.packet.json', + 'reply': 'libofproto-OFP15-table_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_table_features, + 'request': 'lib-ofctl-ofp_table_features_request.packet.json', + 'reply': 'libofproto-OFP15-table_features_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_port_stats, + 'request': 'libofproto-OFP15-port_stats_request.packet.json', + 'reply': 'libofproto-OFP15-port_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_meter_stats, + 'request': 'libofproto-OFP15-meter_stats_request.packet.json', + 'reply': 'libofproto-OFP15-meter_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_meter_features, + 'request': 
'libofproto-OFP15-meter_features_request.packet.json', + 'reply': 'libofproto-OFP15-meter_features_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_meter_desc, + 'request': 'libofproto-OFP15-meter_desc_request.packet.json', + 'reply': 'libofproto-OFP15-meter_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_group_stats, + 'request': 'libofproto-OFP15-group_stats_request.packet.json', + 'reply': 'libofproto-OFP15-group_stats_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_group_features, + 'request': 'libofproto-OFP15-group_features_request.packet.json', + 'reply': 'libofproto-OFP15-group_features_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_group_desc, + 'request': 'libofproto-OFP15-group_desc_request.packet.json', + 'reply': 'libofproto-OFP15-group_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.get_port_desc, + 'request': 'libofproto-OFP15-port_desc_request.packet.json', + 'reply': 'libofproto-OFP15-port_desc_reply.packet.json' + }, + { + 'method': ofctl_v1_5.mod_flow_entry, + 'request': 'libofproto-OFP15-flow_mod_no_nx.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_5.mod_flow_entry, + 'request': 'lib-ofctl-OFP15-flow_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_5.mod_meter_entry, + 'request': 'libofproto-OFP15-meter_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_5.mod_group_entry, + 'request': 'libofproto-OFP15-group_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_5.mod_port_behavior, + 'request': 'libofproto-OFP15-port_mod.packet.json', + 'reply': None + }, + { + 'method': ofctl_v1_5.send_experimenter, + 'request': 'libofproto-OFP15-experimenter.packet.json', + 'reply': None + } + ], } def _jsonfile_to_msg(datapath, jsonfile): @@ -316,7 +615,7 @@ def _add_tests(): parser_json_dir = os.path.join(parser_json_root, ofp_ver) ofctl_json_dir = os.path.join(ofctl_json_root, ofp_ver) for test in tests: - name = 'test_ofctl_' + ofp_ver + '_' + 
test['method'].__name__ + name = 'test_ofctl_' + ofp_ver + '_' + test['request'] print('adding %s ...' % name) args = {} args_json_path = os.path.join(ofctl_json_dir, test['request']) diff --git a/ryu/tests/unit/lib/test_pcaplib.py b/ryu/tests/unit/lib/test_pcaplib.py new file mode 100644 index 00000000..ac86904e --- /dev/null +++ b/ryu/tests/unit/lib/test_pcaplib.py @@ -0,0 +1,230 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import logging +import os +import struct +import sys +import unittest + +try: + import mock # Python 2 +except ImportError: + from unittest import mock # Python 3 + +from nose.tools import eq_ +from nose.tools import raises + +from ryu.utils import binary_str +from ryu.lib import pcaplib + +LOG = logging.getLogger(__name__) + +PCAP_PACKET_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), + '../../packet_data/pcap/') + + +class Test_PcapFileHdr(unittest.TestCase): + """ + Test case for pcaplib.PcapFileHdr class + """ + hdr = pcaplib.PcapFileHdr( + magic=None, # temporary default + version_major=2, + version_minor=4, + thiszone=0x11223344, + sigfigs=0x22334455, + snaplen=0x33445566, + network=0x44556677, + ) + + buf_big = ( + b'\xa1\xb2\xc3\xd4' # magic (Big Endian) + b'\x00\x02\x00\x04' # version_major, version_minor + b'\x11\x22\x33\x44' # thiszone + b'\x22\x33\x44\x55' # sigfigs + b'\x33\x44\x55\x66' # snaplen + b'\x44\x55\x66\x77' # network + ) + + buf_little = ( + b'\xd4\xc3\xb2\xa1' # magic (Little Endian) + b'\x02\x00\x04\x00' # version_major, version_minor + b'\x44\x33\x22\x11' # thiszone + b'\x55\x44\x33\x22' # sigfigs + b'\x66\x55\x44\x33' # snaplen + b'\x77\x66\x55\x44' # network + ) + + buf_invalid = ( + b'\xff\xff\xff\xff' # magic (Invalid) + b'\x02\x00\x04\x00' # version_major, version_minor + b'\x44\x33\x22\x11' # thiszone + b'\x55\x44\x33\x22' # sigfigs + b'\x66\x55\x44\x33' # snaplen + b'\x77\x66\x55\x44' # network + ) + + def _assert(self, magic, ret): + self.hdr.magic = magic + eq_(self.hdr.__dict__, ret.__dict__) + + def test_parser_with_big_endian(self): + ret, byteorder = pcaplib.PcapFileHdr.parser(self.buf_big) + self._assert(pcaplib.PcapFileHdr.MAGIC_NUMBER_IDENTICAL, ret) + eq_('big', byteorder) + + def test_parser_with_little_endian(self): + ret, byteorder = pcaplib.PcapFileHdr.parser(self.buf_little) + self._assert(pcaplib.PcapFileHdr.MAGIC_NUMBER_SWAPPED, ret) + 
eq_('little', byteorder) + + @mock.patch('sys.byteorder', 'big') + def test_serialize_with_big_endian(self): + buf = self.hdr.serialize() + eq_(binary_str(self.buf_big), binary_str(buf)) + + @mock.patch('sys.byteorder', 'little') + def test_serialize_with_little_endian(self): + buf = self.hdr.serialize() + eq_(binary_str(self.buf_little), binary_str(buf)) + + @raises(struct.error) + def test_parser_with_invalid_magic_number(self): + pcaplib.PcapFileHdr.parser(self.buf_invalid) + + +class Test_PcapPktHdr(unittest.TestCase): + """ + Test case for pcaplib.PcapPktHdr class + """ + expected_buf = b'test_data' + + hdr = pcaplib.PcapPktHdr( + ts_sec=0x11223344, + ts_usec=0x22334455, + incl_len=len(expected_buf), + orig_len=0x44556677, + ) + + buf_big = ( + b'\x11\x22\x33\x44' # ts_sec + b'\x22\x33\x44\x55' # ts_usec + b'\x00\x00\x00\x09' # incl_len = len(expected_buf) + b'\x44\x55\x66\x77' # orig_len + ) + + buf_little = ( + b'\x44\x33\x22\x11' # ts_sec + b'\x55\x44\x33\x22' # ts_usec + b'\x09\x00\x00\x00' # incl_len = len(expected_buf) + b'\x77\x66\x55\x44' # orig_len + ) + + def test_parser_with_big_endian(self): + ret, buf = pcaplib.PcapPktHdr.parser( + self.buf_big + self.expected_buf, 'big') + eq_(self.hdr.__dict__, ret.__dict__) + eq_(self.expected_buf, buf) + + def test_parser_with_little_endian(self): + ret, buf = pcaplib.PcapPktHdr.parser( + self.buf_little + self.expected_buf, 'little') + eq_(self.hdr.__dict__, ret.__dict__) + eq_(self.expected_buf, buf) + + @mock.patch('sys.byteorder', 'big') + def test_serialize_with_big_endian(self): + buf = self.hdr.serialize() + eq_(binary_str(self.buf_big), binary_str(buf)) + + @mock.patch('sys.byteorder', 'little') + def test_serialize_with_little_endian(self): + buf = self.hdr.serialize() + eq_(binary_str(self.buf_little), binary_str(buf)) + + +class Test_pcaplib_Reader(unittest.TestCase): + """ + Test case for pcaplib.Reader class + """ + + expected_outputs = [ + (0x1234 + (0x5678 / 1e6), b'test_data_1'), # sec=0x1234, 
usec=0x5678 + (0x2345 + (0x6789 / 1e6), b'test_data_2'), # sec=0x2345, usec=0x6789 + ] + + def _test(self, file_name): + outputs = [] + for ts, buf in pcaplib.Reader(open(file_name, 'rb')): + outputs.append((ts, buf)) + + eq_(self.expected_outputs, outputs) + + def test_with_big_endian(self): + self._test(os.path.join(PCAP_PACKET_DATA_DIR, 'big_endian.pcap')) + + def test_with_little_endian(self): + self._test(os.path.join(PCAP_PACKET_DATA_DIR, 'little_endian.pcap')) + + +class DummyFile(object): + + def __init__(self): + self.buf = b'' + + def write(self, buf): + self.buf += buf + + def close(self): + pass + + +class Test_pcaplib_Writer(unittest.TestCase): + """ + Test case for pcaplib.Writer class + """ + + @staticmethod + def _test(file_name): + expected_buf = open(file_name, 'rb').read() + f = DummyFile() + w = pcaplib.Writer(f) + w.write_pkt(b'test_data_1', ts=(0x1234 + (0x5678 / 1e6))) + w.write_pkt(b'test_data_2', ts=(0x2345 + (0x6789 / 1e6))) + eq_(expected_buf, f.buf) + + @mock.patch('sys.byteorder', 'big') + def test_with_big_endian(self): + self._test(os.path.join(PCAP_PACKET_DATA_DIR, 'big_endian.pcap')) + + @mock.patch('sys.byteorder', 'little') + def test_with_little_endian(self): + self._test(os.path.join(PCAP_PACKET_DATA_DIR, 'little_endian.pcap')) + + @staticmethod + @mock.patch.object(pcaplib.Writer, '_write_pcap_file_hdr', mock.MagicMock) + @mock.patch.object(pcaplib.Writer, '_write_pkt_hdr', mock.MagicMock) + def test_with_longer_buf(): + f = DummyFile() + snaplen = 4 + w = pcaplib.Writer(f, snaplen=snaplen) + w.write_pkt(b'hogehoge', ts=0) + expected_buf = b'hoge' # b'hogehoge'[:snaplen] + eq_(expected_buf, f.buf) + eq_(snaplen, len(f.buf)) diff --git a/ryu/tests/unit/lib/test_rpc.py b/ryu/tests/unit/lib/test_rpc.py index 149912ac..cedab558 100644 --- a/ryu/tests/unit/lib/test_rpc.py +++ b/ryu/tests/unit/lib/test_rpc.py @@ -119,7 +119,13 @@ class Test_rpc(unittest.TestCase): assert isinstance(obj, int) result = c.call(b'resp', [obj]) assert 
result == obj - assert isinstance(result, type(obj)) + import sys + # note: on PyPy, result will be a long type value. + sv = getattr(sys, 'subversion', None) + if sv is not None and sv[0] == 'PyPy': + assert isinstance(result, long) + else: + assert isinstance(result, type(obj)) def test_0_call_int3(self): c = rpc.Client(self._client_sock) @@ -237,6 +243,11 @@ class Test_rpc(unittest.TestCase): @unittest.skip("doesn't work with eventlet 0.18 and later") def test_4_call_large_binary(self): import struct + import sys + # note: on PyPy, this test case may hang up. + sv = getattr(sys, 'subversion', None) + if sv is not None and sv[0] == 'PyPy': + return c = rpc.Client(self._client_sock) obj = struct.pack("10000000x") diff --git a/ryu/tests/unit/ofproto/json/of12/3-2-ofp_flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of12/3-2-ofp_flow_mod.packet.json index 92392415..43e0e3c9 100644 --- a/ryu/tests/unit/ofproto/json/of12/3-2-ofp_flow_mod.packet.json +++ b/ryu/tests/unit/ofproto/json/of12/3-2-ofp_flow_mod.packet.json @@ -19,7 +19,9 @@ "mask": null, "value": 258 } - } + }, + "len": 16, + "type": 25 } }, { @@ -46,7 +48,9 @@ "mask": null, "value": "01:02:03:04:05:06" } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_group_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_group_stats_request.packet.json new file mode 100644 index 00000000..542ae726 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_group_stats_request.packet.json @@ -0,0 +1,6 @@ +{ + "OFPGroupStatsRequest": { + "flags": 0, + "group_id": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_port_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_port_stats_request.packet.json new file mode 100644 index 00000000..884c5fd2 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_port_stats_request.packet.json @@ -0,0 +1,6 @@ +{ + "OFPPortStatsRequest": { + "flags": 
0, + "port_no": 7 + } +} diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json new file mode 100644 index 00000000..a50308c0 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_get_config_request.packet.json @@ -0,0 +1,5 @@ +{ + "OFPQueueGetConfigRequest": { + "port": 4294967295 + } +} diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json new file mode 100644 index 00000000..77535806 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet1.json @@ -0,0 +1,7 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 7, + "queue_id": 4294967295 + } +} diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json new file mode 100644 index 00000000..66127d31 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet2.json @@ -0,0 +1,7 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 7, + "queue_id": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json new file mode 100644 index 00000000..1a798837 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of12/lib-ofctl-ofp_queue_stats_request.packet3.json @@ -0,0 +1,7 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 4294967295, + "queue_id": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/4-12-ofp_flow_stats_reply.packet.json b/ryu/tests/unit/ofproto/json/of13/4-12-ofp_flow_stats_reply.packet.json index 4b3483f5..04339dc3 100644 --- a/ryu/tests/unit/ofproto/json/of13/4-12-ofp_flow_stats_reply.packet.json +++ 
b/ryu/tests/unit/ofproto/json/of13/4-12-ofp_flow_stats_reply.packet.json @@ -139,7 +139,9 @@ "mask": null, "value": 258 } - } + }, + "len": 16, + "type": 25 } }, { @@ -247,9 +249,26 @@ "port": 6, "type": 0 } + }, + { + "OFPActionExperimenterUnknown": { + "len": 16, + "data": "ZXhwX2RhdGE=", + "experimenter": 98765432, + "type": 65535 + } + }, + { + "NXActionUnknown": { + "len": 16, + "data": "cF9kYXRh", + "experimenter": 8992, + "type": 65535, + "subtype": 25976 + } } ], - "len": 160, + "len": 192, "type": 3 } }, @@ -264,7 +283,9 @@ "mask": null, "value": "01:02:03:04:05:06" } - } + }, + "len": 16, + "type": 25 } }, { @@ -275,7 +296,9 @@ "mask": null, "value": 1 } - } + }, + "len": 16, + "type": 25 } } ], @@ -300,7 +323,7 @@ } } ], - "length": 280, + "length": 312, "match": { "OFPMatch": { "length": 4, diff --git a/ryu/tests/unit/ofproto/json/of13/4-2-ofp_flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of13/4-2-ofp_flow_mod.packet.json index d644545e..0e3a2cc3 100644 --- a/ryu/tests/unit/ofproto/json/of13/4-2-ofp_flow_mod.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/4-2-ofp_flow_mod.packet.json @@ -19,7 +19,9 @@ "mask": null, "value": 258 } - } + }, + "len": 16, + "type": 25 } }, { @@ -105,7 +107,15 @@ "nw_ttl": 10, "type": 23 } - }, + }, + { + "OFPActionExperimenterUnknown": { + "data": "AAECAwQFBgc=", + "experimenter": 101, + "len": 16, + "type": 65535 + } + }, { "OFPActionSetQueue": { "len": 8, @@ -128,8 +138,8 @@ "type": 0 } } - ], - "len": 160, + ], + "len": 176, "type": 3 } }, @@ -144,7 +154,9 @@ "mask": null, "value": "01:02:03:04:05:06" } - } + }, + "len": 16, + "type": 25 } }, { @@ -155,7 +167,9 @@ "mask": null, "value": 1 } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_group_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_group_stats_request.packet.json new file mode 100644 index 00000000..311b0381 --- /dev/null +++ 
b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_group_stats_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPGroupStatsRequest": { + "flags": 0, + "group_id": 1, + "type": 6 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_config_request.packet.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_config_request.packet.json new file mode 100644 index 00000000..9967bd29 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_config_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPMeterConfigStatsRequest": { + "flags": 0, + "meter_id": 1, + "type": 10 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_stats_request.packet.json new file mode 100644 index 00000000..570c2b6d --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_meter_stats_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPMeterStatsRequest": { + "flags": 0, + "meter_id": 1, + "type": 9 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_port_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_port_stats_request.packet.json new file mode 100644 index 00000000..f1258076 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_port_stats_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPPortStatsRequest": { + "flags": 0, + "port_no": 7, + "type": 4 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json new file mode 100644 index 00000000..a50308c0 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_get_config_request.packet.json @@ -0,0 +1,5 @@ +{ + "OFPQueueGetConfigRequest": { + "port": 4294967295 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json 
b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json new file mode 100644 index 00000000..b216fe97 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet1.json @@ -0,0 +1,8 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 7, + "queue_id": 4294967295, + "type": 5 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json new file mode 100644 index 00000000..cc00e1ea --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet2.json @@ -0,0 +1,8 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 7, + "queue_id": 1, + "type": 5 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json new file mode 100644 index 00000000..5f6579be --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/lib-ofctl-ofp_queue_stats_request.packet3.json @@ -0,0 +1,8 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 4294967295, + "queue_id": 1, + "type": 5 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod.packet.json index 23812142..7de40ef3 100644 --- a/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod.packet.json @@ -25,7 +25,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } }, { diff --git a/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod_match_conj.packet.json b/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod_match_conj.packet.json index 89c3a222..4fd8c4d4 100644 --- a/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod_match_conj.packet.json +++ 
b/ryu/tests/unit/ofproto/json/of13/libofproto-OFP13-flow_mod_match_conj.packet.json @@ -25,7 +25,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_conjunction.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_conjunction.packet.json index 7f89c5ea..edc10939 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_conjunction.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_conjunction.packet.json @@ -95,7 +95,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 2, - "xid": 2 + "table_id": 2 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_controller.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_controller.packet.json new file mode 100644 index 00000000..95bc3f51 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_controller.packet.json @@ -0,0 +1,43 @@ +{ + "OFPFlowMod": { + "buffer_id": 4294967295, + "command": 0, + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "NXActionController": { + "controller_id": 1, + "experimenter": 8992, + "len": 16, + "max_len": 1024, + "reason": 5, + "subtype": 20, + "type": 65535 + } + } + ], + "len": 24, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 4, + "oxm_fields": [], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 100, + "table_id": 0 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct.packet.json index 0c8670f8..473b54f1 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct.packet.json @@ -21,7 +21,8 @@ 
"recirc_table": 4, "subtype": 35, "type": 65535, - "zone_ofs_nbits": 0, + "zone_start": 0, + "zone_end": 0, "zone_src": 0 } } @@ -56,7 +57,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_exec.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_exec.packet.json index a1ad1402..7cecad7d 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_exec.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_exec.packet.json @@ -22,7 +22,9 @@ "mask": null, "value": 6636321 } - } + }, + "len": 16, + "type": 25 } } ], @@ -33,7 +35,8 @@ "recirc_table": 255, "subtype": 35, "type": 65535, - "zone_ofs_nbits": 0, + "zone_start": 0, + "zone_end": 0, "zone_src": 0 } } @@ -68,7 +71,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat.packet.json index 6052ec1b..b63c2267 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat.packet.json @@ -37,7 +37,8 @@ "recirc_table": 255, "subtype": 35, "type": 65535, - "zone_ofs_nbits": 0, + "zone_start": 0, + "zone_end": 0, "zone_src": 0 } } @@ -65,7 +66,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat_v6.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat_v6.packet.json index c5e3d50a..e50d5610 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat_v6.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_ct_nat_v6.packet.json @@ 
-37,7 +37,8 @@ "recirc_table": 255, "subtype": 35, "type": 65535, - "zone_ofs_nbits": 0, + "zone_start": 0, + "zone_end": 0, "zone_src": 0 } } @@ -65,7 +66,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_fintimeout.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_fintimeout.packet.json new file mode 100644 index 00000000..c0e02de9 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_fintimeout.packet.json @@ -0,0 +1,57 @@ +{ + "OFPFlowMod": { + "buffer_id": 4294967295, + "command": 0, + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "NXActionFinTimeout": { + "experimenter": 8992, + "fin_hard_timeout": 60, + "fin_idle_timeout": 30, + "len": 16, + "subtype": 19, + "type": 65535 + } + } + ], + "len": 24, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 15, + "oxm_fields": [ + { + "OXMTlv": { + "field": "eth_type", + "mask": null, + "value": 2048 + } + }, + { + "OXMTlv": { + "field": "ip_proto", + "mask": null, + "value": 6 + } + } + ], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 100, + "table_id": 0 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_learn.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_learn.packet.json index aaad64d4..c874c2e9 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_learn.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_learn.packet.json @@ -25,7 +25,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } }, { @@ -185,7 +187,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 2, - "xid": 2 + "table_id": 2 } } diff --git 
a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_note.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_note.packet.json new file mode 100644 index 00000000..330000da --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_note.packet.json @@ -0,0 +1,48 @@ +{ + "OFPFlowMod": { + "buffer_id": 4294967295, + "command": 0, + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "NXActionNote": { + "experimenter": 8992, + "len": 16, + "note": [ + 4, + 5, + 6, + 7, + 0, + 0 + ], + "subtype": 8, + "type": 65535 + } + } + ], + "len": 24, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 4, + "oxm_fields": [], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 100, + "table_id": 0 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_resubmit.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_resubmit.packet.json index c0404958..1fdedc55 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_resubmit.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-action_resubmit.packet.json @@ -94,7 +94,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_conj.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_conj.packet.json index 2b500014..6b1c2864 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_conj.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_conj.packet.json @@ -55,7 +55,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_load_nx_register.packet.json 
b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_load_nx_register.packet.json new file mode 100644 index 00000000..e57d11a9 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_load_nx_register.packet.json @@ -0,0 +1,59 @@ +{ + "OFPFlowMod": { + "buffer_id": 4294967295, + "command": 1, + "cookie": 1311768467463790320, + "cookie_mask": 18446744073709551615, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "NXActionRegLoad": { + "dst": "reg0", + "experimenter": 8992, + "len": 24, + "end": 31, + "start": 4, + "subtype": 7, + "type": 65535, + "value": 233495534 + } + } + ], + "len": 32, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 24, + "oxm_fields": [ + { + "OXMTlv": { + "field": "reg0", + "mask": null, + "value": 4660 + } + }, + { + "OXMTlv": { + "field": "reg5", + "mask": 65535, + "value": 43981 + } + } + ], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 32768, + "table_id": 3 + } +} diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_move_nx_register.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_move_nx_register.packet.json index 5143a3d5..48ced9f6 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_move_nx_register.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_move_nx_register.packet.json @@ -55,7 +55,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark.packet.json index 1cba8c6f..61aee954 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark.packet.json @@ -55,7 +55,6 @@ "out_group": 4294967295, 
"out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet.json b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet.json index 89314867..a7bd2367 100644 --- a/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet.json +++ b/ryu/tests/unit/ofproto/json/of13/ovs-ofctl-of13-match_pkt_mark_masked.packet.json @@ -55,7 +55,6 @@ "out_group": 4294967295, "out_port": 4294967295, "priority": 32768, - "table_id": 3, - "xid": 2 + "table_id": 3 } } diff --git a/ryu/tests/unit/ofproto/json/of14/5-12-ofp_flow_stats_reply.packet.json b/ryu/tests/unit/ofproto/json/of14/5-12-ofp_flow_stats_reply.packet.json index 88d2976a..42eacf37 100644 --- a/ryu/tests/unit/ofproto/json/of14/5-12-ofp_flow_stats_reply.packet.json +++ b/ryu/tests/unit/ofproto/json/of14/5-12-ofp_flow_stats_reply.packet.json @@ -143,7 +143,9 @@ "mask": null, "value": 258 } - } + }, + "len": 16, + "type": 25 } }, { @@ -285,7 +287,9 @@ "mask": null, "value": "01:02:03:04:05:06" } - } + }, + "len": 16, + "type": 25 } }, { @@ -296,7 +300,9 @@ "mask": null, "value": 1 } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of14/5-2-ofp_flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of14/5-2-ofp_flow_mod.packet.json index a884ce95..5f789d52 100644 --- a/ryu/tests/unit/ofproto/json/of14/5-2-ofp_flow_mod.packet.json +++ b/ryu/tests/unit/ofproto/json/of14/5-2-ofp_flow_mod.packet.json @@ -20,7 +20,9 @@ "mask": null, "value": 258 } - } + }, + "len": 16, + "type": 25 } }, { @@ -153,7 +155,9 @@ "mask": null, "value": "01:02:03:04:05:06" } - } + }, + "len": 16, + "type": 25 } }, { @@ -164,7 +168,9 @@ "mask": null, "value": 1 } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of14/lib-ofctl-ofp_table_features_request.packet.json 
b/ryu/tests/unit/ofproto/json/of14/lib-ofctl-ofp_table_features_request.packet.json new file mode 100644 index 00000000..6501de15 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of14/lib-ofctl-ofp_table_features_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPTableFeaturesStatsRequest": { + "body": [], + "flags": 0, + "type": 12 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json new file mode 100644 index 00000000..9626ceea --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_reply.packet.json @@ -0,0 +1,105 @@ +{ + "OFPFlowDescStatsReply": { + "body": [ + { + "OFPFlowDesc": { + "cookie": 1234605616436508552, + "flags": 1, + "hard_timeout": 255, + "idle_timeout": 255, + "importance": 43690, + "instructions": [ + { + "OFPInstructionGotoTable": { + "len": 8, + "table_id": 2, + "type": 1 + } + }, + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionMeter": { + "len": 8, + "meter_id": 2, + "type": 29 + } + } + ], + "len": 8, + "type": 3 + } + }, + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionCopyField": { + "type": 28, + "len": 20, + "n_bits": 32, + "src_offset": 1, + "dst_offset": 2, + "oxm_ids": [ + { + "OFPOxmId": { + "hasmask": false, + "length": 0, + "type": "eth_src" + } + }, + { + "OFPOxmId": { + "hasmask": false, + "length": 0, + "type": "eth_dst" + } + } + ] + } + } + ], + "len": 28, + "type": 4 + } + } + ], + "length": 84, + "match": { + "OFPMatch": { + "length": 12, + "oxm_fields": [ + { + "OXMTlv": { + "field": "in_port", + "mask": null, + "value": 1 + } + } + ], + "type": 1 + } + }, + "priority": 5, + "stats": { + "OFPStats": { + "length": 12, + "oxs_fields": [ + { + "OXSTlv": { + "field": "flow_count", + "value": 1 + } + } + ] + } + }, + "table_id": 1 + } + } + ], + "flags": 0, + "type": 1 + } +} diff --git 
a/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json new file mode 100644 index 00000000..62e46c65 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_desc_request.packet.json @@ -0,0 +1,26 @@ +{ + "OFPFlowDescStatsRequest": { + "cookie": 1234605616436508552, + "cookie_mask": 18446744073709551615, + "flags": 0, + "match": { + "OFPMatch": { + "length": 12, + "oxm_fields": [ + { + "OXMTlv": { + "field": "in_port", + "mask": null, + "value": 1 + } + } + ], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "table_id": 1, + "type": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_mod.packet.json new file mode 100644 index 00000000..7fc12ed3 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-OFP15-flow_mod.packet.json @@ -0,0 +1,234 @@ +{ + "OFPFlowMod": { + "buffer_id": 65535, + "command": 0, + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "vlan_vid", + "mask": null, + "value": 258 + } + }, + "len": 16, + "type": 25 + } + }, + { + "OFPActionCopyTtlOut": { + "len": 8, + "type": 11 + } + }, + { + "OFPActionCopyTtlIn": { + "len": 8, + "type": 12 + } + }, + { + "OFPActionCopyTtlIn": { + "len": 8, + "type": 12 + } + }, + { + "OFPActionPopPbb": { + "len": 8, + "type": 27 + } + }, + { + "OFPActionPushPbb": { + "ethertype": 4660, + "len": 8, + "type": 26 + } + }, + { + "OFPActionPopMpls": { + "ethertype": 39030, + "len": 8, + "type": 20 + } + }, + { + "OFPActionPushMpls": { + "ethertype": 34887, + "len": 8, + "type": 19 + } + }, + { + "OFPActionPopVlan": { + "len": 8, + "type": 18 + } + }, + { + "OFPActionPushVlan": { + 
"ethertype": 33024, + "len": 8, + "type": 17 + } + }, + { + "OFPActionDecMplsTtl": { + "len": 8, + "type": 16 + } + }, + { + "OFPActionSetMplsTtl": { + "len": 8, + "mpls_ttl": 10, + "type": 15 + } + }, + { + "OFPActionDecNwTtl": { + "len": 8, + "type": 24 + } + }, + { + "OFPActionSetNwTtl": { + "len": 8, + "nw_ttl": 10, + "type": 23 + } + }, + { + "OFPActionExperimenterUnknown": { + "data": "AAECAwQFBgc=", + "experimenter": 101, + "len": 16, + "type": 65535 + } + }, + { + "OFPActionSetQueue": { + "len": 8, + "queue_id": 3, + "type": 21 + } + }, + { + "OFPActionMeter": { + "len": 8, + "meter_id": 2, + "type": 29 + } + }, + { + "OFPActionGroup": { + "group_id": 99, + "len": 8, + "type": 22 + } + }, + { + "OFPActionOutput": { + "len": 16, + "max_len": 65535, + "port": 6, + "type": 0 + } + } + ], + "len": 176, + "type": 3 + } + }, + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "eth_src", + "mask": null, + "value": "01:02:03:04:05:06" + } + }, + "len": 16, + "type": 25 + } + }, + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "pbb_uca", + "mask": null, + "value": 1 + } + }, + "len": 16, + "type": 25 + } + }, + { + "OFPActionCopyField": { + "type": 28, + "len": 20, + "n_bits": 32, + "src_offset": 1, + "dst_offset": 2, + "oxm_ids": [ + { + "OFPOxmId": { + "hasmask": false, + "length": 0, + "type": "eth_src" + } + }, + { + "OFPOxmId": { + "hasmask": false, + "length": 0, + "type": "eth_dst" + } + } + ] + } + } + ], + "len": 60, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 14, + "oxm_fields": [ + { + "OXMTlv": { + "field": "eth_dst", + "mask": null, + "value": "f2:0b:a4:7d:f8:ea" + } + } + ], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 123, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_queue_stats_request.packet.json 
b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_queue_stats_request.packet.json new file mode 100644 index 00000000..41d30f70 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_queue_stats_request.packet.json @@ -0,0 +1,8 @@ +{ + "OFPQueueStatsRequest": { + "flags": 0, + "port_no": 4294967295, + "queue_id": 4294967295, + "type": 5 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_table_features_request.packet.json b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_table_features_request.packet.json new file mode 100644 index 00000000..6501de15 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/lib-ofctl-ofp_table_features_request.packet.json @@ -0,0 +1,7 @@ +{ + "OFPTableFeaturesStatsRequest": { + "body": [], + "flags": 0, + "type": 12 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-bundle_add.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-bundle_add.packet.json index 60106984..5a1ddb19 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-bundle_add.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-bundle_add.packet.json @@ -30,7 +30,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } }, { diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-experimenter.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-experimenter.packet.json new file mode 100644 index 00000000..0e749179 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-experimenter.packet.json @@ -0,0 +1,7 @@ +{ + "OFPExperimenter": { + "data": "bmF6bw==", + "exp_type": 123456789, + "experimenter": 98765432 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod.packet.json index 8a028426..ac6dce7e 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod.packet.json +++ 
b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod.packet.json @@ -26,7 +26,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } }, { diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_match_conj.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_match_conj.packet.json index 2355577a..2d0b9ad4 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_match_conj.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_match_conj.packet.json @@ -26,7 +26,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json new file mode 100644 index 00000000..5f789d52 --- /dev/null +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_mod_no_nx.packet.json @@ -0,0 +1,202 @@ +{ + "OFPFlowMod": { + "buffer_id": 65535, + "command": 0, + "cookie": 0, + "cookie_mask": 0, + "flags": 0, + "hard_timeout": 0, + "idle_timeout": 0, + "importance": 0, + "instructions": [ + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "vlan_vid", + "mask": null, + "value": 258 + } + }, + "len": 16, + "type": 25 + } + }, + { + "OFPActionCopyTtlOut": { + "len": 8, + "type": 11 + } + }, + { + "OFPActionCopyTtlIn": { + "len": 8, + "type": 12 + } + }, + { + "OFPActionCopyTtlIn": { + "len": 8, + "type": 12 + } + }, + { + "OFPActionPopPbb": { + "len": 8, + "type": 27 + } + }, + { + "OFPActionPushPbb": { + "ethertype": 4660, + "len": 8, + "type": 26 + } + }, + { + "OFPActionPopMpls": { + "ethertype": 39030, + "len": 8, + "type": 20 + } + }, + { + "OFPActionPushMpls": { + "ethertype": 34887, + "len": 8, + "type": 19 + } + }, + { + "OFPActionPopVlan": { + "len": 8, + "type": 18 + } + }, + { + "OFPActionPushVlan": { + "ethertype": 33024, + 
"len": 8, + "type": 17 + } + }, + { + "OFPActionDecMplsTtl": { + "len": 8, + "type": 16 + } + }, + { + "OFPActionSetMplsTtl": { + "len": 8, + "mpls_ttl": 10, + "type": 15 + } + }, + { + "OFPActionDecNwTtl": { + "len": 8, + "type": 24 + } + }, + { + "OFPActionSetNwTtl": { + "len": 8, + "nw_ttl": 10, + "type": 23 + } + }, + { + "OFPActionExperimenterUnknown": { + "data": "AAECAwQFBgc=", + "experimenter": 101, + "len": 16, + "type": 65535 + } + }, + { + "OFPActionSetQueue": { + "len": 8, + "queue_id": 3, + "type": 21 + } + }, + { + "OFPActionGroup": { + "group_id": 99, + "len": 8, + "type": 22 + } + }, + { + "OFPActionOutput": { + "len": 16, + "max_len": 65535, + "port": 6, + "type": 0 + } + } + ], + "len": 176, + "type": 3 + } + }, + { + "OFPInstructionActions": { + "actions": [ + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "eth_src", + "mask": null, + "value": "01:02:03:04:05:06" + } + }, + "len": 16, + "type": 25 + } + }, + { + "OFPActionSetField": { + "field": { + "OXMTlv": { + "field": "pbb_uca", + "mask": null, + "value": 1 + } + }, + "len": 16, + "type": 25 + } + } + ], + "len": 40, + "type": 4 + } + } + ], + "match": { + "OFPMatch": { + "length": 14, + "oxm_fields": [ + { + "OXMTlv": { + "field": "eth_dst", + "mask": null, + "value": "f2:0b:a4:7d:f8:ea" + } + } + ], + "type": 1 + } + }, + "out_group": 4294967295, + "out_port": 4294967295, + "priority": 123, + "table_id": 1 + } +} diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_stats_request.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_stats_request.packet.json index 2baccad4..cc9de0a6 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_stats_request.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-flow_stats_request.packet.json @@ -13,6 +13,6 @@ "out_group": 4294967295, "out_port": 4294967295, "table_id": 0, - "type": 1 + "type": 17 } } diff --git 
a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-group_mod.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-group_mod.packet.json index 2f92361a..d168fefd 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-group_mod.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-group_mod.packet.json @@ -20,7 +20,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-meter_features_reply.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-meter_features_reply.packet.json index e3f6918f..f9fb7848 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-meter_features_reply.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-meter_features_reply.packet.json @@ -5,6 +5,7 @@ "OFPMeterFeaturesStats": { "band_types": 2147483654, "capabilities": 15, + "features": 3, "max_bands": 255, "max_color": 0, "max_meter": 16777216 diff --git a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-requestforward.packet.json b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-requestforward.packet.json index aab3acda..66368c20 100644 --- a/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-requestforward.packet.json +++ b/ryu/tests/unit/ofproto/json/of15/libofproto-OFP15-requestforward.packet.json @@ -22,7 +22,9 @@ "mask": null, "value": "192.168.2.9" } - } + }, + "len": 16, + "type": 25 } } ], diff --git a/ryu/tests/unit/ofproto/test_ofproto_common.py b/ryu/tests/unit/ofproto/test_ofproto_common.py index f9af4f5a..6a63236e 100644 --- a/ryu/tests/unit/ofproto/test_ofproto_common.py +++ b/ryu/tests/unit/ofproto/test_ofproto_common.py @@ -33,5 +33,5 @@ class TestOfprotCommon(unittest.TestCase): eq_(OFP_HEADER_SIZE, 8) def test_define_constants(self): - eq_(OFP_TCP_PORT, 6633) - eq_(OFP_SSL_PORT, 6633) + eq_(OFP_TCP_PORT, 6653) + eq_(OFP_SSL_PORT, 6653) diff --git a/ryu/tests/unit/ofproto/test_parser.py 
b/ryu/tests/unit/ofproto/test_parser.py index dc2940e2..dcbee23c 100644 --- a/ryu/tests/unit/ofproto/test_parser.py +++ b/ryu/tests/unit/ofproto/test_parser.py @@ -39,7 +39,7 @@ implemented = { ofproto_v1_0.OFPT_FEATURES_REQUEST: (False, True), ofproto_v1_0.OFPT_FEATURES_REPLY: (True, False), ofproto_v1_0.OFPT_PACKET_IN: (True, False), - ofproto_v1_0.OFPT_FLOW_MOD: (False, True), + ofproto_v1_0.OFPT_FLOW_MOD: (True, True), }, 3: { ofproto_v1_2.OFPT_FEATURES_REQUEST: (False, True), @@ -51,7 +51,7 @@ implemented = { ofproto_v1_2.OFPT_FLOW_REMOVED: (True, False), ofproto_v1_2.OFPT_PORT_STATUS: (True, False), ofproto_v1_2.OFPT_PACKET_OUT: (False, True), - ofproto_v1_2.OFPT_FLOW_MOD: (False, True), + ofproto_v1_2.OFPT_FLOW_MOD: (True, True), ofproto_v1_2.OFPT_GROUP_MOD: (False, True), ofproto_v1_2.OFPT_PORT_MOD: (False, True), ofproto_v1_2.OFPT_TABLE_MOD: (False, True), @@ -74,7 +74,7 @@ implemented = { ofproto_v1_3.OFPT_FLOW_REMOVED: (True, False), ofproto_v1_3.OFPT_PORT_STATUS: (True, False), ofproto_v1_3.OFPT_PACKET_OUT: (False, True), - ofproto_v1_3.OFPT_FLOW_MOD: (False, True), + ofproto_v1_3.OFPT_FLOW_MOD: (True, True), ofproto_v1_3.OFPT_GROUP_MOD: (False, True), ofproto_v1_3.OFPT_PORT_MOD: (False, True), ofproto_v1_3.OFPT_METER_MOD: (False, True), @@ -101,7 +101,7 @@ implemented = { ofproto_v1_4.OFPT_FLOW_REMOVED: (True, False), ofproto_v1_4.OFPT_PORT_STATUS: (True, False), ofproto_v1_4.OFPT_PACKET_OUT: (False, True), - ofproto_v1_4.OFPT_FLOW_MOD: (False, True), + ofproto_v1_4.OFPT_FLOW_MOD: (True, True), ofproto_v1_4.OFPT_GROUP_MOD: (True, True), ofproto_v1_4.OFPT_PORT_MOD: (False, True), ofproto_v1_4.OFPT_METER_MOD: (True, True), @@ -131,7 +131,7 @@ implemented = { ofproto_v1_5.OFPT_FLOW_REMOVED: (True, False), ofproto_v1_5.OFPT_PORT_STATUS: (True, False), ofproto_v1_5.OFPT_PACKET_OUT: (False, True), - ofproto_v1_5.OFPT_FLOW_MOD: (False, True), + ofproto_v1_5.OFPT_FLOW_MOD: (True, True), ofproto_v1_5.OFPT_GROUP_MOD: (True, True), ofproto_v1_5.OFPT_PORT_MOD: 
(False, True), ofproto_v1_5.OFPT_METER_MOD: (True, True), diff --git a/ryu/tests/unit/ofproto/test_parser_ofpmatch.py b/ryu/tests/unit/ofproto/test_parser_ofpmatch.py index 3989f383..d4dba9ea 100644 --- a/ryu/tests/unit/ofproto/test_parser_ofpmatch.py +++ b/ryu/tests/unit/ofproto/test_parser_ofpmatch.py @@ -200,6 +200,8 @@ def _add_tests(): ('tun_ipv4_dst', IPv4), ('pkt_mark', Int4), ('conj_id', Int4), + ('tun_ipv6_src', IPv6), + ('tun_ipv6_dst', IPv6), ('_dp_hash', Int4), ('reg0', Int4), ('reg1', Int4), diff --git a/ryu/tests/unit/ofproto/test_parser_v10.py b/ryu/tests/unit/ofproto/test_parser_v10.py index 02ce69ba..29bc3157 100644 --- a/ryu/tests/unit/ofproto/test_parser_v10.py +++ b/ryu/tests/unit/ofproto/test_parser_v10.py @@ -20,6 +20,7 @@ import logging import six from nose.tools import * from ryu.ofproto.ofproto_v1_0_parser import * +from ryu.ofproto.nx_actions import * from ryu.ofproto import ofproto_v1_0_parser from ryu.lib import addrconv @@ -1139,15 +1140,13 @@ class TestNXActionResubmit(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.in_port['val'], self.c.in_port) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) eq_(self.in_port['val'], res.in_port) def test_serialize(self): @@ -1175,7 +1174,7 @@ class TestNXActionResubmitTable(unittest.TestCase): vendor = {'buf': b'\x00\x00\x23\x20', 'val': 8992} subtype = {'buf': b'\x00\x0e', 'val': 14} in_port = {'buf': b'\x0a\x4c', 'val': 2636} - table = {'buf': b'\x52', 'val': 82} + table_id = {'buf': b'\x52', 'val': 82} zfill = b'\x00' * 3 buf = type_['buf'] \ @@ -1183,10 +1182,10 @@ class TestNXActionResubmitTable(unittest.TestCase): + vendor['buf'] \ + subtype['buf'] \ + in_port['buf'] \ - + table['buf'] \ + + table_id['buf'] \ + 
zfill - c = NXActionResubmitTable(in_port['val'], table['val']) + c = NXActionResubmitTable(in_port['val'], table_id['val']) def setUp(self): pass @@ -1195,17 +1194,16 @@ class TestNXActionResubmitTable(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.in_port['val'], self.c.in_port) + eq_(self.table_id['val'], self.c.table_id) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) eq_(self.in_port['val'], res.in_port) - eq_(self.table['val'], res.table) + eq_(self.table_id['val'], res.table_id) def test_serialize(self): buf = bytearray() @@ -1219,6 +1217,7 @@ class TestNXActionResubmitTable(unittest.TestCase): eq_(self.vendor['val'], res[2]) eq_(self.subtype['val'], res[3]) eq_(self.in_port['val'], res[4]) + eq_(self.table_id['val'], res[5]) class TestNXActionSetTunnel(unittest.TestCase): @@ -1250,14 +1249,13 @@ class TestNXActionSetTunnel(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.tun_id['val'], self.c.tun_id) - def test_parser(self): - res = self.c.parser(self.buf, 0) + def test_parse(self): + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) eq_(self.tun_id['val'], res.tun_id) def test_serialize(self): @@ -1274,60 +1272,6 @@ class TestNXActionSetTunnel(unittest.TestCase): eq_(self.tun_id['val'], res[4]) -class TestNXActionSetQueue(unittest.TestCase): - """ Test case for ofproto_v1_0_parser.NXActionSetQueue - """ - - # NX_ACTION_SET_QUEUE_PACK_STR - # '!HHIH2xI'...type, len, vendor, subtype, zfill, queue_id - type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR} 
- len_ = {'buf': b'\x00\x10', 'val': ofproto.NX_ACTION_SET_TUNNEL_SIZE} - vendor = {'buf': b'\x00\x00\x23\x20', - 'val': ofproto_common.NX_EXPERIMENTER_ID} - subtype = {'buf': b'\x00\x04', 'val': ofproto.NXAST_SET_QUEUE} - zfill = b'\x00' * 2 - queue_id = {'buf': b'\xde\xbe\xc5\x18', 'val': 3737044248} - - buf = type_['buf'] \ - + len_['buf'] \ - + vendor['buf'] \ - + subtype['buf'] \ - + zfill \ - + queue_id['buf'] - - c = NXActionSetQueue(queue_id['val']) - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) - eq_(self.subtype['val'], self.c.subtype) - eq_(self.queue_id['val'], self.c.queue_id) - - def test_parser(self): - res = self.c.parser(self.buf, 0) - eq_(self.queue_id['val'], res.queue_id) - - def test_serialize(self): - buf = bytearray() - self.c.serialize(buf, 0) - - fmt = ofproto.NX_ACTION_SET_QUEUE_PACK_STR - res = struct.unpack(fmt, six.binary_type(buf)) - - eq_(self.type_['val'], res[0]) - eq_(self.len_['val'], res[1]) - eq_(self.vendor['val'], res[2]) - eq_(self.subtype['val'], res[3]) - eq_(self.queue_id['val'], res[4]) - - class TestNXActionPopQueue(unittest.TestCase): """ Test case for ofproto_v1_0_parser.NXActionPopQueue """ @@ -1356,16 +1300,12 @@ class TestNXActionPopQueue(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) def test_parser(self): - res = self.c.parser(self.buf, 0) + res = OFPActionVendor.parser(self.buf, 0) eq_(self.type_['val'], res.type) eq_(self.len_['val'], res.len) - eq_(self.vendor['val'], res.vendor) eq_(self.subtype['val'], res.subtype) def test_serialize(self): @@ -1396,8 +1336,8 @@ class TestNXActionRegMove(unittest.TestCase): n_bits = {'buf': b'\x3d\x98', 'val': 15768} src_ofs = {'buf': b'\xf3\xa3', 'val': 62371} dst_ofs = 
{'buf': b'\xdc\x67', 'val': 56423} - src = {'buf': b'\x15\x68\x60\xfd', 'val': 359162109} - dst = {'buf': b'\x9f\x9f\x88\x26', 'val': 2678032422} + src_field = {'buf': b'\x00\x01\x00\x04', 'val': "reg0", "val2": 65540} + dst_field = {'buf': b'\x00\x01\x02\x04', 'val': "reg1", "val2": 66052} buf = type_['buf'] \ + len_['buf'] \ @@ -1406,14 +1346,14 @@ class TestNXActionRegMove(unittest.TestCase): + n_bits['buf'] \ + src_ofs['buf'] \ + dst_ofs['buf'] \ - + src['buf'] \ - + dst['buf'] + + src_field['buf'] \ + + dst_field['buf'] - c = NXActionRegMove(n_bits['val'], + c = NXActionRegMove(src_field['val'], + dst_field['val'], + n_bits['val'], src_ofs['val'], - dst_ofs['val'], - src['val'], - dst['val']) + dst_ofs['val']) def setUp(self): pass @@ -1422,23 +1362,23 @@ class TestNXActionRegMove(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.n_bits['val'], self.c.n_bits) eq_(self.src_ofs['val'], self.c.src_ofs) eq_(self.dst_ofs['val'], self.c.dst_ofs) - eq_(self.src['val'], self.c.src) - eq_(self.dst['val'], self.c.dst) + eq_(self.src_field['val'], self.c.src_field) + eq_(self.dst_field['val'], self.c.dst_field) def test_parser(self): - res = self.c.parser(self.buf, 0) + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) + eq_(self.subtype['val'], res.subtype) eq_(self.n_bits['val'], res.n_bits) eq_(self.src_ofs['val'], res.src_ofs) eq_(self.dst_ofs['val'], res.dst_ofs) - eq_(self.src['val'], res.src) - eq_(self.dst['val'], res.dst) + eq_(self.src_field['val'], res.src_field) + eq_(self.dst_field['val'], res.dst_field) def test_serialize(self): buf = bytearray() @@ -1454,8 +1394,8 @@ class TestNXActionRegMove(unittest.TestCase): eq_(self.n_bits['val'], res[4]) eq_(self.src_ofs['val'], res[5]) eq_(self.dst_ofs['val'], res[6]) - eq_(self.src['val'], res[7]) 
- eq_(self.dst['val'], res[8]) + eq_(self.src_field['val2'], res[7]) + eq_(self.dst_field['val2'], res[8]) class TestNXActionRegLoad(unittest.TestCase): @@ -1471,9 +1411,11 @@ class TestNXActionRegLoad(unittest.TestCase): 'val': ofproto_common.NX_EXPERIMENTER_ID} subtype = {'buf': b'\x00\x07', 'val': ofproto.NXAST_REG_LOAD} ofs_nbits = {'buf': b'\x3d\x98', 'val': 15768} - dst = {'buf': b'\x9f\x9f\x88\x26', 'val': 2678032422} + dst = {'buf': b'\x00\x01\x00\x04', 'val': "reg0", "val2": 65540} value = {'buf': b'\x33\x51\xcd\x43\x25\x28\x18\x99', 'val': 3697962457317775513} + start = 246 + end = 270 buf = type_['buf'] \ + len_['buf'] \ @@ -1483,7 +1425,8 @@ class TestNXActionRegLoad(unittest.TestCase): + dst['buf'] \ + value['buf'] - c = NXActionRegLoad(ofs_nbits['val'], + c = NXActionRegLoad(start, + end, dst['val'], value['val']) @@ -1494,17 +1437,18 @@ class TestNXActionRegLoad(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.dst['val'], self.c.dst) eq_(self.value['val'], self.c.value) def test_parser(self): - res = self.c.parser(self.buf, 0) - eq_(self.ofs_nbits['val'], res.ofs_nbits) + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.dst['val'], res.dst) eq_(self.value['val'], res.value) @@ -1520,7 +1464,7 @@ class TestNXActionRegLoad(unittest.TestCase): eq_(self.vendor['val'], res[2]) eq_(self.subtype['val'], res[3]) eq_(self.ofs_nbits['val'], res[4]) - eq_(self.dst['val'], res[5]) + eq_(self.dst['val2'], res[5]) eq_(self.value['val'], res[6]) @@ -1555,15 +1499,15 @@ class TestNXActionSetTunnel64(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], 
self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.tun_id['val'], self.c.tun_id) def test_parser(self): - res = self.c.parser(self.buf, 0) - eq_(self.tun_id['val'], self.c.tun_id) + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) + eq_(self.subtype['val'], res.subtype) + eq_(self.tun_id['val'], res.tun_id) def test_serialize(self): buf = bytearray() @@ -1600,6 +1544,8 @@ class TestNXActionMultipath(unittest.TestCase): zfill1 = b'\x00' * 2 ofs_nbits = {'buf': b'\xa9\x9a', 'val': 43418} dst = {'buf': b'\xb9\x2f\x16\x64', 'val': 3106870884} + start = 678 + end = 704 buf = type_['buf'] \ + len_['buf'] \ @@ -1620,7 +1566,8 @@ class TestNXActionMultipath(unittest.TestCase): algorithm['val'], max_link['val'], arg['val'], - ofs_nbits['val'], + start, + end, dst['val']) def setUp(self): @@ -1630,23 +1577,28 @@ class TestNXActionMultipath(unittest.TestCase): pass def test_init(self): + eq_(self.subtype['val'], self.c.subtype) eq_(self.fields['val'], self.c.fields) eq_(self.basis['val'], self.c.basis) eq_(self.algorithm['val'], self.c.algorithm) eq_(self.max_link['val'], self.c.max_link) eq_(self.arg['val'], self.c.arg) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.dst['val'], self.c.dst) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = OFPActionVendor.parser(self.buf, 0) + eq_(self.type_['val'], res.type) + eq_(self.len_['val'], res.len) + eq_(self.subtype['val'], res.subtype) eq_(self.fields['val'], res.fields) eq_(self.basis['val'], res.basis) eq_(self.algorithm['val'], res.algorithm) eq_(self.max_link['val'], res.max_link) eq_(self.arg['val'], res.arg) - eq_(self.ofs_nbits['val'], res.ofs_nbits) + eq_(self.start, res.start) + eq_(self.end, res.end) eq_(self.dst['val'], res.dst) def test_serialize(self): @@ -1678,7 +1630,7 @@ class 
TestNXActionBundle(unittest.TestCase): # fields, basis, slave_type, n_slaves, # ofs_nbits, dst, zfill type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR} - len_ = {'buf': b'\x00\x20', 'val': ofproto.NX_ACTION_BUNDLE_SIZE} + len_ = {'buf': b'\x00\x28', 'val': (ofproto.NX_ACTION_BUNDLE_SIZE + 8)} vendor = {'buf': b'\x00\x00\x23\x20', 'val': ofproto_common.NX_EXPERIMENTER_ID} subtype = {'buf': b'\x00\x0c', 'val': ofproto.NXAST_BUNDLE} @@ -1687,12 +1639,14 @@ class TestNXActionBundle(unittest.TestCase): basis = {'buf': b'\xfd\x6f', 'val': 64879} slave_type = {'buf': b'\x7c\x51\x0f\xe0', 'val': 2085687264} n_slaves = {'buf': b'\x00\x02', 'val': 2} - ofs_nbits = {'buf': b'\xec\xf7', 'val': 60663} - dst = {'buf': b'\x50\x7c\x75\xfe', 'val': 1350333950} + ofs_nbits = {'buf': b'\x00\x00', 'val': 0} + dst = {'buf': b'\x00\x00\x00\x00', 'val': 0} zfill = b'\x00' * 4 slaves_buf = (b'\x00\x01', b'\x00\x02') slaves_val = (1, 2) + start = 0 + end = 0 _len = len_['val'] + len(slaves_val) * 2 _len += (_len % 8) @@ -1717,7 +1671,8 @@ class TestNXActionBundle(unittest.TestCase): basis['val'], slave_type['val'], n_slaves['val'], - ofs_nbits['val'], + start, + end, dst['val'], slaves_val) @@ -1728,16 +1683,14 @@ class TestNXActionBundle(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self._len, self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) eq_(self.algorithm['val'], self.c.algorithm) eq_(self.fields['val'], self.c.fields) eq_(self.basis['val'], self.c.basis) eq_(self.slave_type['val'], self.c.slave_type) eq_(self.n_slaves['val'], self.c.n_slaves) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.dst['val'], self.c.dst) # slaves @@ -1746,18 +1699,17 @@ class TestNXActionBundle(unittest.TestCase): eq_(self.slaves_val[1], slaves[1]) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = 
OFPActionVendor.parser(self.buf, 0) eq_(self.type_['val'], res.type) - eq_(self._len, res.len) - eq_(self.vendor['val'], res.vendor) + eq_(self.len_['val'], res.len) eq_(self.subtype['val'], res.subtype) eq_(self.algorithm['val'], res.algorithm) eq_(self.fields['val'], res.fields) eq_(self.basis['val'], res.basis) eq_(self.slave_type['val'], res.slave_type) eq_(self.n_slaves['val'], res.n_slaves) - eq_(self.ofs_nbits['val'], res.ofs_nbits) + eq_(self.start, res.start) + eq_(self.end, res.end) eq_(self.dst['val'], res.dst) # slaves @@ -1776,7 +1728,7 @@ class TestNXActionBundle(unittest.TestCase): res = struct.unpack(fmt, six.binary_type(buf)) eq_(self.type_['val'], res[0]) - eq_(self._len, res[1]) + eq_(self.len_['val'], res[1]) eq_(self.vendor['val'], res[2]) eq_(self.subtype['val'], res[3]) eq_(self.algorithm['val'], res[4]) @@ -1797,7 +1749,7 @@ class TestNXActionBundleLoad(unittest.TestCase): # fields, basis, slave_type, n_slaves, # ofs_nbits, dst, zfill type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR} - len_ = {'buf': b'\x00\x20', 'val': ofproto.NX_ACTION_BUNDLE_SIZE} + len_ = {'buf': b'\x00\x28', 'val': (ofproto.NX_ACTION_BUNDLE_SIZE + 8)} vendor = {'buf': b'\x00\x00\x23\x20', 'val': ofproto_common.NX_EXPERIMENTER_ID} subtype = {'buf': b'\x00\x0d', 'val': ofproto.NXAST_BUNDLE_LOAD} @@ -1809,6 +1761,8 @@ class TestNXActionBundleLoad(unittest.TestCase): ofs_nbits = {'buf': b'\xd2\x9d', 'val': 53917} dst = {'buf': b'\x37\xfe\xb3\x60', 'val': 939438944} zfill = b'\x00' * 4 + start = 842 + end = 871 slaves_buf = (b'\x00\x01', b'\x00\x02') slaves_val = (1, 2) @@ -1836,7 +1790,8 @@ class TestNXActionBundleLoad(unittest.TestCase): basis['val'], slave_type['val'], n_slaves['val'], - ofs_nbits['val'], + start, + end, dst['val'], slaves_val) @@ -1847,16 +1802,14 @@ class TestNXActionBundleLoad(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self._len, self.c.len) - eq_(self.vendor['val'], self.c.vendor) 
eq_(self.subtype['val'], self.c.subtype) eq_(self.algorithm['val'], self.c.algorithm) eq_(self.fields['val'], self.c.fields) eq_(self.basis['val'], self.c.basis) eq_(self.slave_type['val'], self.c.slave_type) eq_(self.n_slaves['val'], self.c.n_slaves) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.dst['val'], self.c.dst) # slaves @@ -1865,18 +1818,17 @@ class TestNXActionBundleLoad(unittest.TestCase): eq_(self.slaves_val[1], slaves[1]) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = OFPActionVendor.parser(self.buf, 0) eq_(self.type_['val'], res.type) - eq_(self._len, res.len) - eq_(self.vendor['val'], res.vendor) + eq_(self.len_['val'], res.len) eq_(self.subtype['val'], res.subtype) eq_(self.algorithm['val'], res.algorithm) eq_(self.fields['val'], res.fields) eq_(self.basis['val'], res.basis) eq_(self.slave_type['val'], res.slave_type) eq_(self.n_slaves['val'], res.n_slaves) - eq_(self.ofs_nbits['val'], res.ofs_nbits) + eq_(self.start, res.start) + eq_(self.end, res.end) eq_(self.dst['val'], res.dst) # slaves @@ -1895,7 +1847,7 @@ class TestNXActionBundleLoad(unittest.TestCase): res = struct.unpack(fmt, six.binary_type(buf)) eq_(self.type_['val'], res[0]) - eq_(self._len, res[1]) + eq_(self.len_['val'], res[1]) eq_(self.vendor['val'], res[2]) eq_(self.subtype['val'], res[3]) eq_(self.algorithm['val'], res[4]) @@ -1907,78 +1859,6 @@ class TestNXActionBundleLoad(unittest.TestCase): eq_(self.dst['val'], res[10]) -class TestNXActionAutopath(unittest.TestCase): - """ Test case for ofproto_v1_0_parser.NXActionAutopath - """ - - # NX_ACTION_AUTOPATH_PACK_STR - # '!HHIHHII4x'...type, len, vendor, subtype, ofs_nbits, - # dst, id_, zfill - type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR} - len_ = {'buf': b'\x00\x20', 'val': ofproto.NX_ACTION_OUTPUT_REG_SIZE} - vendor = {'buf': b'\x00\x00\x23\x20', - 'val': ofproto_common.NX_EXPERIMENTER_ID} - subtype = {'buf': b'\x00\x0b', 
'val': ofproto.NXAST_AUTOPATH} - ofs_nbits = {'buf': b'\xfe\x78', 'val': 65144} - dst = {'buf': b'\xf8\x55\x74\x95', 'val': 4166349973} - id_ = {'buf': b'\x02\x2d\x37\xed', 'val': 36517869} - zfill = b'\x00' * 4 - - buf = type_['buf'] \ - + len_['buf'] \ - + vendor['buf'] \ - + subtype['buf'] \ - + ofs_nbits['buf'] \ - + dst['buf'] \ - + id_['buf'] \ - + zfill - - c = NXActionAutopath(ofs_nbits['val'], - dst['val'], - id_['val']) - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) - eq_(self.subtype['val'], self.c.subtype) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) - eq_(self.dst['val'], self.c.dst) - eq_(self.id_['val'], self.c.id) - - def test_parser(self): - res = self.c.parser(self.buf, 0) - - eq_(self.type_['val'], res.type) - eq_(self.len_['val'], res.len) - eq_(self.vendor['val'], res.vendor) - eq_(self.subtype['val'], res.subtype) - eq_(self.ofs_nbits['val'], res.ofs_nbits) - eq_(self.dst['val'], res.dst) - eq_(self.id_['val'], res.id) - - def test_serialize(self): - buf = bytearray() - self.c.serialize(buf, 0) - - fmt = ofproto.NX_ACTION_AUTOPATH_PACK_STR - res = struct.unpack(fmt, six.binary_type(buf)) - - eq_(self.type_['val'], res[0]) - eq_(self.len_['val'], res[1]) - eq_(self.vendor['val'], res[2]) - eq_(self.subtype['val'], res[3]) - eq_(self.ofs_nbits['val'], res[4]) - eq_(self.dst['val'], res[5]) - eq_(self.id_['val'], res[6]) - - class TestNXActionOutputReg(unittest.TestCase): """ Test case for ofproto_v1_0_parser.NXActionOutputReg """ @@ -1987,7 +1867,7 @@ class TestNXActionOutputReg(unittest.TestCase): # '!HHIHHIH6x'...type, len, vendor, subtype, ofs_nbits, # src, max_len, zfill type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR} - len_ = {'buf': b'\x00\x20', 'val': ofproto.NX_ACTION_OUTPUT_REG_SIZE} + len_ = {'buf': b'\x00\x18', 'val': ofproto.NX_ACTION_OUTPUT_REG_SIZE} vendor = {'buf': 
b'\x00\x00\x23\x20', 'val': ofproto_common.NX_EXPERIMENTER_ID} subtype = {'buf': b'\x00\x0f', 'val': ofproto.NXAST_OUTPUT_REG} @@ -1995,6 +1875,8 @@ class TestNXActionOutputReg(unittest.TestCase): src = {'buf': b'\x5e\x3a\x04\x26', 'val': 1580860454} max_len = {'buf': b'\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE} zfill = b'\x00' * 6 + start = 1017 + end = 1073 buf = type_['buf'] \ + len_['buf'] \ @@ -2005,7 +1887,8 @@ class TestNXActionOutputReg(unittest.TestCase): + max_len['buf'] \ + zfill - c = NXActionOutputReg(ofs_nbits['val'], + c = NXActionOutputReg(start, + end, src['val'], max_len['val']) @@ -2016,22 +1899,19 @@ class TestNXActionOutputReg(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) - eq_(self.ofs_nbits['val'], self.c.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.src['val'], self.c.src) eq_(self.max_len['val'], self.c.max_len) def test_parser(self): - res = self.c.parser(self.buf, 0) - + res = OFPActionVendor.parser(self.buf, 0) eq_(self.type_['val'], res.type) eq_(self.len_['val'], res.len) - eq_(self.vendor['val'], res.vendor) eq_(self.subtype['val'], res.subtype) - eq_(self.ofs_nbits['val'], res.ofs_nbits) + eq_(self.start, self.c.start) + eq_(self.end, self.c.end) eq_(self.src['val'], res.src) eq_(self.max_len['val'], res.max_len) @@ -2079,16 +1959,12 @@ class TestNXActionExit(unittest.TestCase): pass def test_init(self): - eq_(self.type_['val'], self.c.type) - eq_(self.len_['val'], self.c.len) - eq_(self.vendor['val'], self.c.vendor) eq_(self.subtype['val'], self.c.subtype) def test_parser(self): - res = self.c.parser(self.buf, 0) + res = OFPActionVendor.parser(self.buf, 0) eq_(self.type_['val'], res.type) eq_(self.len_['val'], res.len) - eq_(self.vendor['val'], res.vendor) eq_(self.subtype['val'], res.subtype) def test_serialize(self): diff --git 
a/ryu/tests/unit/packet/test_bgp.py b/ryu/tests/unit/packet/test_bgp.py index 37f0468b..b3c11984 100644 --- a/ryu/tests/unit/packet/test_bgp.py +++ b/ryu/tests/unit/packet/test_bgp.py @@ -16,15 +16,22 @@ from __future__ import print_function +import os +import sys import unittest from nose.tools import eq_ from nose.tools import ok_ +from ryu.lib.packet import packet from ryu.lib.packet import bgp from ryu.lib.packet import afi from ryu.lib.packet import safi +BGP4_PACKET_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), '../../packet_data/bgp4/') + + class Test_bgp(unittest.TestCase): """ Test case for ryu.lib.packet.bgp """ @@ -112,7 +119,7 @@ class Test_bgp(unittest.TestCase): ] path_attributes = [ bgp.BGPPathAttributeOrigin(value=1), - bgp.BGPPathAttributeAsPath(value=[[1000], set([1001, 1002]), + bgp.BGPPathAttributeAsPath(value=[[1000], {1001, 1002}, [1003, 1004]]), bgp.BGPPathAttributeNextHop(value='192.0.2.199'), bgp.BGPPathAttributeMultiExitDisc(value=2000000000), @@ -124,7 +131,7 @@ class Test_bgp(unittest.TestCase): bgp.BGPPathAttributeOriginatorId(value='10.1.1.1'), bgp.BGPPathAttributeClusterList(value=['1.1.1.1', '2.2.2.2']), bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities), - bgp.BGPPathAttributeAs4Path(value=[[1000000], set([1000001, 1002]), + bgp.BGPPathAttributeAs4Path(value=[[1000000], {1000001, 1002}, [1003, 1000004]]), bgp.BGPPathAttributeAs4Aggregator(as_number=100040000, addr='192.0.2.99'), @@ -199,15 +206,13 @@ class Test_bgp(unittest.TestCase): # 'bgp4-update', 'bgp4-keepalive', ] - dir = '../packet_data/bgp4/' for f in files: print('testing %s' % f) - binmsg = open(dir + f, 'rb').read() - msg, rest = bgp.BGPMessage.parser(binmsg) - binmsg2 = msg.serialize() - eq_(binmsg, binmsg2) - eq_(rest, b'') + msg_buf = open(BGP4_PACKET_DATA_DIR + f + '.pcap', 'rb').read() + pkt = packet.Packet(msg_buf) + pkt.serialize() + eq_(msg_buf, pkt.data) def test_json1(self): opt_param = 
[bgp.BGPOptParamCapabilityUnknown(cap_code=200, @@ -260,7 +265,7 @@ class Test_bgp(unittest.TestCase): ] path_attributes = [ bgp.BGPPathAttributeOrigin(value=1), - bgp.BGPPathAttributeAsPath(value=[[1000], set([1001, 1002]), + bgp.BGPPathAttributeAsPath(value=[[1000], {1001, 1002}, [1003, 1004]]), bgp.BGPPathAttributeNextHop(value='192.0.2.199'), bgp.BGPPathAttributeMultiExitDisc(value=2000000000), @@ -270,7 +275,7 @@ class Test_bgp(unittest.TestCase): addr='192.0.2.99'), bgp.BGPPathAttributeCommunities(communities=communities), bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities), - bgp.BGPPathAttributeAs4Path(value=[[1000000], set([1000001, 1002]), + bgp.BGPPathAttributeAs4Path(value=[[1000000], {1000001, 1002}, [1003, 1000004]]), bgp.BGPPathAttributeAs4Aggregator(as_number=100040000, addr='192.0.2.99'), diff --git a/ryu/tests/unit/packet/test_packet.py b/ryu/tests/unit/packet/test_packet.py index a793a5fa..c48e3727 100644 --- a/ryu/tests/unit/packet/test_packet.py +++ b/ryu/tests/unit/packet/test_packet.py @@ -697,7 +697,7 @@ class TestPacket(unittest.TestCase): sctp_values = {'src_port': 1, 'dst_port': 1, 'vtag': 0, - 'csum': p_sctp.csum, + 'csum': repr(p_sctp.csum), 'chunks': data_str} _sctp_str = ','.join(['%s=%s' % (k, sctp_values[k]) for k, _ in inspect.getmembers(p_sctp) @@ -1233,7 +1233,7 @@ class TestPacket(unittest.TestCase): sctp_values = {'src_port': 1, 'dst_port': 1, 'vtag': 0, - 'csum': p_sctp.csum, + 'csum': repr(p_sctp.csum), 'chunks': data_str} _sctp_str = ','.join(['%s=%s' % (k, sctp_values[k]) for k, _ in inspect.getmembers(p_sctp) diff --git a/ryu/tests/unit/packet/test_vxlan.py b/ryu/tests/unit/packet/test_vxlan.py new file mode 100644 index 00000000..fe418ff7 --- /dev/null +++ b/ryu/tests/unit/packet/test_vxlan.py @@ -0,0 +1,75 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import unittest + +from nose.tools import eq_ +from nose.tools import raises + +from ryu.lib.packet import ethernet +from ryu.lib.packet import vxlan + + +LOG = logging.getLogger(__name__) + + +class Test_vxlan(unittest.TestCase): + """ + Test case for VXLAN (RFC 7348) header encoder/decoder class. + """ + + vni = 0x123456 + buf = ( + b'\x08\x00\x00\x00' # flags = R|R|R|R|I|R|R|R (8 bits) + b'\x12\x34\x56\x00' # vni = 0x123456 (24 bits) + b'test_payload' # for test + ) + pkt = vxlan.vxlan(vni) + jsondict = { + 'vxlan': { + 'vni': vni + } + } + + def test_init(self): + eq_(self.vni, self.pkt.vni) + + def test_parser(self): + parsed_pkt, next_proto_cls, rest_buf = vxlan.vxlan.parser(self.buf) + eq_(self.vni, parsed_pkt.vni) + eq_(ethernet.ethernet, next_proto_cls) + eq_(b'test_payload', rest_buf) + + @raises(AssertionError) + def test_invalid_flags(self): + invalid_flags_bug = ( + b'\x00\x00\x00\x00' # all bits are set to zero + b'\x12\x34\x56\x00' # vni = 0x123456 (24 bits) + ) + vxlan.vxlan.parser(invalid_flags_bug) + + def test_serialize(self): + serialized_buf = self.pkt.serialize(payload=None, prev=None) + eq_(self.buf[:vxlan.vxlan._MIN_LEN], serialized_buf) + + def test_from_jsondict(self): + pkt_from_json = vxlan.vxlan.from_jsondict( + self.jsondict[vxlan.vxlan.__name__]) + eq_(self.vni, pkt_from_json.vni) + + def test_to_jsondict(self): + jsondict_from_pkt = self.pkt.to_jsondict() + 
eq_(self.jsondict, jsondict_from_pkt) diff --git a/ryu/topology/switches.py b/ryu/topology/switches.py index d429effd..644a1fec 100644 --- a/ryu/topology/switches.py +++ b/ryu/topology/switches.py @@ -17,7 +17,6 @@ import logging import six import struct import time -import json from ryu import cfg from ryu.topology import event @@ -32,8 +31,8 @@ from ryu.lib.dpid import dpid_to_str, str_to_dpid from ryu.lib.port_no import port_no_to_str from ryu.lib.packet import packet, ethernet from ryu.lib.packet import lldp, ether_types -from ryu.lib.packet import arp, ipv4, ipv6 from ryu.ofproto.ether import ETH_TYPE_LLDP +from ryu.ofproto.ether import ETH_TYPE_CFM from ryu.ofproto import nx_match from ryu.ofproto import ofproto_v1_0 from ryu.ofproto import ofproto_v1_2 @@ -87,7 +86,7 @@ class Port(object): return {'dpid': dpid_to_str(self.dpid), 'port_no': port_no_to_str(self.port_no), 'hw_addr': self.hw_addr, - 'name': self.name.rstrip('\0')} + 'name': self.name.decode('utf-8')} # for Switch.del_port() def __eq__(self, other): @@ -206,12 +205,12 @@ class HostState(dict): if not host: return - if ip_v4 != None: + if ip_v4 is not None: if ip_v4 in host.ipv4: host.ipv4.remove(ip_v4) host.ipv4.append(ip_v4) - if ip_v6 != None: + if ip_v6 is not None: if ip_v6 in host.ipv6: host.ipv6.remove(ip_v6) host.ipv6.append(ip_v6) @@ -281,9 +280,8 @@ class PortDataState(dict): def __init__(self): super(PortDataState, self).__init__() - self._root = root = [] # sentinel node - root[:] = [root, root, None] # [_PREV, _NEXT, _KEY] - # doubly linked list + self._root = root = [] # sentinel node + root[:] = [root, root, None] # [_PREV, _NEXT, _KEY] doubly linked list self._map = {} def _remove_key(self, key): @@ -476,7 +474,7 @@ class LLDPPacket(object): if tlv_chassis_id.subtype != lldp.ChassisID.SUB_LOCALLY_ASSIGNED: raise LLDPPacket.LLDPUnknownFormat( msg='unknown chassis id subtype %d' % tlv_chassis_id.subtype) - chassis_id = tlv_chassis_id.chassis_id + chassis_id = 
tlv_chassis_id.chassis_id.decode('utf-8') if not chassis_id.startswith(LLDPPacket.CHASSIS_ID_PREFIX): raise LLDPPacket.LLDPUnknownFormat( msg='unknown chassis id format %s' % chassis_id) @@ -619,7 +617,8 @@ class Switches(app_manager.RyuApp): if not dp_multiple_conns: self.send_event_to_observers(event.EventSwitchEnter(switch)) else: - self.send_event_to_observers(event.EventSwitchReconnected(switch)) + evt = event.EventSwitchReconnected(switch) + self.send_event_to_observers(evt) if not self.link_discovery: return @@ -679,8 +678,8 @@ class Switches(app_manager.RyuApp): if switch.dp is dp: self._unregister(dp) LOG.debug('unregister %s', switch) - - self.send_event_to_observers(event.EventSwitchLeave(switch)) + evt = event.EventSwitchLeave(switch) + self.send_event_to_observers(evt) if not self.link_discovery: return @@ -773,7 +772,7 @@ class Switches(app_manager.RyuApp): msg = ev.msg try: src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data) - except LLDPPacket.LLDPUnknownFormat as e: + except LLDPPacket.LLDPUnknownFormat: # This handler can receive all the packets which can be # not-LLDP packet. Ignore it silently return @@ -796,7 +795,7 @@ class Switches(app_manager.RyuApp): # There are races between EventOFPPacketIn and # EventDPPortAdd. So packet-in event can happend before # port add event. In that case key error can happend. 
- # LOG.debug('lldp_received: KeyError %s', e) + # LOG.debug('lldp_received error', exc_info=True) pass dst = self._get_port(dst_dpid, dst_port_no) @@ -817,10 +816,14 @@ class Switches(app_manager.RyuApp): if link not in self.links: self.send_event_to_observers(event.EventLinkAdd(link)) - # remove hosts from edge port + # remove hosts if it's not attached to edge port + host_to_del = [] for host in self.hosts.values(): if not self._is_edge_port(host.port): - del self.hosts[host.mac] + host_to_del.append(host.mac) + + for host_mac in host_to_del: + del self.hosts[host_mac] if not self.links.update_link(src, dst): # reverse link is not detected yet. @@ -833,11 +836,10 @@ class Switches(app_manager.RyuApp): @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) def host_discovery_packet_in_handler(self, ev): msg = ev.msg - pkt = packet.Packet(msg.data) - eth = pkt.get_protocols(ethernet.ethernet)[0] + eth, pkt_type, pkt_data = ethernet.ethernet.parser(msg.data) - # ignore lldp packet - if eth.ethertype == ETH_TYPE_LLDP: + # ignore lldp and cfm packets + if eth.ethertype in (ETH_TYPE_LLDP, ETH_TYPE_CFM): return datapath = msg.datapath @@ -868,26 +870,26 @@ class Switches(app_manager.RyuApp): # arp packet, update ip address if eth.ethertype == ether_types.ETH_TYPE_ARP: - arp_pkt = pkt.get_protocols(arp.arp)[0] + arp_pkt, _, _ = pkt_type.parser(pkt_data) self.hosts.update_ip(host, ip_v4=arp_pkt.src_ip) # ipv4 packet, update ipv4 address elif eth.ethertype == ether_types.ETH_TYPE_IP: - ipv4_pkt = pkt.get_protocols(ipv4.ipv4)[0] + ipv4_pkt, _, _ = pkt_type.parser(pkt_data) self.hosts.update_ip(host, ip_v4=ipv4_pkt.src) # ipv6 packet, update ipv6 address elif eth.ethertype == ether_types.ETH_TYPE_IPV6: # TODO: need to handle NDP - ipv6_pkt = pkt.get_protocols(ipv6.ipv6)[0] + ipv6_pkt, _, _ = pkt_type.parser(pkt_data) self.hosts.update_ip(host, ip_v6=ipv6_pkt.src) def send_lldp_packet(self, port): try: port_data = self.ports.lldp_sent(port) - except KeyError as e: + except 
KeyError: # ports can be modified during our sleep in self.lldp_loop() - # LOG.debug('send_lldp: KeyError %s', e) + # LOG.debug('send_lld error', exc_info=True) return if port_data.is_down: return diff --git a/ryu/utils.py b/ryu/utils.py index a8eb5094..3f6260ef 100644 --- a/ryu/utils.py +++ b/ryu/utils.py @@ -30,7 +30,7 @@ # under the License. -import inspect +import importlib import logging import os import sys @@ -77,21 +77,27 @@ def _find_loaded_module(modpath): def import_module(modname): try: - __import__(modname) - except: + # Import module with python module path + # e.g.) modname = 'module.path.module_name' + return importlib.import_module(modname) + except (ImportError, TypeError): + # In this block, we retry to import module when modname is filename + # e.g.) modname = 'module/path/module_name.py' abspath = os.path.abspath(modname) + # Check if specified modname is already imported mod = _find_loaded_module(abspath) if mod: return mod - opath = sys.path + # Backup original sys.path before appending path to file + original_path = list(sys.path) sys.path.append(os.path.dirname(abspath)) - name = os.path.basename(modname) - if name.endswith('.py'): - name = name[:-3] - __import__(name) - sys.path = opath - return sys.modules[name] - return sys.modules[modname] + # Remove python suffix + name = chop_py_suffix(os.path.basename(modname)) + # Retry to import + mod = importlib.import_module(name) + # Restore sys.path + sys.path = original_path + return mod def round_up(x, y): diff --git a/setup.cfg b/setup.cfg index 626e0f89..22737a31 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,7 +13,6 @@ classifier = Topic :: System :: Networking Natural Language :: English Programming Language :: Python - Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.4 diff --git a/tools/pip-requires b/tools/pip-requires index c274774d..a59805b0 100644 --- a/tools/pip-requires +++ 
b/tools/pip-requires @@ -1,8 +1,7 @@ eventlet>=0.15 msgpack-python>=0.3.0 # RPC library, BGP speaker(net_cntl) netaddr -oslo.config>=1.6.0, <=3.0.0 ; python_version < '2.7' -oslo.config>=1.6.0 ; python_version >= '2.7' +oslo.config>=1.15.0 routes # wsgi six>=1.4.0 webob>=1.2 # wsgi diff --git a/tools/test-requires b/tools/test-requires index 04ed5a23..7b89eeb3 100644 --- a/tools/test-requires +++ b/tools/test-requires @@ -2,7 +2,9 @@ coverage mock nose pep8 -pylint==0.25.0 +pylint formencode -lxml # OF-Config +lxml; platform_python_implementation != 'PyPy' # OF-Config +lxml==3.4.0; platform_python_implementation == 'PyPy' paramiko # NETCONF, BGP speaker +tinyrpc # RPC diff --git a/tox.ini b/tox.ini index 60e5d70b..321b7062 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26,py27,py34,pep8 +envlist = py27,py34,py35,pypy26,pep8 [testenv] deps = -U @@ -9,7 +9,7 @@ deps = -U usedevelop = True passenv= NOSE_VERBOSE commands = - python ryu/tests/run_tests.py '{posargs}' + coverage run --source=ryu ryu/tests/run_tests.py '{posargs}' [testenv:pep8] commands =