rename dir

Signed-off-by: Nico Schottelius <nico@nico-notebook.schottelius.org>

commit 50971dc10c
Nico Schottelius, 2019-08-21 11:39:09 +02:00
197 changed files with 0 additions and 0 deletions

netfpga/minip4/testdata/Makefile (vendored, normal file, 9 lines)

@@ -0,0 +1,9 @@
# Makefile to build the testdata
all:
	./gen_testdata.py
	${SUME_SDNET}/bin/pcap2axi --output Packet_in.axi --bus_width 256 src.pcap
	${SUME_SDNET}/bin/pcap2axi --output Packet_expect.axi --bus_width 256 dst.pcap

clean:
	rm -f *.pcap *.txt *.pyc *.axi config_writes.* *_reg_defines.py


@@ -0,0 +1 @@
{}

netfpga/minip4/testdata/gen_testdata-mirror.py (vendored, executable file, 189 lines)

@@ -0,0 +1,189 @@
#!/usr/bin/env python
# from switch_calc_headers import *
from scapy.all import *
from nf_sim_tools import *
from collections import OrderedDict
import sss_sdnet_tuples

########################
# pkt generation tools #
########################

pktsApplied = []
pktsExpected = []

# Pkt lists for SUME simulations
nf_applied = OrderedDict()
nf_applied[0] = []
nf_applied[1] = []
nf_applied[2] = []
nf_applied[3] = []
nf_expected = OrderedDict()
nf_expected[0] = []
nf_expected[1] = []
nf_expected[2] = []
nf_expected[3] = []

# One-hot SUME port bitmaps: nf0..nf3 sit on bits 0, 2, 4 and 6, dma0 on bit 1.
# "bcast" sets all four nf bits at once (0x55).
nf_port_map = {
    "nf0":   0b00000001,
    "nf1":   0b00000100,
    "nf2":   0b00010000,
    "nf3":   0b01000000,
    "dma0":  0b00000010,
    "bcast": 0b01010101
}

nf_id_map = {
    "nf0": 0,
    "nf1": 1,
    "nf2": 2,
    "nf3": 3
}

sss_sdnet_tuples.clear_tuple_files()

# Queue a packet for injection on `ingress` at `time` and record src_port in the SUME tuples.
def applyPkt(pkt, ingress, time):
    pktsApplied.append(pkt)
    sss_sdnet_tuples.sume_tuple_in['src_port'] = nf_port_map[ingress]
    sss_sdnet_tuples.sume_tuple_expect['src_port'] = nf_port_map[ingress]
    pkt.time = time
    nf_applied[nf_id_map[ingress]].append(pkt)

# Record the packet expected on `egress` and append the corresponding tuple line.
def expPkt(pkt, egress):
    pktsExpected.append(pkt)
    sss_sdnet_tuples.sume_tuple_expect['dst_port'] = nf_port_map[egress]
    sss_sdnet_tuples.write_tuples()
    if egress in ["nf0", "nf1", "nf2", "nf3"]:
        nf_expected[nf_id_map[egress]].append(pkt)
    elif egress == 'bcast':
        nf_expected[0].append(pkt)
        nf_expected[1].append(pkt)
        nf_expected[2].append(pkt)
        nf_expected[3].append(pkt)

def print_summary(pkts):
    for pkt in pkts:
        print "summary = ", pkt.summary()

def write_pcap_files():
    wrpcap("src.pcap", pktsApplied)
    wrpcap("dst.pcap", pktsExpected)
    for i in nf_applied.keys():
        if (len(nf_applied[i]) > 0):
            wrpcap('nf{0}_applied.pcap'.format(i), nf_applied[i])
    for i in nf_expected.keys():
        if (len(nf_expected[i]) > 0):
            wrpcap('nf{0}_expected.pcap'.format(i), nf_expected[i])
    for i in nf_applied.keys():
        print "nf{0}_applied times: ".format(i), [p.time for p in nf_applied[i]]

#####################
# generate testdata #
#####################

MACSRC = "08:11:11:11:11:08"
MAC0 = "08:22:22:22:22:00"
MAC1 = "08:22:22:22:22:01"
MAC2 = "08:22:22:22:22:02"
MAC3 = "08:22:22:22:22:03"

pktCnt = 0
INDEX_WIDTH = 4
REG_DEPTH = 2**INDEX_WIDTH
# Not sure what this is used for
NUM_KEYS = 4

lookup_table = {
    0: 0x00000001,
    1: 0x00000010,
    2: 0x00000100,
    3: 0x00001000
}

def test_port1():
    pktCnt = 0
    # First ethernet
    pktCnt += 1
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    applyPkt(pkt, 'nf0', pktCnt)
    expPkt(pkt, 'nf0')

def test_all_ports():
    pktCnt = 0
    # First ethernet
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    pktCnt += 1
    applyPkt(pkt, 'nf1', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')
    pktCnt += 1
    applyPkt(pkt, 'nf2', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')
    pktCnt += 1
    applyPkt(pkt, 'nf3', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')

# Test that packets are being mirrored
def test_mirror():
    pktCnt = 0
    # inject into nf1,2,3
    pktCnt += 1
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    applyPkt(pkt, 'nf1', pktCnt)
    pktCnt += 1
    pkt = Ether(dst=MAC1, src=MAC2)
    pkt = pad_pkt(pkt, 64)
    expPkt(pkt, 'nf0')
    # # Second IP
    # pktCnt += 1
    # pkt = Ether(dst=MAC2, src=MAC1) / IPv6(src="fe80::1", dst="fe80::2")
    # pkt = pad_pkt(pkt, 64)
    # applyPkt(pkt, 'nf0', pktCnt)
    # pktCnt += 1
    # pkt = Ether(dst=MAC1, src=MAC2) / IPv6(src="fe80::2", dst="fe80::1")
    # pkt = pad_pkt(pkt, 64)
    # expPkt(pkt, 'nf0')
    # # Third tcp
    # pktCnt += 1
    # pkt = Ether(dst=MAC2, src=MAC1) / IPv6(src="fe80::1", dst="fe80::2") / TCP(sport=42, dport=23)
    # pkt = pad_pkt(pkt, 64)
    # applyPkt(pkt, 'nf0', pktCnt)
    # pktCnt += 1
    # pkt = Ether(dst=MAC1, src=MAC2) / IPv6(src="fe80::2", dst="fe80::1") / TCP(sport=23, dport=42)
    # pkt = pad_pkt(pkt, 64)
    # expPkt(pkt, 'nf0')

#test_mirror()
#test_port1()
test_all_ports()
write_pcap_files()
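For reference, the src_port/dst_port values that end up in the tuple files are one-hot bitmasks over the SUME ports, using the same encoding as nf_port_map above. A minimal standalone sketch (not part of the repository; NF_BITS and decode_port_mask are illustrative names) that expands such a mask back into port names:

# Sketch: decode a one-hot SUME port bitmask (same encoding as nf_port_map).
NF_BITS = {"nf0": 0b00000001, "dma0": 0b00000010, "nf1": 0b00000100,
           "nf2": 0b00010000, "nf3": 0b01000000}

def decode_port_mask(mask):
    # Return the names of all ports whose bit is set in the mask.
    return [name for name, bit in sorted(NF_BITS.items()) if mask & bit]

print(decode_port_mask(0b01010101))  # the 'bcast' mask -> ['nf0', 'nf1', 'nf2', 'nf3']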

netfpga/minip4/testdata/gen_testdata-port1.py (vendored, executable file, 125 lines)

@@ -0,0 +1,125 @@
#!/usr/bin/env python
# from switch_calc_headers import *
from scapy.all import *
from nf_sim_tools import *
from collections import OrderedDict
import sss_sdnet_tuples

########################
# pkt generation tools #
########################

pktsApplied = []
pktsExpected = []

# Pkt lists for SUME simulations
nf_applied = OrderedDict()
nf_applied[0] = []
nf_applied[1] = []
nf_applied[2] = []
nf_applied[3] = []
nf_expected = OrderedDict()
nf_expected[0] = []
nf_expected[1] = []
nf_expected[2] = []
nf_expected[3] = []

nf_port_map = {
    "nf0":  0b00000001,
    "nf1":  0b00000100,
    "nf2":  0b00010000,
    "nf3":  0b01000000,
    "dma0": 0b00000010
}

nf_id_map = {
    "nf0": 0,
    "nf1": 1,
    "nf2": 2,
    "nf3": 3
}

sss_sdnet_tuples.clear_tuple_files()

def applyPkt(pkt, ingress, time):
    print("Applying pkt on {} at {}: ".format(ingress, time))
    pktsApplied.append(pkt)
    sss_sdnet_tuples.sume_tuple_in['src_port'] = nf_port_map[ingress]
    sss_sdnet_tuples.sume_tuple_expect['src_port'] = nf_port_map[ingress]
    pkt.time = time
    nf_applied[nf_id_map[ingress]].append(pkt)

def expPkt(pkt, egress):
    pktsExpected.append(pkt)
    sss_sdnet_tuples.sume_tuple_expect['dst_port'] = nf_port_map[egress]
    sss_sdnet_tuples.write_tuples()
    if egress in ["nf0", "nf1", "nf2", "nf3"]:
        nf_expected[nf_id_map[egress]].append(pkt)
    elif egress == 'bcast':
        nf_expected[0].append(pkt)
        nf_expected[1].append(pkt)
        nf_expected[2].append(pkt)
        nf_expected[3].append(pkt)

def print_summary(pkts):
    for pkt in pkts:
        print "summary = ", pkt.summary()

def write_pcap_files():
    wrpcap("src.pcap", pktsApplied)
    wrpcap("dst.pcap", pktsExpected)
    for i in nf_applied.keys():
        if (len(nf_applied[i]) > 0):
            wrpcap('nf{0}_applied.pcap'.format(i), nf_applied[i])
    for i in nf_expected.keys():
        if (len(nf_expected[i]) > 0):
            wrpcap('nf{0}_expected.pcap'.format(i), nf_expected[i])
    # i = 0..3
    for i in nf_applied.keys():
        print "nf{0}_applied times: ".format(i), [p.time for p in nf_applied[i]]

#####################
# generate testdata #
#####################

MAC1 = "08:11:11:11:11:08"
MAC2 = "08:22:22:22:22:08"

pktCnt = 0
INDEX_WIDTH = 4
REG_DEPTH = 2**INDEX_WIDTH
# Not sure what this is used for
NUM_KEYS = 4

lookup_table = {
    0: 0x00000001,
    1: 0x00000010,
    2: 0x00000100,
    3: 0x00001000
}

def test_port1():
    """ packets for a certain mac always go to nf0 """
    pktCnt = 1
    for inport in [ 'nf0', 'nf1', 'nf2', 'nf3' ]:
        # Need to be regenerated, as object is modified above!
        pkt = Ether(dst=MAC2, src=MAC1)
        pkt = pad_pkt(pkt, 64)
        # Send on nf0..nf3
        applyPkt(pkt, inport, pktCnt)
        # Always receive on nf0 - also regenerate here!
        pkt = Ether(dst=MAC2, src=MAC1)
        pkt = pad_pkt(pkt, 64)
        expPkt(pkt, 'nf0')
        pktCnt += 1

test_port1()
write_pcap_files()

netfpga/minip4/testdata/gen_testdata-v6zero.py (vendored, executable file, 196 lines)

@@ -0,0 +1,196 @@
#!/usr/bin/env python
# from switch_calc_headers import *
from scapy.all import *
from nf_sim_tools import *
from collections import OrderedDict
import sss_sdnet_tuples

########################
# pkt generation tools #
########################

pktsApplied = []
pktsExpected = []

# Pkt lists for SUME simulations
nf_applied = OrderedDict()
nf_applied[0] = []
nf_applied[1] = []
nf_applied[2] = []
nf_applied[3] = []
nf_expected = OrderedDict()
nf_expected[0] = []
nf_expected[1] = []
nf_expected[2] = []
nf_expected[3] = []

nf_port_map = {
    "nf0":   0b00000001,
    "nf1":   0b00000100,
    "nf2":   0b00010000,
    "nf3":   0b01000000,
    "dma0":  0b00000010,
    "bcast": 0b01010101
}

nf_id_map = {
    "nf0": 0,
    "nf1": 1,
    "nf2": 2,
    "nf3": 3
}

sss_sdnet_tuples.clear_tuple_files()

def applyPkt(pkt, ingress, time):
    pktsApplied.append(pkt)
    sss_sdnet_tuples.sume_tuple_in['src_port'] = nf_port_map[ingress]
    sss_sdnet_tuples.sume_tuple_expect['src_port'] = nf_port_map[ingress]
    pkt.time = time
    nf_applied[nf_id_map[ingress]].append(pkt)

def expPkt(pkt, egress):
    pktsExpected.append(pkt)
    sss_sdnet_tuples.sume_tuple_expect['dst_port'] = nf_port_map[egress]
    sss_sdnet_tuples.write_tuples()
    if egress in ["nf0", "nf1", "nf2", "nf3"]:
        nf_expected[nf_id_map[egress]].append(pkt)
    elif egress == 'bcast':
        nf_expected[0].append(pkt)
        nf_expected[1].append(pkt)
        nf_expected[2].append(pkt)
        nf_expected[3].append(pkt)

def print_summary(pkts):
    for pkt in pkts:
        print "summary = ", pkt.summary()

def write_pcap_files():
    wrpcap("src.pcap", pktsApplied)
    wrpcap("dst.pcap", pktsExpected)
    for i in nf_applied.keys():
        if (len(nf_applied[i]) > 0):
            wrpcap('nf{0}_applied.pcap'.format(i), nf_applied[i])
    for i in nf_expected.keys():
        if (len(nf_expected[i]) > 0):
            wrpcap('nf{0}_expected.pcap'.format(i), nf_expected[i])
    for i in nf_applied.keys():
        print "nf{0}_applied times: ".format(i), [p.time for p in nf_applied[i]]

#####################
# generate testdata #
#####################

MACSRC = "08:11:11:11:11:08"
MAC0 = "08:22:22:22:22:00"
MAC1 = "08:22:22:22:22:01"
MAC2 = "08:22:22:22:22:02"
MAC3 = "08:22:22:22:22:03"

pktCnt = 0
INDEX_WIDTH = 4
REG_DEPTH = 2**INDEX_WIDTH
# Not sure what this is used for
NUM_KEYS = 4

lookup_table = {
    0: 0x00000001,
    1: 0x00000010,
    2: 0x00000100,
    3: 0x00001000
}

def test_port1():
    pktCnt = 0
    # First ethernet
    pktCnt += 1
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    applyPkt(pkt, 'nf0', pktCnt)
    expPkt(pkt, 'nf0')

def test_all_ports():
    pktCnt = 0
    # First ethernet
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    pktCnt += 1
    applyPkt(pkt, 'nf1', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')
    pktCnt += 1
    applyPkt(pkt, 'nf2', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')
    pktCnt += 1
    applyPkt(pkt, 'nf3', pktCnt)
    expPkt(pkt, 'bcast')
    # expPkt(pkt, 'nf0')
    # expPkt(pkt, 'nf1')
    # expPkt(pkt, 'nf2')
    # expPkt(pkt, 'nf3')

def test_ipv6_zero():
    pktCnt = 0
    pktCnt += 1
    pkt = Ether(dst=MAC2, src=MAC1) / IPv6(src="fe80::1", dst="fe80::4")
    pkt = pad_pkt(pkt, 64)
    applyPkt(pkt, 'nf0', pktCnt)
    pktCnt += 1
    pkt = Ether(dst=MAC1, src=MAC2) / IPv6(src="fe80::2", dst="fe80::6")
    pkt = pad_pkt(pkt, 64)
    expPkt(pkt, 'nf0')

# Test that packets are being mirrored
def test_mirror():
    pktCnt = 0
    # inject into nf1,2,3
    pktCnt += 1
    pkt = Ether(dst=MAC2, src=MAC1)
    pkt = pad_pkt(pkt, 64)
    applyPkt(pkt, 'nf1', pktCnt)
    pktCnt += 1
    pkt = Ether(dst=MAC1, src=MAC2)
    pkt = pad_pkt(pkt, 64)
    expPkt(pkt, 'nf0')
    # # Second IP
    # # Third tcp
    # pktCnt += 1
    # pkt = Ether(dst=MAC2, src=MAC1) / IPv6(src="fe80::1", dst="fe80::2") / TCP(sport=42, dport=23)
    # pkt = pad_pkt(pkt, 64)
    # applyPkt(pkt, 'nf0', pktCnt)
    # pktCnt += 1
    # pkt = Ether(dst=MAC1, src=MAC2) / IPv6(src="fe80::2", dst="fe80::1") / TCP(sport=23, dport=42)
    # pkt = pad_pkt(pkt, 64)
    # expPkt(pkt, 'nf0')

#test_mirror()
#test_port1()
#test_all_ports()
test_ipv6_zero()
write_pcap_files()

netfpga/minip4/testdata/gen_testdata.py (vendored, symbolic link, 1 line)

@@ -0,0 +1 @@
gen_testdata_nat64_500byte.py


@@ -0,0 +1,147 @@
#!/usr/bin/env python
# from switch_calc_headers import *
from scapy.all import *
from nf_sim_tools import *
from collections import OrderedDict
import sss_sdnet_tuples

########################
# pkt generation tools #
########################

pktsApplied = []
pktsExpected = []

# Pkt lists for SUME simulations
nf_applied = OrderedDict()
nf_applied[0] = []
nf_applied[1] = []
nf_applied[2] = []
nf_applied[3] = []
nf_expected = OrderedDict()
nf_expected[0] = []
nf_expected[1] = []
nf_expected[2] = []
nf_expected[3] = []

nf_port_map = {
    "nf0":  0b00000001,
    "nf1":  0b00000100,
    "nf2":  0b00010000,
    "nf3":  0b01000000,
    "dma0": 0b00000010
}

nf_id_map = {
    "nf0": 0,
    "nf1": 1,
    "nf2": 2,
    "nf3": 3
}

sss_sdnet_tuples.clear_tuple_files()

def applyPkt(pkt, ingress, time):
    print("Applying pkt on {} at {}: ".format(ingress, time))
    pktsApplied.append(pkt)
    sss_sdnet_tuples.sume_tuple_in['src_port'] = nf_port_map[ingress]
    sss_sdnet_tuples.sume_tuple_expect['src_port'] = nf_port_map[ingress]
    pkt.time = time
    nf_applied[nf_id_map[ingress]].append(pkt)

def expPkt(pkt, egress):
    pktsExpected.append(pkt)
    sss_sdnet_tuples.sume_tuple_expect['dst_port'] = nf_port_map[egress]
    sss_sdnet_tuples.write_tuples()
    if egress in ["nf0", "nf1", "nf2", "nf3"]:
        nf_expected[nf_id_map[egress]].append(pkt)
    elif egress == 'bcast':
        nf_expected[0].append(pkt)
        nf_expected[1].append(pkt)
        nf_expected[2].append(pkt)
        nf_expected[3].append(pkt)

def print_summary(pkts):
    for pkt in pkts:
        print "summary = ", pkt.summary()

def write_pcap_files():
    wrpcap("src.pcap", pktsApplied)
    wrpcap("dst.pcap", pktsExpected)
    for i in nf_applied.keys():
        if (len(nf_applied[i]) > 0):
            wrpcap('nf{0}_applied.pcap'.format(i), nf_applied[i])
    for i in nf_expected.keys():
        if (len(nf_expected[i]) > 0):
            wrpcap('nf{0}_expected.pcap'.format(i), nf_expected[i])
    # i = 0..3
    for i in nf_applied.keys():
        print "nf{0}_applied times: ".format(i), [p.time for p in nf_applied[i]]

#####################
# generate testdata #
#####################

MAC1 = "08:11:11:11:11:08"
MAC2 = "08:22:22:22:22:08"

pktCnt = 0
INDEX_WIDTH = 4
REG_DEPTH = 2**INDEX_WIDTH
# Not sure what this is used for
NUM_KEYS = 4

lookup_table = {
    0: 0x00000001,
    1: 0x00000010,
    2: 0x00000100,
    3: 0x00001000
}

def test_nat64():
    """ NAT64: a v4 packet applied on nf2 is expected as v6 on nf3, and vice versa """
    pkgCnt = 1
    # From v4 to v6
    e4 = Ether(dst=MAC2, src=MAC1)
    i4 = IP(src="10.0.0.42", dst="10.0.0.66")
    u4 = UDP(sport=5000, dport=2345)
    p4 = 500 * "A"
    pkg4 = e4 / i4 / u4 / p4
    applyPkt(pkg4, "nf2", pkgCnt)
    e6 = e4
    i6 = IPv6(src="2001:db8:42::a00:2a", dst="2001:db8:42::42")
    u6 = u4
    p6 = p4
    pkg6 = e6 / i6 / u6 / p6
    expPkt(pkg6, 'nf3')
    pkgCnt += 1
    # From v6 to v4
    e6 = Ether(dst=MAC1, src=MAC2)
    i6 = IPv6(dst="2001:db8:42::a00:2a", src="2001:db8:42::42")
    u6 = UDP(dport=5000, sport=2345)
    p6 = 500 * "A"
    pkg6 = e6 / i6 / u6 / p6
    e4 = e6
    i4 = IP(dst="10.0.0.42", src="10.0.0.66", id=0)
    u4 = u6
    p4 = p6
    pkg4 = e4 / i4 / u4 / p4
    applyPkt(pkg6, "nf3", pkgCnt)
    expPkt(pkg4, 'nf2')

test_nat64()
write_pcap_files()
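The expected IPv6 source follows from embedding the IPv4 source address in the low 32 bits of the 2001:db8:42::/96 prefix: 10.0.0.42 is 0x0a00002a, which renders as the ::a00:2a suffix. The destination 2001:db8:42::42 appears to come from a separately configured mapping for 10.0.0.66 rather than the same embedding. A standalone sketch of that arithmetic (the helper name is illustrative, and it skips general zero-compression handling):

# Sketch: embed an IPv4 address in the low 32 bits of a /96 IPv6 prefix,
# matching 10.0.0.42 <-> 2001:db8:42::a00:2a used in test_nat64() above.
import socket, struct

def v4_to_v6(prefix96, v4addr):
    v4 = struct.unpack("!I", socket.inet_aton(v4addr))[0]
    return "%s%x:%x" % (prefix96, v4 >> 16, v4 & 0xffff)

print(v4_to_v6("2001:db8:42::", "10.0.0.42"))  # -> 2001:db8:42::a00:2a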

netfpga/minip4/testdata/sss_sdnet_tuples.py (vendored, executable file, 178 lines)

@@ -0,0 +1,178 @@
#!/usr/bin/env python
#
# Copyright (c) 2017 Stephen Ibanez
# All rights reserved.
#
# This software was developed by Stanford University and the University of Cambridge Computer Laboratory
# under National Science Foundation under Grant No. CNS-0855268,
# the University of Cambridge Computer Laboratory under EPSRC INTERNET Project EP/H040536/1 and
# by the University of Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-11-C-0249 ("MRC2"),
# as part of the DARPA MRC research programme.
#
# @NETFPGA_LICENSE_HEADER_START@
#
# Licensed to NetFPGA C.I.C. (NetFPGA) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. NetFPGA licenses this
# file to you under the NetFPGA Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.netfpga-cic.org
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @NETFPGA_LICENSE_HEADER_END@
#
"""
Used to create the Tuple_in.txt and Tuple_out.txt files for the
SDNet simulations
"""
import argparse, collections, sys
# this defines the common sume_metadata
from sss_sume_metadata import *
tuple_in_file = "Tuple_in.txt"
tuple_expect_file = "Tuple_expect.txt"
# Digest Data MUST be 256 bits
""" Digest Data:
unused (256 bits)
"""
dig_field_len = collections.OrderedDict()
dig_field_len['unused'] = 256
#initialize tuple_expect
dig_tuple_expect = collections.OrderedDict()
dig_tuple_expect['unused'] = 0
"""
Clear the tuple files
"""
def clear_tuple_files():
    with open(tuple_in_file, "w") as f:
        f.write("")
    with open(tuple_expect_file, "w") as f:
        f.write("")

"""
Return a binary string with length = field_len_dic[field_name]
"""
def get_bin_val(field_name, value, field_len_dic):
    format_string = "{0:0%db}" % field_len_dic[field_name]
    bin_string = format_string.format(value)
    return bin_string

"""
Given a binary string, return the hex version
"""
def bin_to_hex(bin_string):
    hex_string = ''
    assert(len(bin_string) % 4 == 0)
    for i in range(0, len(bin_string), 4):
        hex_string += "{0:1x}".format(int(bin_string[i:i+4], 2))
    return hex_string

"""
Write the next line of the Tuple_in.txt and Tuple_expect.txt
"""
def write_tuples():
    with open("Tuple_in.txt", "a") as f:
        tup_bin_string = ''
        for field_name, value in sume_tuple_in.iteritems():
            bin_val = get_bin_val(field_name, value, sume_field_len)
            tup_bin_string += bin_val
        f.write(bin_to_hex(tup_bin_string) + '\n')
    with open("Tuple_expect.txt", "a") as f:
        tup_bin_string = ''
        for field_name, value in dig_tuple_expect.iteritems():
            bin_val = get_bin_val(field_name, value, dig_field_len)
            tup_bin_string += bin_val
        f.write(bin_to_hex(tup_bin_string) + ' ')
        tup_bin_string = ''
        for field_name, value in sume_tuple_expect.iteritems():
            bin_val = get_bin_val(field_name, value, sume_field_len)
            tup_bin_string += bin_val
        f.write(bin_to_hex(tup_bin_string) + '\n')

###############################
## Functions to parse tuples ##
###############################

def find_tup_len(field_len_dic):
    num_bits = 0
    for length in field_len_dic.values():
        num_bits += length
    return num_bits

"""
Given a hex string, convert it to a binary string
"""
def hex_to_bin(hex_string, length):
    fmat_string = '{0:0%db}' % length
    bin_string = fmat_string.format(int(hex_string, 16))
    return bin_string

def check_length(bin_string, field_len_dic):
    num_bits = find_tup_len(field_len_dic)
    try:
        assert(len(bin_string) == num_bits)
    except:
        print 'ERROR: unexpected input'
        print 'len(bin_string) = ', len(bin_string)
        print 'num_bits = ', num_bits
        sys.exit(1)

"""
Given hex string representation of a tuple, return the parsed version of it
"""
def parse_tup_string(tup_string, field_len_dic):
    tup_len = find_tup_len(field_len_dic)
    bin_string = hex_to_bin(tup_string, tup_len)
    check_length(bin_string, field_len_dic)
    tup = collections.OrderedDict()
    i = 0
    for (field, length) in field_len_dic.iteritems():
        tup[field] = int(bin_string[i:i+length], 2)
        i += length
    return tup

def parse_line(line, tuple_type):
    if tuple_type == 'sume':
        field_len = sume_field_len
    elif tuple_type == 'digest':
        field_len = dig_field_len
    else:
        print >> sys.stderr, "ERROR: unsupported tuple_type, must be one of: [sume, digest]"
        sys.exit(1)
    tup_string = line.strip()
    tup = parse_tup_string(tup_string, field_len)
    print "Parsed Tuple:\n", '-----------------------'
    for (key, val) in tup.items():
        if (key in ['src_port', 'dst_port']):
            print key, " = {0:08b}".format(val)
        else:
            print key, " = ", val

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--parse', type=str, help="A tuple line to parse")
    parser.add_argument('tuple_type', type=str, help="Which tuple type to parse: sume, digest")
    args = parser.parse_args()
    parse_line(args.parse, args.tuple_type)
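To illustrate what write_tuples() appends, the standalone snippet below formats a single src_port value the same way get_bin_val() and bin_to_hex() do; the 8-bit width is chosen only for the example, the real field widths come from sss_sume_metadata (not shown here):

# Sketch: hex-encode one 8-bit src_port field like get_bin_val() + bin_to_hex().
def field_to_hex(value, width_bits):
    bin_string = ("{0:0%db}" % width_bits).format(value)
    assert len(bin_string) % 4 == 0
    return ''.join("{0:1x}".format(int(bin_string[i:i+4], 2))
                   for i in range(0, len(bin_string), 4))

print(field_to_hex(0b00000100, 8))  # src_port bitmask for nf1 -> '04'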


@@ -0,0 +1,25 @@
from scapy.all import *
import sys, os

CALC_TYPE = 0x1212
ADD_OP = 0
SUB_OP = 1
LOOKUP_OP = 2
ADD_REG_OP = 3
SET_REG_OP = 4

class Calc(Packet):
    name = "Calc"
    fields_desc = [
        IntField("op1", 0),
        ByteEnumField("opCode", 0, {ADD_OP:"ADD", SUB_OP:"SUB", LOOKUP_OP:"LOOKUP", ADD_REG_OP:"ADD_REG", SET_REG_OP:"SET_REG"}),
        IntField("op2", 0),
        IntField("result", 0)
    ]
    def mysummary(self):
        return self.sprintf("op1=%op1% %opCode% op2=%op2% result=%result%")

bind_layers(Ether, Calc, type=CALC_TYPE)
bind_layers(Calc, Raw)
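For reference, a packet built with this header could look as follows (a standalone sketch run with the definitions above in scope; the MAC addresses are taken from the test scripts, the operand values are arbitrary):

# Sketch: build a Calc ADD request; bind_layers() above lets scapy fill in
# the Ethernet type (CALC_TYPE, 0x1212) automatically.
pkt = Ether(src="08:11:11:11:11:08", dst="08:22:22:22:22:08") / \
      Calc(op1=7, opCode=ADD_OP, op2=35)
print(pkt.summary())  # e.g. "Ether / op1=7 ADD op2=35 result=0"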