commit fa62566ea3
35  .vscode/c_cpp_properties.json  vendored  Normal file
@@ -0,0 +1,35 @@
{
    "configurations": [
        {
            "name": "windows-gcc-x64",
            "includePath": [
                "${workspaceFolder}/**"
            ],
            "defines": [
                "_DEBUG",
                "UNICODE",
                "__GNUC__=6",
                "__cdecl=__attribute__((__cdecl__))"
            ],
            "intelliSenseMode": "windows-gcc-x64",
            "browse": {
                "limitSymbolsToIncludedHeaders": true,
                "databaseFilename": "",
                "path": [
                    "${workspaceRoot}",
                    "F:/mingw64/include/**",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/include/c++",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/include/c++/x86_64-w64-mingw32",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/include/c++/backward",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/include",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/include-fixed",
                    "F:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/8.1.0/../../../../x86_64-w64-mingw32/include"
                ]
            },
            "cStandard": "${default}",
            "cppStandard": "${default}",
            "compilerPath": "F:/mingw64/bin/gcc.exe"
        }
    ],
    "version": 4
}
64  .vscode/launch.json  vendored  Normal file
@@ -0,0 +1,64 @@
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "(Windows) Launch",
            "type": "cppvsdbg",
            "request": "launch",
            "program": "cmd",
            "preLaunchTask": "echo",
            "args": [
                "/C",
                "${fileDirname}\\${fileBasenameNoExtension}.exe",
                "&",
                "echo.",
                "&",
                "pause"
            ],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "console": "externalTerminal"
        },
        {
            "name": "(gdb) Launch",
            "type": "cppdbg",
            "request": "launch",
            "program": "${workspaceFolder}/${fileBasenameNoExtension}.exe",
            "args": [],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": true,
            "MIMode": "gdb",
            "miDebuggerPath": "F:\\mingw64\\bin\\gdb.exe",
            "preLaunchTask": "echo",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                }
            ]
        },
        {
            "name": "C/C++ Runner: Debug Session",
            "type": "cppdbg",
            "request": "launch",
            "args": [],
            "stopAtEntry": false,
            "externalConsole": true,
            "cwd": "f:/RDMA/YCSB-C/db",
            "program": "f:/RDMA/YCSB-C/db/build/Debug/outDebug",
            "MIMode": "gdb",
            "miDebuggerPath": "gdb",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                }
            ]
        }
    ]
}
116  .vscode/settings.json  vendored  Normal file
@@ -0,0 +1,116 @@
{
    "C_Cpp_Runner.msvcBatchPath": "",
    "C_Cpp_Runner.cCompilerPath": "gcc",
    "C_Cpp_Runner.cppCompilerPath": "g++",
    "C_Cpp_Runner.debuggerPath": "gdb",
    "C_Cpp_Runner.cStandard": "",
    "C_Cpp_Runner.cppStandard": "",
    "C_Cpp_Runner.useMsvc": false,
    "C_Cpp_Runner.warnings": [
        "-Wall",
        "-Wextra",
        "-Wpedantic",
        "-Wshadow",
        "-Wformat=2",
        "-Wcast-align",
        "-Wconversion",
        "-Wsign-conversion",
        "-Wnull-dereference"
    ],
    "C_Cpp_Runner.msvcWarnings": [
        "/W4",
        "/permissive-",
        "/w14242",
        "/w14287",
        "/w14296",
        "/w14311",
        "/w14826",
        "/w44062",
        "/w44242",
        "/w14905",
        "/w14906",
        "/w14263",
        "/w44265",
        "/w14928"
    ],
    "C_Cpp_Runner.enableWarnings": true,
    "C_Cpp_Runner.warningsAsError": false,
    "C_Cpp_Runner.compilerArgs": [],
    "C_Cpp_Runner.linkerArgs": [],
    "C_Cpp_Runner.includePaths": [],
    "C_Cpp_Runner.includeSearch": [
        "*",
        "**/*"
    ],
    "C_Cpp_Runner.excludeSearch": [
        "**/build",
        "**/build/**",
        "**/.*",
        "**/.*/**",
        "**/.vscode",
        "**/.vscode/**"
    ],
    "C_Cpp_Runner.useAddressSanitizer": false,
    "C_Cpp_Runner.useUndefinedSanitizer": false,
    "C_Cpp_Runner.useLeakSanitizer": false,
    "C_Cpp_Runner.showCompilationTime": false,
    "C_Cpp_Runner.useLinkTimeOptimization": false,
    "C_Cpp_Runner.msvcSecureNoWarnings": false,
    "files.associations": {
        "functional": "cpp",
        "optional": "cpp",
        "istream": "cpp",
        "ostream": "cpp",
        "ratio": "cpp",
        "system_error": "cpp",
        "array": "cpp",
        "tuple": "cpp",
        "type_traits": "cpp",
        "utility": "cpp",
        "string_view": "cpp",
        "initializer_list": "cpp",
        "atomic_client.h": "c",
        "iostream": "cpp",
        "atomic": "cpp",
        "*.tcc": "cpp",
        "cctype": "cpp",
        "chrono": "cpp",
        "clocale": "cpp",
        "cmath": "cpp",
        "condition_variable": "cpp",
        "cstdarg": "cpp",
        "cstddef": "cpp",
        "cstdint": "cpp",
        "cstdio": "cpp",
        "cstdlib": "cpp",
        "cstring": "cpp",
        "ctime": "cpp",
        "cwchar": "cpp",
        "cwctype": "cpp",
        "deque": "cpp",
        "unordered_map": "cpp",
        "vector": "cpp",
        "exception": "cpp",
        "algorithm": "cpp",
        "iterator": "cpp",
        "map": "cpp",
        "memory": "cpp",
        "memory_resource": "cpp",
        "numeric": "cpp",
        "random": "cpp",
        "string": "cpp",
        "fstream": "cpp",
        "future": "cpp",
        "iomanip": "cpp",
        "iosfwd": "cpp",
        "limits": "cpp",
        "mutex": "cpp",
        "new": "cpp",
        "sstream": "cpp",
        "stdexcept": "cpp",
        "streambuf": "cpp",
        "thread": "cpp",
        "cinttypes": "cpp",
        "typeinfo": "cpp"
    }
}
28  .vscode/tasks.json  vendored  Normal file
@@ -0,0 +1,28 @@
{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "echo",
            "type": "shell",
            "command": "gcc",
            "args": [
                "-g",
                "${file}",
                "-o",
                "${fileBasenameNoExtension}.exe",
                "-fexec-charset=GBK", // fix garbled Chinese console output
                "-lstdc++" // link libstdc++ so C++ sources build, not only C
            ]
        }
    ],
    "presentation": {
        "echo": true,
        "reveal": "always",
        "focus": false,
        "panel": "shared",
        "showReuseMessage": true,
        "clear": false
    }
}
1  DTA  Submodule
@@ -0,0 +1 @@
Subproject commit 596dc5378353b9b448305320d7f50bcf32f3a66f

1  ROLEX  Submodule
@@ -0,0 +1 @@
Subproject commit 016a397b2e8a87b758564fff6eb8417c32ef4991

1  YCSB-C  Submodule
@@ -0,0 +1 @@
Subproject commit 6475c7a5c3c7b229224ce5ca3332a58c994b8a09
7  operation  Normal file
@@ -0,0 +1,7 @@
port-add 7/0 100g rs
port-add 3/0 100g rs
port-add 33/2 10g none
an-set 7/0 1
an-set 3/0 1
an-set 33/2 1
port-enb -/-
1756  wly_experiment/bf_drivers.log  Normal file
File diff suppressed because it is too large
4  wly_experiment/compile_dta.sh  Normal file
@@ -0,0 +1,4 @@
# Enable the bf-sde environment
. ../bf-sde-9.2.0/set_sde.bash
# Compile the Translator code
./../bf-sde-9.2.0/p4_build.sh ../wly_experiment/dta_codes/translator/p4src/dta_translator.p4
BIN  wly_experiment/dta_codes/dependency/common-0.1.2.tar.gz  Normal file
Binary file not shown.

BIN  wly_experiment/dta_codes/manager/.tofino.py.swp  Normal file
Binary file not shown.
236  wly_experiment/dta_codes/manager/Collector.py  Normal file
@@ -0,0 +1,236 @@
#!/usr/bin/env python3
# This script handles communication with the collector

import pexpect
import time
import datetime

from Machine import Machine


class Collector(Machine):
    ssh_collector = None
    interface = None
    ip = None

    def __init__(self, host, name="Collector"):
        self.host = host
        self.name = name
        self.log("Initiating %s at %s..." % (self.name, host))

        assert self.testConnection(), "Connection to the Collector does not work!"

    def configureNetworking(self):
        self.log("Configuring networking...")

        ssh = self.init_ssh()

        time.sleep(0.5)

        ssh.sendline("./network_setup.sh")
        i = ssh.expect(["$", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Timeout while running network setup script!"

        # Check IP assignment
        ssh.sendline("ifconfig ma1")
        i = ssh.expect(["192.168.1.91", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "Network interface failed to configure!"

        # Check one of the ARP rules
        ssh.sendline("arp 192.168.1.87")
        i = ssh.expect(["84:c7:8f:00:6d:b3", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "ARP rules failed to update!"

        time.sleep(2)

        self.debug("Networking is set up.")

    def disableICRCVerification(self):
        self.log("Disabling iCRC verification on the NIC...")

        ssh = self.init_ssh()

        ssh.sendline("./disable-icrc.sh")
        i = ssh.expect(["WARNING: this script assumes", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "Failed to start the disable-icrc.sh script!"

        i = ssh.expect(["$", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Timeout while running the iCRC disabling script!"

        time.sleep(5)

        self.debug("iCRC verification is now disabled")

    def setupRDMA(self):
        self.log("Setting up RDMA...")

        ssh = self.init_ssh()

        ssh.sendline("./rdma/setup_rdma.sh")
        i = ssh.expect(["Removing old modules", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "Failed to start setup_rdma.sh!"

        i = ssh.expect(["INFO System info file", pexpect.TIMEOUT], timeout=20)
        assert i == 0, "Timeout while setting up RDMA!"

        time.sleep(2)

        self.debug("RDMA is now set up and configured!")

    def recompileRDMA(self):
        self.log("Recompiling RDMA...")

        ssh = self.init_ssh()

        ssh.sendline("cd rdma/rdma-core")
        ssh.expect("$", timeout=2)

        ssh.sendline("sudo ./build.sh")
        i = ssh.expect(["Build files have been written to", pexpect.TIMEOUT], timeout=30)
        assert i == 0, "Failed to build rdma-core!"

        self.debug("RDMA-core is now built")

        ssh.sendline("cd ~/rdma/mlnx_ofed/MLNX_OFED_SRC-5.5-1.0.3.2")
        ssh.expect("$", timeout=2)

        ssh.sendline("sudo ./install.pl")
        i = ssh.expect(["Checking SW Requirements", pexpect.TIMEOUT], timeout=30)
        assert i == 0, "Failed to start the mlnx_ofed installation!"
        self.debug("mlnx_ofed is now installing prerequisites...")

        i = ssh.expect(["This program will install the OFED package on your machine.", pexpect.TIMEOUT], timeout=180)
        assert i == 0, "Stuck at installing prerequisites!"

        i = ssh.expect(["Uninstalling the previous version of OFED", pexpect.TIMEOUT], timeout=180)
        assert i == 0, "Something went wrong!"
        self.debug("Uninstalling the previous version of OFED...")

        i = ssh.expect(["Building packages", pexpect.TIMEOUT], timeout=300)
        assert i == 0, "Stuck at uninstalling old OFED!"
        self.debug("Building new OFED (this will take a while)...")

        i = ssh.expect(["Installation passed successfully", pexpect.TIMEOUT], timeout=1800)
        assert i == 0, "Installation failed or timed out!"

        self.debug("OFED is now reinstalled!")

    def compileCollector(self):
        self.log("Compiling the collector service...")

        ssh = self.init_ssh()

        ssh.sendline("cd ./rdma/playground")
        ssh.expect("$", timeout=2)

        ssh.sendline("mv ./collector_new ./collector_backup_new")
        ssh.expect("$", timeout=2)

        ssh.sendline("./compile.sh")
        i = ssh.expect(["Compiling DTA Collector...", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Compilation did not start!"

        i = ssh.expect(["Compilation done", pexpect.TIMEOUT], timeout=20)
        assert i == 0, "Timeout while compiling collector service!"

        ssh.sendline("ls -l")
        i = ssh.expect(["collector_new", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Failed to compile collector service!"

        self.debug("Compilation finished")

    def killOldCollector(self):
        self.debug("Killing old collectors, if any are running")
        ssh = self.init_ssh()
        ssh.sendline("sudo killall collector_new")
        i = ssh.expect(["$", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Timeout while killing old collector service(s)!"

        time.sleep(2)

    def setupCollector(self):
        self.log("Setting up the collector...")

        self.killOldCollector()

        self.setupRDMA()
        #self.compileCollector()
        self.configureNetworking()
        self.disableICRCVerification()
        self.setupRDMA()

    def startCollector(self):
        self.log("Starting the DTA collector service...")

        self.ssh_collector = self.init_ssh()

        self.debug("Starting the service...")
        self.ssh_collector.sendline("sudo ./rdma/playground/collector_new")
        i = self.ssh_collector.expect(["Press ENTER to analyze storage.", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Failed to start the DTA collector!"

        time.sleep(3)  # Give time for various primitive threads to complete

        i = self.ssh_collector.expect(["Segmentation fault", pexpect.TIMEOUT], timeout=2)
        assert i == 1, "The collector returned a segfault during startup!"

        time.sleep(2)

        self.log("DTA collector service is now running")

    def verifyRDMAConnections(self):
        self.log("Verifying RDMA connections from the translator")

        self.ssh_collector.sendline("")  # Send an ENTER to the collector service
        numStructures = 0
        while True:
            i = self.ssh_collector.expect(["Printing RDMA info for ", pexpect.TIMEOUT], timeout=5)
            if i == 0:
                numStructures += 1
                self.debug("Found output for DTA structure. Total %i" % numStructures)
            else:
                break

        self.log("There seem to be %i active DTA structures" % numStructures)
        assert numStructures > 0, "No RDMA connections were detected at the collector!"

    def ui_menu(self):
        self.log("Entering menu")

        while True:
            print("1: \t(B)ack")
            print("2: \t(R)eboot")
            print("3: \tRe(c)ompile DTA collector")
            print("4: \t(S)tart collector service")
            print("5: \tS(e)tup RDMA")
            print("6: \tConfigure (n)etworking")
            print("7: \t(D)isable iCRC verification")

            option = input("Selection: ").lower()

            if option in ["b", "1"]:
                break

            if option in ["r", "2"]:
                self.reboot()

            if option in ["c", "3"]:
                self.compileCollector()

            if option in ["s", "4"]:
                self.startCollector()

            if option in ["e", "5"]:
                self.setupRDMA()

            if option in ["n", "6"]:
                self.configureNetworking()

            if option in ["d", "7"]:
                self.disableICRCVerification()

        return
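Collector.py (and the other manager scripts below) drive every remote step through the same pexpect idiom: send a command, expect a list of patterns with pexpect.TIMEOUT as the final entry, and assert on the matched index. A minimal self-contained sketch of that idiom, with a hypothetical host and command:

# Minimal sketch of the sendline/expect/assert idiom used throughout the
# manager scripts. The host and command here are hypothetical placeholders.
import pexpect

child = pexpect.spawn("ssh user@example-host")
child.expect(["$", pexpect.TIMEOUT], timeout=5)  # wait for a shell prompt

child.sendline("echo ready")
# expect() returns the index of the first matching pattern; listing
# pexpect.TIMEOUT last turns a timeout into index 1 instead of an exception,
# which the assert then converts into a readable failure message.
i = child.expect(["ready", pexpect.TIMEOUT], timeout=5)
assert i == 0, "Remote command did not produce the expected output!"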
276  wly_experiment/dta_codes/manager/Generator.py  Normal file
@@ -0,0 +1,276 @@
#!/usr/bin/env python3
# This script handles communication with the generator

import pexpect
import time
import datetime

from common import strToPktrate
from Machine import Machine


class Generator(Machine):
    interface = None
    ip = None

    ssh_trex = None
    ssh_trexConsole = None

    def __init__(self, host, name="Generator"):
        self.host = host
        self.name = name
        self.log("Initiating %s at %s..." % (self.name, host))

        assert self.testConnection(), "Connection to the Generator does not work!"

    def configureNetworking(self):
        self.log("Configuring networking...")

        ssh = self.init_ssh()

        ssh.sendline("./network_setup.sh")
        i = ssh.expect(["$", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Timeout while running network setup script!"

        # Check IP assignment (disabled, dpdk will remove this interface)
        #ssh.sendline("ifconfig ens2f0")
        #i = ssh.expect(["10.0.0.200", pexpect.TIMEOUT], timeout=2)
        #assert i == 0, "Network interface failed to configure!"

        # Check one of the ARP rules
        #ssh.sendline("arp 10.0.0.51")
        #i = ssh.expect(["b8:ce:f6:d2:12:c7", pexpect.TIMEOUT], timeout=2)
        #assert i == 0, "ARP rules failed to update!"

        self.log("Networking is set up.")

    def startTrex(self):
        self.log("Starting TReX...")

        self.ssh_trex = self.init_ssh()
        self.ssh_trex.sendline("cd ./generator/trex")
        i = self.ssh_trex.expect("$", timeout=5)

        self.log("Launching trex service")
        self.ssh_trex.sendline("sudo ./t-rex-64 -i -c 16")
        i = self.ssh_trex.expect(["Starting Scapy server", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "TReX does not respond!"

        i = self.ssh_trex.expect(["Global stats enabled", pexpect.TIMEOUT], timeout=30)
        assert i == 0, "TReX start timed out!"

    def startTrexConsole(self):
        self.log("Starting TReX Console...")
        self.ssh_trexConsole = self.init_ssh()

        self.ssh_trexConsole.sendline("cd ./generator/trex")
        i = self.ssh_trexConsole.expect("$", timeout=2)

        self.ssh_trexConsole.sendline("./trex-console")
        i = self.ssh_trexConsole.expect(["Server Info", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Console does not launch!"

        self.ssh_trexConsole.sendline("./trex-console")
        i = self.ssh_trexConsole.expect(["trex>", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Console timed out!"

        self.log("TReX Console is running!")

        time.sleep(2)

    def setup(self):
        self.log("Setting up the generator")

        self.configureNetworking()
        self.startTrex()
        self.startTrexConsole()

    #TODO: make this check Tofino rate-show!
    def findCurrentRate(self):
        trafficFlowing = False
        for attempt in range(10):

            time.sleep(2)

            # Clear the output buffer
            self.ssh_trex.read_nonblocking(1000000000, timeout=1)
            i = self.ssh_trex.expect(["Total-PPS : 0.00 pps", pexpect.TIMEOUT], timeout=1)

            if i == 1:
                trafficFlowing = True
                break
            else:
                self.log("No traffic yet...")

        if not trafficFlowing:
            return 0

        #assert trafficFlowing, "TReX does not actually generate traffic!"

        # Retrieve the reported packet rate
        self.ssh_trex.expect("Total-PPS", timeout=3)
        rate_str = self.ssh_trex.readline()
        rate_str = rate_str.decode('ascii').replace(" ", "").replace(":", "").replace("\r\n", "")
        self.debug("The reported rate is: %s" % rate_str)

        return strToPktrate(rate_str)

    def waitForSpeed(self, speed_target, error_target=0.1):
        # Check in the TReX daemon that traffic is actually generating
        self.log("Checking in TReX daemon that traffic is flowing...")

        # Wait for the traffic to be in the correct range
        rate_target = strToPktrate(speed_target)
        for attempt in range(10):
            time.sleep(2)
            rate = self.findCurrentRate()

            error = 1 - rate / rate_target

            self.debug("We generate %s pps, the target is %s pps" % (str(rate), str(rate_target)))

            if error < error_target:
                self.debug("The speed error is acceptable: %s" % str(error))
                break

            self.debug("The speed error is too large: %s" % str(error))

        assert error < error_target, "The traffic rate error is too large!"

        self.log("Traffic is flowing correctly!")

    # Start STL-based replays
    def startTraffic_script(self, script="stl/dta_keywrite_basic.py", speed="1kpps", tuneables=""):
        self.log("Starting traffic generation script %s at speed %s" % (script, speed))

        cmd = "start -f %s -m %s -t %s" % (script, speed, tuneables)
        print(cmd)
        self.ssh_trexConsole.sendline(cmd)
        i = self.ssh_trexConsole.expect(["Starting traffic on port", "are active - please stop them or specify", pexpect.TIMEOUT], timeout=10)

        if i == 1:
            self.error("Can't start traffic, already running!")

        assert i != 2, "Traffic generation start timed out!"

        self.waitForSpeed(speed)  # Wait until the target rate is achieved

    # Push the Marple PCAP
    def startTraffic_pcap(self, pcap, speed="1kpps"):
        self.log("Replaying pcap %s at speed %s" % (pcap, speed))

        rate = strToPktrate(speed)
        print("rate", rate)

        ipg = 1000000 / rate
        print("ipg", ipg)

        cmd = "push --force -f %s -p 0 -i %f -c 0 --dst-mac-pcap" % (pcap, ipg)

        print(cmd)
        self.ssh_trexConsole.sendline(cmd)
        i = self.ssh_trexConsole.expect(["Starting traffic on port", "are active - please stop them or specify", pexpect.TIMEOUT], timeout=10)

        if i == 1:
            self.error("Can't start traffic, already running!")

        assert i != 2, "Traffic generation start timed out!"

        self.waitForSpeed(speed)  # Wait until the target rate is achieved

    def startTraffic_keywrite(self, speed="1kpps", redundancy=4):
        self.log("Replaying KeyWrite traffic at redundancy %i and speed %s" % (redundancy, speed))

        tuneables = "--redundancy %i" % (redundancy)
        self.startTraffic_script(script="stl/dta_keywrite_basic.py", speed=speed, tuneables=tuneables)

    def startTraffic_keyincrement(self, speed="1kpps", redundancy=4):
        self.log("Replaying KeyIncrement traffic at redundancy %i and speed %s" % (redundancy, speed))

        tuneables = "--redundancy %i" % (redundancy)
        self.startTraffic_script(script="stl/dta_keyincrement_basic.py", speed=speed, tuneables=tuneables)

    def startTraffic_append(self, speed="1kpps"):
        self.log("Replaying Append traffic at speed %s" % (speed))

        tuneables = ""
        self.startTraffic_script(script="stl/dta_append_basic.py", speed=speed, tuneables=tuneables)

    def startTraffic_marple(self):
        speed = input("Speed (e.g., 1mpps): ")
        pcap = "/home/jlanglet/generator/marple_dta.pcap"

        self.startTraffic_pcap(pcap, speed)

    def stopTraffic(self):
        self.log("Stopping traffic generation")

        self.ssh_trexConsole.sendline("stop")
        i = self.ssh_trexConsole.expect(["Stopping traffic on port", "no active ports", pexpect.TIMEOUT], timeout=5)
        if i == 1:
            self.error("No traffic is playing! Nothing to stop")

        assert i != 2, "Traffic stop timed out!"

    def ui_startTraffic(self):
        speed = input("Speed (e.g., 1mpps): ")

        # Configure and run the primitive
        while True:

            primitive = input("Primitive (keywrite, append, keyincrement): ")

            if primitive == "keywrite":
                redundancy = int(input("Redundancy: "))
                self.startTraffic_keywrite(speed=speed, redundancy=redundancy)

            elif primitive == "append":
                self.startTraffic_append(speed=speed)

            elif primitive == "keyincrement":
                redundancy = int(input("Redundancy: "))
                self.startTraffic_keyincrement(speed=speed, redundancy=redundancy)

            else:
                print("Invalid choice")
                continue

            print("Started")

            break

    def ui_menu(self):
        self.log("Entering menu")

        while True:
            print("1: \t(B)ack")
            print("2: \t(S)tart traffic (script)")
            print("3: \tStart (M)arple traffic")
            print("4: \tS(t)op traffic")
            print("5: \t(K)ill console")
            print("6: \t(R)eboot")

            option = input("Selection: ").lower()

            if option in ["b", "1"]:
                break

            if option in ["s", "2"]:
                self.ui_startTraffic()

            if option in ["m", "3"]:
                self.startTraffic_marple()

            if option in ["t", "4"]:
                self.stopTraffic()

            if option in ["k", "5"]:
                self.log("Removing reference of TReX console!")
                self.ssh_trexConsole = None

            if option in ["r", "6"]:
                self.reboot()
wly_experiment/dta_codes/manager/Machine.py
Normal file
80
wly_experiment/dta_codes/manager/Machine.py
Normal file
@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env python3
|
||||
#This script contains the Machine class, with various functions shared between the components
|
||||
import time
|
||||
import pexpect
|
||||
|
||||
from common import log
|
||||
|
||||
class Machine:
|
||||
host = None
|
||||
name = None
|
||||
|
||||
def logger(self, text):
|
||||
log("%s: \t%s" %(self.name, text))
|
||||
|
||||
#High verbosity output
|
||||
def debug(self, text):
|
||||
self.logger(" Debug: %s" %(text))
|
||||
|
||||
def error(self, text):
|
||||
self.logger("ERROR: %s" %(text))
|
||||
|
||||
def reboot(self):
|
||||
self.logger("Rebooting %s at %s" %(self.name, self.host))
|
||||
ssh = self.init_ssh()
|
||||
ssh.sendline("sudo reboot")
|
||||
i = ssh.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=10)
|
||||
assert i == 0, "Failed to detect reboot!"
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
def testConnection(self):
|
||||
self.debug("Testing connection to %s at %s..." %(self.name, self.host))
|
||||
|
||||
p = pexpect.spawn("ssh %s" %self.host)
|
||||
i = p.expect(["Welcome to Ubuntu", "Connection refused", pexpect.TIMEOUT, pexpect.EOF], timeout=5)
|
||||
if i == 0:
|
||||
self.debug("Logged into %s" %self.host)
|
||||
elif i == 1:
|
||||
self.debug("Connection refused!")
|
||||
return False
|
||||
elif i == 2:
|
||||
self.debug("Connection timeout!")
|
||||
return False
|
||||
elif i == 3:
|
||||
self.debug("SSH terminates!")
|
||||
return False
|
||||
|
||||
self.debug("Verifying command capability...")
|
||||
content = "Testing"
|
||||
p.sendline("echo \"%s\" > ssh_works" %content)
|
||||
p.expect("$")
|
||||
p.sendline("cat ssh_works")
|
||||
i = p.expect([content, pexpect.TIMEOUT], timeout=2)
|
||||
if i != 0:
|
||||
self.error("Did not find expected output!")
|
||||
return False
|
||||
|
||||
self.debug("Commands work! Resetting and logging out...")
|
||||
|
||||
p.sendline("rm ssh_works")
|
||||
p.expect("$")
|
||||
p.sendline("exit")
|
||||
time.sleep(1)
|
||||
|
||||
p.close()
|
||||
|
||||
return True
|
||||
|
||||
def init_ssh(self):
|
||||
self.debug("Logging into %s at %s..." %(self.name, self.host))
|
||||
p = pexpect.spawn("ssh %s" %self.host)
|
||||
i = p.expect(["$", pexpect.TIMEOUT], timeout=5)
|
||||
|
||||
if i != 0:
|
||||
self.error("Timeout!")
|
||||
return None
|
||||
|
||||
self.debug("SSH to %s is initiated" %self.host)
|
||||
|
||||
return p
|
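Machine is the shared base class: Collector, Generator, and Tofino each subclass it and build component-specific steps on top of init_ssh() and testConnection(). A minimal sketch of that pattern, with a hypothetical component and host:

from Machine import Machine  # assumes Machine.py is on the import path


class Example(Machine):  # hypothetical component, for illustration only
    def __init__(self, host, name="Example"):
        self.host = host
        self.name = name
        assert self.testConnection(), "Connection to the Example does not work!"

    def checkUptime(self):
        ssh = self.init_ssh()
        ssh.sendline("uptime")


example = Example(host="user@example-host")  # hypothetical host
example.checkUptime()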
85  wly_experiment/dta_codes/manager/Manager.py  Normal file
@@ -0,0 +1,85 @@
#!/usr/bin/env python3
# This script manages the entire DTA system. This is the script you want to run.

import time

#from common import log, debug, strToPktrate
from Tofino import Tofino
from Collector import Collector
from Generator import Generator

host_tofino = "root@192.168.1.91"       # Point to the Tofino
host_collector = "server@192.168.1.87"  # Point to the collector
host_generator = "server@192.168.1.86"  # Point to the traffic generator


def setup(do_reboot=False, manual_collector=True):
    # Reboot the machines and wait for them to come back online
    if do_reboot:
        systems = [tofino, collector, generator]

        # Reboot all
        for system in systems:
            system.reboot()

        # Wait for all to come online
        for system in systems:
            while not system.testConnection():
                print("%s is offline..." % system.name)
                time.sleep(20)

    tofino.flashPipeline()
    tofino.confPorts()

    tofino.configureNetworking()
    collector.setupCollector()

    if manual_collector:
        print("sudo /home/jlanglet/rdma/playground/collector_new")
        input("Start the DTA collector and press ENTER")
    else:
        collector.startCollector()

    tofino.startController()

    if not manual_collector:
        collector.verifyRDMAConnections()  # Manually disabled

    generator.setup()


def Menu():
    print("1: \t(S)tart up DTA environment")
    print("2: \t(T)ofino menu")
    print("3: \t(C)ollector menu")
    print("4: \t(G)enerator menu")

    option = input("Selection: ").lower()

    if option in ["s", "1"]:  # Setup
        resp = input("Reboot machines? (y/N): ")
        do_reboot = resp == "y"
        resp = input("Start collector manually? (y/N): ")
        manual_collector = resp == "y"

        setup(do_reboot=do_reboot, manual_collector=manual_collector)

    if option in ["t", "2"]:  # Tofino
        tofino.ui_menu()

    if option in ["c", "3"]:  # Collector
        collector.ui_menu()

    if option in ["g", "4"]:  # Generator
        generator.ui_menu()


# Set up connections to the machines
tofino = Tofino(host=host_tofino, pipeline="dta_translator")
collector = Collector(host=host_collector)
generator = Generator(host=host_generator)

# Loop the menu
while True:
    Menu()
335  wly_experiment/dta_codes/manager/Tofino.py  Normal file
@@ -0,0 +1,335 @@
#!/usr/bin/env python3
# This script prepares and launches a DTA pipeline on the Tofino switch, and prepares RDMA states
# Assuming our setup, and SDE 9.7.0


import pexpect
import time
import datetime

from Machine import Machine


# This is currently forced to only be the Translator Tofino
class Tofino(Machine):
    pipeline = None
    port_config = None
    essential_ports = None

    ssh_switchd = None
    ssh_controller = None

    def __init__(self, host, pipeline, name="Tofino"):
        self.host = host
        self.name = name
        self.pipeline = pipeline

        self.log("Initiating %s at %s..." % (self.name, host))

        #TODO: Move these centralized into some config file
        self.port_config = [
            "pm port-del -/-",
            "pm port-add 7/0 100G rs",
            "pm port-add 3/0 100G rs",
            "pm port-add 57/0 10G none",
            "pm port-add 57/1 10G none",
            "pm an-set 7/0 1",
            "pm an-set 3/0 1",
            "pm an-set 57/0 1",
            "pm an-set 57/1 1",
            "pm port-enb -/-",
            "bf_pltfm led",
            "led-task-cfg -r 1",
            "..",
            ".."
        ]
        self.essential_ports = [
            "7/0",
            "3/0",
            "57/0",
            "57/1",
        ]

        assert self.testConnection(), "Connection to the Tofino does not work!"

    # This is currently forced to just compile the Translator pipeline
    def compilePipeline(self, enable_nack_tracking=True, num_tracked_nacks=65536, append_batch_size=4, resync_grace_period=100000, max_supported_qps=256):

        project = "dta_translator"
        p4_file = "~/wly_experiment/dta_codes/translator/p4src/dta_translator.p4"

        self.log("Compiling project %s from source %s..." % (project, p4_file))

        assert append_batch_size in [1, 2, 4, 8, 16], "Unsupported Append batch size"

        ssh = self.init_ssh()

        #
        # Generate the compilation command
        #
        preprocessor_directives = ""
        # NACK tracking/retransmission
        if enable_nack_tracking:
            preprocessor_directives += " -DDO_NACK_TRACKING"
            preprocessor_directives += " -DNUM_TRACKED_NACKS=%i" % num_tracked_nacks

        # Append batch size (number of batched entries)
        preprocessor_directives += " -DAPPEND_BATCH_SIZE=%i" % append_batch_size
        preprocessor_directives += " -DNUM_APPEND_ENTRIES_IN_REGISTERS=%i" % (append_batch_size - 1)
        preprocessor_directives += " -DAPPEND_RDMA_PAYLOAD_SIZE=%i" % (append_batch_size * 4)

        # Grace period
        preprocessor_directives += " -DQP_RESYNC_PACKET_DROP_NUM=%i" % (resync_grace_period)

        # Max supported queue pairs
        preprocessor_directives += " -DMAX_SUPPORTED_QPS=%i" % (max_supported_qps)

        # Build the actual compilation command out of the components
        command = "bf-p4c --target tofino --arch tna --std p4-16 -g -o $P4_BUILD_DIR/%s/ %s %s && echo 'Compilation finished'" % (project, preprocessor_directives, p4_file)

        self.debug("Executing '%s'..." % command)

        ssh.sendline(command)
        i = ssh.expect(["Compilation finished", "error:", pexpect.TIMEOUT], timeout=180)

        if i == 1:
            self.error("Compilation error!")
        elif i == 2:
            self.error("Compilation timeout!")

        assert i == 0, "Pipeline compilation failed!"

        self.debug("Compilation done!")

    def flashPipeline(self):
        self.ssh_switchd = self.init_ssh()

        # Kill the old process (if one is running)
        self.ssh_switchd.sendline("sudo killall bf_switchd")
        self.ssh_switchd.expect("$", timeout=4)

        # Flash the pipeline
        self.log("Flashing pipeline %s at %s" % (self.pipeline, self.host))
        self.ssh_switchd.sendline("./start_p4.sh %s" % self.pipeline)

        i = self.ssh_switchd.expect(["Using SDE_INSTALL", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Failed to initiate pipeline startup!"

        self.debug("Pipeline is flashing...")

        i = self.ssh_switchd.expect(["WARNING: Authorised Access Only", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Failed to flash the pipeline!"
        self.debug("Pipeline '%s' is now running on host %s!" % (self.pipeline, self.host))

    def initBFshell(self):
        ssh = self.init_ssh()

        self.debug("Entering bfshell...")
        ssh.sendline("bfshell")
        i = ssh.expect(["WARNING: Authorised Access Only", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Failed to enter bfshell!"
        self.debug("bfshell established!")

        return ssh

    def initUCLI(self):
        ssh = self.initBFshell()

        self.debug("Entering ucli...")
        ssh.sendline("ucli")
        i = ssh.expect(["bf-sde", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Failed to enter ucli!"

        return ssh

    # This assumes that ports are already configured
    def verifyPorts(self):
        self.log("Verifying that ports are online...")

        ssh_ucli = self.initUCLI()

        ssh_ucli.sendline("pm")
        ssh_ucli.expect("bf-sde.pm>", timeout=2)

        for port in self.essential_ports:
            self.debug("Checking port %s..." % port)

            portUp = False
            for attempt in range(10):
                ssh_ucli.sendline("show %s" % port)
                i = ssh_ucli.expect([port, pexpect.TIMEOUT], timeout=10)
                assert i == 0, "Port %s was not configured!" % port

                i = ssh_ucli.expect(["UP", "DWN", pexpect.TIMEOUT], timeout=10)

                assert i != 2, "Timeout when checking port status!"
                if i == 1:
                    self.debug("Port %s is down..." % port)
                    time.sleep(5)
                    continue
                elif i == 0:
                    self.debug("Port %s is up!" % port)
                    portUp = True
                    break
            assert portUp, "Port %s did not come alive! Is the host connected and online?" % port

        self.debug("Ports are configured and ready for action!")

    # This assumes that a pipeline is already flashed, and a switchd session is running
    def confPorts(self):
        self.log("Configuring Tofino ports on %s..." % self.host)

        ssh_ucli = self.initUCLI()

        for cmd in self.port_config:
            self.debug(" > %s" % cmd)
            ssh_ucli.sendline(cmd)
            time.sleep(0.1)

        i = ssh_ucli.expect(["bf-sde.bf_pltfm.led", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Failed to enter port config commands!"

        time.sleep(1)  # Give the configuration time to trigger
        self.debug("Ports are now configured.")

        self.verifyPorts()

    def configureNetworking(self):
        self.log("Configuring networking...")

        ssh = self.init_ssh()

        ssh.sendline("./network_setup.sh")
        i = ssh.expect(["$", pexpect.TIMEOUT], timeout=10)
        assert i == 0, "Timeout while running network setup script!"

        # Check an IP assignment
        ssh.sendline("ifconfig ma1")
        i = ssh.expect(["192.168.1.91", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "Network interface failed to configure!"

        # Check one of the ARP rules
        ssh.sendline("arp 192.168.1.87")
        i = ssh.expect(["08:c0:eb:58:92:89", pexpect.TIMEOUT], timeout=2)
        assert i == 0, "ARP rules failed to update!"

        self.debug("Networking is set up.")

    def startController(self):
        self.log("Starting the controller...")

        #TODO: Make this into a parameter or dynamic depending on pipeline
        #file_script = "/home/jonatan/projects/dta/translator/switch_cpu.py"

        self.ssh_controller = self.init_ssh()

        self.ssh_controller.sendline("$SDE/run_bfshell.sh -b ../wly_experiment/dta_codes/translator/switch_cpu.py -i")

        i = self.ssh_controller.expect(["Using SDE_INSTALL", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "bfshell failed to start!"

        i = self.ssh_controller.expect(["DigProc: Starting", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Controller script failed to start!"
        self.debug("Controller script is starting...")

        #TODO: add checks that we hear back from the collector RDMA NIC here!

        i = self.ssh_controller.expect(["Inserting KeyWrite rules...", pexpect.TIMEOUT], timeout=5)
        assert i == 0, "Timeout waiting for KeyWrite preparation!"
        self.debug("Controller is configuring KeyWrite...")

        numConnections = 0
        while True:
            i = self.ssh_controller.expect(["DigProc: Setting up a new RDMA connection from virtual client...", pexpect.TIMEOUT], timeout=10)
            if i == 0:
                numConnections += 1
                self.debug("An RDMA connection is establishing at the translator. Total %i" % numConnections)
            else:
                break

        self.log("There seem to be %i RDMA connections established at the translator" % numConnections)
        assert numConnections > 0, "No RDMA connections were detected at the translator!"

        i = self.ssh_controller.expect(["DigProc: Bootstrap complete", pexpect.TIMEOUT], timeout=60)
        assert i == 0, "Timeout waiting for controller to finish!"
        self.log("Controller bootstrap finished!")

    def ui_compilePipeline(self):
        self.log("Menu for compiling the Translator pipeline.")

        # enable_nack_tracking
        resp = input("Enable NACK tracking? (Y/n): ")
        enable_nack_tracking = resp != "n"

        # num_tracked_nacks
        num_tracked_nacks = 0
        if enable_nack_tracking:
            resp = input("Num tracked NACKs? (Def:65536): ")
            if resp == "":
                num_tracked_nacks = 65536
            else:
                num_tracked_nacks = int(resp)

        # append_batch_size
        resp = input("Size of Append batches? (Def:4): ")
        if resp == "":
            append_batch_size = 4
        else:
            append_batch_size = int(resp)

        # resync_grace_period
        resp = input("Resync grace period? (Def:100000): ")
        if resp == "":
            resync_grace_period = 100000
        else:
            resync_grace_period = int(resp)

        # max_supported_qps
        resp = input("Number of supported QPs? (Def:256): ")
        if resp == "":
            max_supported_qps = 256
        else:
            max_supported_qps = int(resp)

        self.compilePipeline(enable_nack_tracking=enable_nack_tracking, num_tracked_nacks=num_tracked_nacks, append_batch_size=append_batch_size, resync_grace_period=resync_grace_period, max_supported_qps=max_supported_qps)

    def ui_menu(self):
        self.log("Entering menu")

        while True:
            print("1: \t(B)ack")
            print("2: \t(C)ompile pipeline")
            print("3: \t(F)lash pipeline")
            print("4: \tConfigure (p)orts")
            print("5: \tConfigure (n)etworking")
            print("6: \t(S)tart controller")
            print("7: \t(R)eboot")

            option = input("Selection: ").lower()

            if option in ["b", "1"]:
                break

            if option in ["c", "2"]:
                self.ui_compilePipeline()

            if option in ["f", "3"]:
                self.flashPipeline()

            if option in ["p", "4"]:
                self.confPorts()

            if option in ["n", "5"]:
                self.configureNetworking()

            if option in ["s", "6"]:
                self.startController()

            if option in ["r", "7"]:
                self.reboot()
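compilePipeline tunes the P4 program entirely through preprocessor defines. A standalone sketch of the command it assembles for its default arguments (the values come from the method signature above; $P4_BUILD_DIR is assumed to be set by the SDE environment):

# Reconstructs the bf-p4c invocation compilePipeline would emit for its
# default parameters; prints the command rather than running it.
project = "dta_translator"
p4_file = "~/wly_experiment/dta_codes/translator/p4src/dta_translator.p4"

directives = ""
directives += " -DDO_NACK_TRACKING"
directives += " -DNUM_TRACKED_NACKS=%i" % 65536
directives += " -DAPPEND_BATCH_SIZE=%i" % 4
directives += " -DNUM_APPEND_ENTRIES_IN_REGISTERS=%i" % (4 - 1)
directives += " -DAPPEND_RDMA_PAYLOAD_SIZE=%i" % (4 * 4)  # 16B Append payload
directives += " -DQP_RESYNC_PACKET_DROP_NUM=%i" % 100000
directives += " -DMAX_SUPPORTED_QPS=%i" % 256

command = "bf-p4c --target tofino --arch tna --std p4-16 -g -o $P4_BUILD_DIR/%s/ %s %s" % (project, directives, p4_file)
print(command)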
44  wly_experiment/dta_codes/manager/common.py  Normal file
@@ -0,0 +1,44 @@
#!/usr/bin/env python3
# This contains various helper functions

import pexpect
import time
import datetime
import re


def getTime():
    """Return the current time."""

    return datetime.datetime.now()


def log(text):
    """Print a timestamped log message."""

    timestamp_str = getTime()
    fulltext = "%s\t %s" % (timestamp_str, text)

    print(fulltext)


# Convert inputs like 966.95kpps or 1MPPS to a float with the raw PPS
def strToPktrate(rate_str):
    rate_str = rate_str.lower()

    number = float(re.findall(r"[0-9]+\.[0-9]+|[0-9]+", rate_str)[0])

    order = str(re.findall(r"[km]?pps", rate_str)[0])

    #print(number)
    #print(order)
    #print()

    if order == "pps":
        return number
    elif order == "kpps":
        return number * 1000
    elif order == "mpps":
        return number * 1000000

    return None
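A few worked examples of strToPktrate's normalization (the input is lower-cased first, so the unit match is case-insensitive; the return value is raw packets per second as a float):

from common import strToPktrate  # assumes common.py is on the import path

assert strToPktrate("1MPPS") == 1_000_000.0
assert strToPktrate("2kpps") == 2_000.0
assert strToPktrate("250pps") == 250.0
print(strToPktrate("966.95kpps"))  # ~966950.0, subject to float rounding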
BIN  wly_experiment/dta_codes/translator/.pktgen.py.swp  Normal file
Binary file not shown.
18  wly_experiment/dta_codes/translator/README.md  Normal file
@@ -0,0 +1,18 @@
# DTA - Translator
This directory contains the code for the DTA Translator component.

## Prerequisites
You need a fully installed and working Tofino switch.
Please follow the setup guide in the repository root.

## Setup
1. Compile the Translator pipeline code [here](p4src/dta_translator.p4)
2. Update the port mappings in switch_cpu.py to match your physical connections
3. Update the initial RDMA packets in init_rdma_connection.py
4. ...

## Runtime
1. Launch the compiled pipeline on the Tofino ASIC
2. Configure the ports
3. Start the switch-CPU component: `$SDE/run_bfshell.sh -b <script> -i` (replace `<script>` with the path to [switch_cpu.py](switch_cpu.py), e.g., `~/dta_codes/translator/switch_cpu.py`)
4. ...
446
wly_experiment/dta_codes/translator/init_rdma_connection.py
Normal file
446
wly_experiment/dta_codes/translator/init_rdma_connection.py
Normal file
@ -0,0 +1,446 @@
|
||||
#!/usr/bin/env python3
|
||||
# Written by Jonatan Langlet for Direct Telemetry Access
|
||||
#
|
||||
# Notes for those who are here to modify the file:
|
||||
#
|
||||
# Hard-coded magic numbers in here need to be verified if used with another NIC
|
||||
# What I did originally was establish a normal RDMA connection between two NICs and sniffed this connection to figure out the packet contents
|
||||
# You can not just tcpdump this though (since RDMA packets are handled at the NIC without entering the host machine), so you need to dump this in-transit. I used the Tofino to mirror packets to CPU (but any middlebox should work).
|
||||
# These values worked for my Mellanox Bluefield-2 DPU. You need to at least update the destination MAC address if you use another BF2 (here and in P4) (and likely more if you use another Mellanox NIC, and tons if you use non-mellanox).
|
||||
#
|
||||
# I wish you luck.
|
||||
|
||||
#from scapy.all import send, IP, ICMP
|
||||
from scapy.all import *
|
||||
import random
|
||||
import sys
|
||||
import binascii
|
||||
import struct
|
||||
import argparse
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Initiate an RDMA connection with the collector, and write metadata to disk.')
|
||||
parser.add_argument('--port', type=int, default=1337, help='The TCP port to the collector RDMA_CM')
|
||||
parser.add_argument('--dir', type=str, default="/root/RDMA/DTA/Translator/rdma_metadata", help='The directory where to store the RDMA connection metadata')
|
||||
args = parser.parse_args()
|
||||
|
||||
class rocev2_bth(Packet):
|
||||
name = "BTH"
|
||||
fields_desc = [
|
||||
XByteField("opcode", 100),
|
||||
BitField("solicited", 0, 1),
|
||||
BitField("migreq", 1, 1),
|
||||
BitField("padcount", 0, 2),
|
||||
BitField("version", 0, 4),
|
||||
XShortField("pkey", 0xffff),
|
||||
BitField("fecn", 0, 1),
|
||||
BitField("becn", 0, 1),
|
||||
BitField("resv6", 0, 6),
|
||||
BitField("destQP", 1, 24),
|
||||
BitField("ackreq", 0, 1),
|
||||
BitField("resv7", 0, 7),
|
||||
BitField("psn", 52, 24)
|
||||
]
|
||||
|
||||
class rocev2_deth(Packet):
|
||||
name = "DETH"
|
||||
fields_desc = [
|
||||
IntField("queueKey", 0x80010000),
|
||||
ByteField("reserved", 0x00),
|
||||
XBitField("sourceQP", 0x000001, 24)
|
||||
]
|
||||
|
||||
class rocev2_mad(Packet):
|
||||
name = "MAD"
|
||||
fields_desc = [
|
||||
XIntField("part1", 0x01070203),
|
||||
XIntField("part2", 0x0),
|
||||
XIntField("part3", 0x00000006),
|
||||
XIntField("part4", 0x7c313d63),
|
||||
XIntField("part5", 0x00100000),
|
||||
XIntField("part6", 0x30000000),
|
||||
]
|
||||
|
||||
class rocev2_connectRequest(Packet):
|
||||
name = "connectRequest"
|
||||
fields_desc = [
|
||||
XIntField("lComID", 0x633d317c),
|
||||
XIntField("part2", 0x000015b3),
|
||||
XIntField("part3", 0x0),
|
||||
BitField("part4_0", 0x0106, 16),
|
||||
BitField("dstPort", 0x0539, 16), # default: 1337
|
||||
#XIntField("part4", 0x01060539),
|
||||
XIntField("part5", 0xb8cef603),
|
||||
XIntField("part6", 0x00d21326),
|
||||
XIntField("part7", 0x0),
|
||||
XIntField("part8", 0x0),
|
||||
#XIntField("part9", 0x0011b903),
|
||||
XIntField("sourceQP", 0x0011b903), # default: 0x0011b903
|
||||
XIntField("part10", 0x00000003),
|
||||
XIntField("part11", 0x000000b0),
|
||||
XIntField("part12", 0xe6fb20b3),
|
||||
XIntField("part13", 0xffff30f0),
|
||||
XIntField("part14", 0xffffffff),
|
||||
XIntField("part15", 0x0),
|
||||
XIntField("part16", 0x0),
|
||||
XIntField("part17", 0x0000ffff),
|
||||
XIntField("srcIP1", 0xc0a8fd82), # 192.168.1.91
|
||||
XIntField("part19", 0x0),
|
||||
XIntField("part20", 0x0),
|
||||
XIntField("part21", 0x0000ffff),
|
||||
XIntField("dstIP1", 0xc0a80403), # 192.168.4.3
|
||||
XIntField("part23", 0x943e0007),
|
||||
XIntField("part24", 0x00400098),
|
||||
XIntField("part25", 0x0),
|
||||
XIntField("part26", 0x0),
|
||||
XIntField("part27", 0x0),
|
||||
XIntField("part28", 0x0),
|
||||
XIntField("part29", 0x0),
|
||||
XIntField("part30", 0x0),
|
||||
XIntField("part31", 0x0),
|
||||
XIntField("part32", 0x0),
|
||||
XIntField("part33", 0x0),
|
||||
XIntField("part34", 0x0),
|
||||
XIntField("part35", 0x0),
|
||||
XIntField("part36", 0x0040d079),
|
||||
XIntField("part37", 0x0),
|
||||
XIntField("part38", 0x0),
|
||||
XIntField("part39", 0x0),
|
||||
XIntField("srcIP2", 0xc0a8fd82), # Source IP again
|
||||
XIntField("part41", 0x0),
|
||||
XIntField("part42", 0x0),
|
||||
XIntField("part43", 0x0),
|
||||
XIntField("dstIP2", 0xc0a80403), # Destination Ip again
|
||||
XIntField("part45", 0x0),
|
||||
XIntField("part46", 0x0),
|
||||
XIntField("part47", 0x0),
|
||||
XIntField("part48", 0x0),
|
||||
XIntField("part49", 0x0),
|
||||
XIntField("part50", 0x0),
|
||||
XIntField("part51", 0x0),
|
||||
XIntField("part52", 0x0),
|
||||
XIntField("part53", 0x0),
|
||||
XIntField("part54", 0x0),
|
||||
XIntField("part55", 0x0),
|
||||
XIntField("part56", 0x0),
|
||||
XIntField("part57", 0x0),
|
||||
XIntField("part58", 0x0),
|
||||
|
||||
|
||||
]
|
||||
|
||||
class rocev2_readyToUse(Packet):
|
||||
name = "readyToUse"
|
||||
fields_desc = [
|
||||
XIntField("lComID", 0x633d317c), #same as last packet
|
||||
XIntField("rComID", 0x0), #0x1ff69dfd in dumped packet. Has to be according to ConnectReply
|
||||
XIntField("part3", 0x0),
|
||||
XIntField("part4", 0x0),
|
||||
XIntField("part5", 0x0),
|
||||
XIntField("part6", 0x0),
|
||||
XIntField("part7", 0x0),
|
||||
XIntField("part8", 0x0),
|
||||
XIntField("part9", 0x0),
|
||||
XIntField("part10", 0x0),
|
||||
XIntField("part11", 0x0),
|
||||
XIntField("part12", 0x0),
|
||||
XIntField("part13", 0x0),
|
||||
XIntField("part14", 0x0),
|
||||
XIntField("part15", 0x0),
|
||||
XIntField("part16", 0x0),
|
||||
XIntField("part17", 0x0),
|
||||
XIntField("part18", 0x0),
|
||||
XIntField("part19", 0x0),
|
||||
XIntField("part20", 0x0),
|
||||
XIntField("part21", 0x0),
|
||||
XIntField("part22", 0x0),
|
||||
XIntField("part23", 0x0),
|
||||
XIntField("part24", 0x0),
|
||||
XIntField("part25", 0x0),
|
||||
XIntField("part26", 0x0),
|
||||
XIntField("part27", 0x0),
|
||||
XIntField("part28", 0x0),
|
||||
XIntField("part29", 0x0),
|
||||
XIntField("part30", 0x0),
|
||||
XIntField("part31", 0x0),
|
||||
XIntField("part32", 0x0),
|
||||
XIntField("part33", 0x0),
|
||||
XIntField("part34", 0x0),
|
||||
XIntField("part35", 0x0),
|
||||
XIntField("part36", 0x0),
|
||||
XIntField("part37", 0x0),
|
||||
XIntField("part38", 0x0),
|
||||
XIntField("part39", 0x0),
|
||||
XIntField("part40", 0x0),
|
||||
XIntField("part41", 0x0),
|
||||
XIntField("part42", 0x0),
|
||||
XIntField("part43", 0x0),
|
||||
XIntField("part44", 0x0),
|
||||
XIntField("part45", 0x0),
|
||||
XIntField("part46", 0x0),
|
||||
XIntField("part47", 0x0),
|
||||
XIntField("part48", 0x0),
|
||||
XIntField("part49", 0x0),
|
||||
XIntField("part50", 0x0),
|
||||
XIntField("part51", 0x0),
|
||||
XIntField("part52", 0x0),
|
||||
XIntField("part53", 0x0),
|
||||
XIntField("part54", 0x0),
|
||||
XIntField("part55", 0x0),
|
||||
XIntField("part56", 0x0),
|
||||
XIntField("part57", 0x0),
|
||||
XIntField("part58", 0x0),
|
||||
]
|
||||
|
||||
class rocev2_aeth(Packet):
|
||||
name = "AETH"
|
||||
fields_desc = [
|
||||
ByteField("reserved", 0x00),
|
||||
XBitField("msgSeqNum", 0x00, 24),
|
||||
]
|
||||
|
||||
class rocev2_icrc(Packet):
|
||||
name = "iCRC"
|
||||
fields_desc = [
|
||||
XIntField("iCRC", 0)
|
||||
]
|
||||
|
||||
|
||||
def craft_ConnectRequest():
|
||||
sport = 10000
|
||||
srcMac = "6c:ec:5a:62:39:c8"
|
||||
dstMac = "08:c0:eb:58:92:89"
|
||||
pktID = 0x2c70 # stolen from cloned traffic
|
||||
psn = 100
|
||||
dport = int(args.port) # will also be the advertised source QP, to ensure consistent and unique
|
||||
#sport=dport
|
||||
#psn=dport
|
||||
#pktID=dport
|
||||
lComID=dport
|
||||
#sourceQP=dport<<16 #advertise the client QP to be equal to the collector port (just to ensure unique)
|
||||
sourceQP=dport<<8 #fix bitshift, should be correct now
|
||||
|
||||
pkt = Ether(src=srcMac,dst=dstMac)\
|
||||
/IP(src="192.168.1.91",dst="192.168.4.3",id=pktID,flags="DF")\
|
||||
/UDP(dport=dport,sport=sport)\
|
||||
/rocev2_bth(psn=psn)\
|
||||
/rocev2_deth(sourceQP=dport)\
|
||||
/rocev2_mad()\
|
||||
/rocev2_connectRequest(dstPort=dport,sourceQP=sourceQP,lComID=lComID)\
|
||||
/rocev2_icrc()
|
||||
|
||||
return pkt
|
||||
|
||||
def craft_ReadyToUse(rComID):
|
||||
sport = 10000
|
||||
srcMac = "6c:ec:5a:62:39:c8"
|
||||
dstMac = "08:c0:eb:58:92:89"
|
||||
pktID = 0x2c70 #stolen from cloned traffic
|
||||
psn = 100
|
||||
dport = int(args.port) #will also be the advertised source QP, to ensure consistent and unique
|
||||
#sport=dport
|
||||
#psn=dport
|
||||
#pktID=dport
|
||||
lComID=dport
|
||||
|
||||
pkt = Ether(src=srcMac,dst=dstMac)\
|
||||
/IP(src="192.168.1.91",dst="192.168.4.3",id=pktID,flags="DF")\
|
||||
/UDP(dport=dport,sport=sport)\
|
||||
/rocev2_bth(psn=psn)\
|
||||
/rocev2_deth(sourceQP=dport)\
|
||||
/rocev2_mad(part5=0x00140000)\
|
||||
/rocev2_readyToUse(rComID=rComID,lComID=lComID)\
|
||||
/rocev2_icrc()
|
||||
|
||||
return pkt
|
||||
|
||||
def craft_ack(msgSeqNum,qpNum):
|
||||
sport = 10000
|
||||
srcMac = "6c:ec:5a:62:39:c8"
|
||||
dstMac = "08:c0:eb:58:92:89"
|
||||
pktID = 0x2c70 #stolen from cloned traffic
|
||||
psn = 100
|
||||
dport = int(args.port) #will also be the advertised source QP, to ensure consistent and unique
|
||||
#sport=dport
|
||||
#psn=dport
|
||||
#pktID=dport
|
||||
|
||||
pkt = Ether(src=srcMac,dst=dstMac)\
|
||||
/IP(src="192.168.1.91",dst="192.168.4.3",id=pktID,flags="DF")\
|
||||
/UDP(dport=dport,sport=sport)\
|
||||
/rocev2_bth(opcode=0x11,psn=psn,destQP=qpNum)\
|
||||
/rocev2_aeth(msgSeqNum=msgSeqNum)\
|
||||
/rocev2_icrc()
|
||||
|
||||
return pkt
|
||||
|
||||
def process_connectReply(packet):
|
||||
#Manually extract the RoCEv2 header from the packet
|
||||
pkt_bytes = binascii.hexlify(bytes(packet[UDP].payload))
|
||||
bth_bytes = pkt_bytes[0:24] #BTH header size 12B (correct)
|
||||
deth_bytes = pkt_bytes[24:40] #DETH header size 8B (correct)
|
||||
mad_bytes = pkt_bytes[40:88] #MAD header size 23B (correct)
|
||||
CM_connectreply_bytes = pkt_bytes[88:552] #CM ConnectReply header size 231B (correct)
|
||||
icrc_bytes = pkt_bytes[552:560] #iCRC header size 4B (correct)
|
||||
|
||||
print("bth_bytes", bth_bytes)
|
||||
print("deth_bytes", deth_bytes)
|
||||
print("mad_bytes", mad_bytes)
|
||||
print("CM_connectreply_bytes", CM_connectreply_bytes)
|
||||
print("icrc_bytes", icrc_bytes)
|
||||
|
||||
|
||||
#Extract essential values from the packet
|
||||
lComID_offset = 0
|
||||
lComID_size = 8
|
||||
lComID = int(CM_connectreply_bytes[lComID_offset:lComID_offset+lComID_size],16)
|
||||
|
||||
qpNum_offset = 8+8+8
|
||||
qpNum_size = 6
|
||||
qpNum = int(CM_connectreply_bytes[qpNum_offset:qpNum_offset+qpNum_size],16)
|
||||
|
||||
psn_offset = qpNum_offset+6+10
|
||||
psn_size = 6
|
||||
psn = int(CM_connectreply_bytes[psn_offset:psn_offset+psn_size],16)
|
||||
|
||||
return lComID,qpNum,psn

def process_sendAllocatedBuffer(packet):
    print("Processing server metadata packet")

    #Manually extract the RoCEv2 headers from the packet
    pkt_bytes = binascii.hexlify(bytes(packet[UDP].payload))
    bth_bytes = pkt_bytes[0:24] #BTH header, 12B
    payload_bytes = pkt_bytes[24:56] #payload, 16B
    icrc_bytes = pkt_bytes[56:64] #iCRC, 4B

    print("pkt_bytes", pkt_bytes)
    print("bth_bytes", bth_bytes)
    print("payload_bytes", payload_bytes)
    print("icrc_bytes", icrc_bytes)

    #Fix the address endianness and extract the server-allocated memory buffer address
    addr1 = payload_bytes[0:2]
    addr2 = payload_bytes[2:4]
    addr3 = payload_bytes[4:6]
    addr4 = payload_bytes[6:8]
    addr5 = payload_bytes[8:10]
    addr6 = payload_bytes[10:12]
    addr7 = payload_bytes[12:14]
    addr8 = payload_bytes[14:16]
    memory_start = addr8+addr7+addr6+addr5+addr4+addr3+addr2+addr1
    print("memory_start:", hex(int(memory_start,16)))

    len1 = payload_bytes[16:18]
    len2 = payload_bytes[18:20]
    len3 = payload_bytes[20:22]
    len4 = payload_bytes[22:24]
    memory_length = len4+len3+len2+len1
    print("memory_length:", hex(int(memory_length,16)))

    rkey1 = payload_bytes[24:26]
    rkey2 = payload_bytes[26:28]
    rkey3 = payload_bytes[28:30]
    rkey4 = payload_bytes[30:32]
    remote_key = rkey4+rkey3+rkey2+rkey1
    print("remote_key:", hex(int(remote_key,16)))

    return int(memory_start,16), int(memory_length,16), int(remote_key,16)
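
# Equivalent sketch: the byte-reversals above undo the little-endian layout of the 16B
# payload (8B address, 4B length, 4B rkey), so the same values can be unpacked directly:
#   import struct
#   raw = bytes(packet[UDP].payload)[12:28]  # 16B payload after the 12B BTH
#   memory_start, memory_length, remote_key = struct.unpack("<QII", raw)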


num_processed_rocev2 = 0
qpNum = 0
psn = 0
memory_start = 0
memory_length = 0
remote_key = 0

def informOfMetadata():
    print("The QP number is: %i" %qpNum)
    print("The initial PSN is: %i" %psn)
    print("Memory start address in collector: %u" %memory_start)
    print("Memory length in collector: %u" %memory_length)
    print("The remote key is: %u" %remote_key)

    path = args.dir
    #create the path
    try:
        os.makedirs(path)
    except:
        pass

    print("Writing RDMA connection metadata to %s" %path)

    #These tmp_* files are read back by the switch-local controller (getCollectorMetadata in switch_cpu.py)
    f = open("%s/tmp_qpnum"%path, "w")
    f.write(str(qpNum))
    f.close()

    f = open("%s/tmp_psn"%path, "w")
    f.write(str(psn))
    f.close()

    f = open("%s/tmp_memaddr"%path, "w")
    f.write(str(memory_start))
    f.close()

    f = open("%s/tmp_memlen"%path, "w")
    f.write(str(memory_length))
    f.close()

    f = open("%s/tmp_rkey"%path, "w")
    f.write(str(remote_key))
    f.close()

def process_rocev2(packet):
    global num_processed_rocev2,qpNum,psn,memory_start,remote_key,memory_length

    num_processed_rocev2 += 1
    print("num_processed_rocev2", num_processed_rocev2)

    print(binascii.hexlify(bytes(packet[UDP].payload)))

    #If this is the first RoCEv2 packet, it must be the ConnectReply
    if num_processed_rocev2 == 1:
        lComID,qpNum,psn = process_connectReply(packet)

        reply = craft_ReadyToUse(lComID)

        sendp(reply, iface="ma1")
    elif num_processed_rocev2 == 2:
        memory_start,memory_length,remote_key = process_sendAllocatedBuffer(packet)

        informOfMetadata()

        #Send back an ack, and we're done!
        pkt_ack = craft_ack(num_processed_rocev2,qpNum)
        sendp(pkt_ack, iface="ma1")

    else:
        print("Unexpected RoCEv2 packet. Ignoring")
        pkt_ack = craft_ack(num_processed_rocev2,qpNum)
        #sendp(pkt_ack, iface="ma1")

    print()


#Start sniffing for incoming RoCEv2 traffic
sniffer = AsyncSniffer(filter="inbound and udp", iface='ma1', prn=process_rocev2)

#Create a connection request packet
pkt = craft_ConnectRequest()

print("Starting sniffer...")
sniffer.start()

print("Waiting 1 sec before continuing")
time.sleep(1)

print("Sending packet", pkt)
sendp(pkt, iface="ma1")

print("Waiting 10 sec to allow setup to complete")
time.sleep(10)

sniffer.stop()
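
# Exchange driven by this script, as implemented above:
#   1. send CM ConnectRequest                 -> collector
#   2. recv CM ConnectReply                   <- collector (yields lComID, QP number, PSN)
#   3. send CM ReadyToUse                     -> collector
#   4. recv SEND carrying the buffer metadata <- collector (address, length, rkey)
#   5. send ACK, and write the metadata files under args.dir for the switch controller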
178
wly_experiment/dta_codes/translator/inject_dta.py
Normal file
@ -0,0 +1,178 @@
#!/usr/bin/env python3
# Written by Jonatan Langlet for Direct Telemetry Access
# Injects synthetic DTA report packets into the Translator. Too slow for benchmarking, but well suited for testing the translation capability.

#from scapy.all import send, IP, ICMP
from scapy.all import *
import random
import sys
import binascii
import struct
import argparse
import time

parser = argparse.ArgumentParser(description='Inject a DTA Key-Write packet into the Tofino ASIC.')
parser.add_argument('operation', type=str, nargs='+', choices=["keywrite", "append"], help='The DTA operation')
parser.add_argument('--data', type=int, nargs='+', help='The telemetry data')
parser.add_argument('--key', type=int, nargs='+', help='The telemetry key for KeyWrite operations')
parser.add_argument('--redundancy', type=int, nargs='+', help='The telemetry redundancy for KeyWrite operations')
parser.add_argument('--listID', type=int, nargs='+', help='The telemetry list ID for Append operations')
parser.add_argument('--loop', action='store_true', help='Indicates that the script should loop, generating traffic continuously')
parser.add_argument('--increment_data', action='store_true', help='Indicates that the data value should increment, if looping is enabled')
parser.add_argument('--increment_key', action='store_true', help='Indicates that the key-write key should increment, if looping is enabled')
parser.add_argument('--ipg', type=float, default=0.0, help='The IPG to replay traffic at, if emitting multiple packets simultaneously')
parser.add_argument('--batchsize', type=int, default=1, help='The batch size to use when --loop is enabled')

#args = vars(parser.parse_args())
args = parser.parse_args()
print(args)


class dta_base(Packet):
    name = "dtaBase"
    fields_desc = [
        XByteField("opcode", 0x01),
        XByteField("seqnum", 0), # DTA sequence number
        BitField("immediate", 0, 1),
        BitField("retransmitable", 0, 1),
        BitField("reserved", 0, 6)
    ]

class dta_keyWrite(Packet):
    name = "dtaKeyWrite"
    fields_desc = [
        ByteField("redundancy", 0x02),
        # IntField("key", 0),
        IntField("key", 0),
        IntField("data", 0)
    ]

class dta_append(Packet):
    name = "dtaAppend"
    fields_desc = [
        IntField("listID", 0),
        IntField("data", 0)
    ]
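
# Wire layout implied by the field definitions above:
#   dta_base:     opcode(1B) | seqnum(1B) | immediate(1b) retransmitable(1b) reserved(6b) = 3B
#   dta_keyWrite: redundancy(1B) | key(4B) | data(4B)                                     = 9B
#   dta_append:   listID(4B) | data(4B)                                                   = 8B
# Quick sanity check (a sketch, not part of the injection path):
#   assert len(bytes(dta_base()/dta_keyWrite())) == 12
#   assert len(bytes(dta_base()/dta_append())) == 11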

def craft_dta_keywrite(key, data, redundancy):
    ''' Craft a DTA Key-Write packet from key, data, and redundancy '''

    print("Crafting a keywrite packet with key:%i data:%i, redundancy:%i" %(key,data,redundancy))

    key_bin = struct.pack(">I", key)

    pkt = Ether(dst="08:c0:eb:58:92:89")\
        /IP(src="192.168.1.91", dst="192.168.4.3")\
        /UDP(sport=40041, dport=40040)\
        /dta_base(opcode=0x01)\
        /dta_keyWrite(redundancy=redundancy, key=RawVal(key_bin), data=data)
    #/dta_keyWrite(redundancy=redundancy, key=key, data=data)

    return pkt

def craft_dta_append(listID, data):
    ''' Craft a DTA Append packet from listID and data '''

    print("Crafting an append packet with listID:%i data:%i" %(listID,data))

    pkt = Ether(dst="08:c0:eb:58:92:89")\
        /IP(src="192.168.1.91", dst="192.168.4.3")\
        /UDP(sport=40041, dport=40040)\
        /dta_base(opcode=0x02)\
        /dta_append(listID=listID, data=data)

    return pkt

def emitPacket(pkts):
    ''' Send the given packet(s), spaced by the configured IPG (inter-packet gap), if one is set '''

    if not isinstance(pkts, list):
        pkts = [pkts] #normalize, so len() reports a packet count rather than a byte length

    ipg = args.ipg
    print("Sending %i DTA packet(s) at ipg:%.3f" % (len(pkts), ipg))
    sendp(pkts, inter=ipg, iface="ma1")


# DTA Key-Write packet test
if args.operation[0] == "keywrite":
    print("Crafting a DTA KeyWrite packet...")

    assert args.key, "No telemetry key specified!"
    assert args.redundancy, "No telemetry redundancy specified!"
    assert args.data, "No telemetry data specified!"

    # Pull in the parameters needed to craft the Key-Write packet
    key = args.key[0]
    data = args.data[0]
    redundancy = args.redundancy[0]
    doLoop = args.loop
    increment_data = args.increment_data
    increment_key = args.increment_key

    # If looping is enabled, keep generating (and optionally incrementing) packets; useful for reliability tests
    if doLoop:
        # Number of packets per batch
        batchSize = args.batchsize
        print("Looping enabled with batchsize %i" % batchSize)

        # Craft batchSize Key-Write packets at a time and send them together
        pkts = []
        while True:
            pkt = craft_dta_keywrite(key=key, data=data, redundancy=redundancy)

            pkts.append(pkt)

            if len(pkts) >= batchSize:
                emitPacket(pkts)
                pkts = []
                print("sleeping 1 sec before next batch...")
                time.sleep(1)

            # Increment the key and data for the next packet
            if increment_key:
                key = key + 1
            if increment_data:
                data = data + 1
            print("Incremented key:%i and data:%i" %(key,data))
    # Default path: craft and send a single packet
    else:
        print("Looping disabled, sending single packet")
        pkt = craft_dta_keywrite(key=key, data=data, redundancy=redundancy)
        emitPacket(pkt)

# DTA Append packet test
if args.operation[0] == "append":
    print("Crafting a DTA Append packet...")

    assert args.listID, "No telemetry list ID specified!"
    assert args.data, "No telemetry data specified!"

    # Pull in the parameters needed to craft the Append packet
    doLoop = args.loop
    listID = args.listID[0]
    data = args.data[0]
    increment_data = args.increment_data

    # If looping is enabled, keep generating (and optionally incrementing) packets; useful for reliability tests
    if doLoop:
        batchSize = args.batchsize
        print("Looping enabled with batchsize %i" %batchSize)

        pkts = []
        while True:
            pkt = craft_dta_append(listID=listID, data=data)

            pkts.append(pkt)

            if len(pkts) >= batchSize:
                emitPacket(pkts)
                pkts = []

            if increment_data:
                data = data + 1
                print("Incremented data:%i" %(data))
    # Default path: craft and send a single packet
    else:
        pkt = craft_dta_append(listID=listID, data=data)
        emitPacket(pkt)
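
# Example invocations (the first matches the controller's keep-alive comment in switch_cpu.py;
# the second is a hypothetical looping run composed from the flags defined above):
#   ./inject_dta.py keywrite --data 10000 --key 0 --redundancy 1
#   ./inject_dta.py append --listID 1 --data 42 --loop --increment_data --batchsize 32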
86
wly_experiment/dta_codes/translator/l3_setup.py
Normal file
@ -0,0 +1,86 @@
from ipaddress import ip_address

p4 = bfrt.simple_l3.pipe

# This function can clear all the tables and later on other fixed objects
# once bfrt support is added.
def clear_all(verbose=True, batching=True):
    global p4
    global bfrt

    def _clear(table, verbose=False, batching=False):
        if verbose:
            print("Clearing table {:<40} ... ".
                  format(table['full_name']), end='', flush=True)
        try:
            entries = table['node'].get(regex=True, print_ents=False)
            try:
                if batching:
                    bfrt.batch_begin()
                for entry in entries:
                    entry.remove()
            except Exception as e:
                print("Problem clearing table {}: {}".format(
                      table['name'], e.sts))
            finally:
                if batching:
                    bfrt.batch_end()
        except Exception as e:
            if e.sts == 6:
                if verbose:
                    print('(Empty) ', end='')
        finally:
            if verbose:
                print('Done')

        # Optionally reset the default action, but not all tables
        # have that
        try:
            table['node'].reset_default()
        except:
            pass

    # The order is important. We do want to clear from the top, i.e.
    # delete objects that use other objects, e.g. table entries use
    # selector groups and selector groups use action profile members

    # Clear Match Tables
    for table in p4.info(return_info=True, print_info=False):
        if table['type'] in ['MATCH_DIRECT', 'MATCH_INDIRECT_SELECTOR']:
            _clear(table, verbose=verbose, batching=batching)

    # Clear Selectors
    for table in p4.info(return_info=True, print_info=False):
        if table['type'] in ['SELECTOR']:
            _clear(table, verbose=verbose, batching=batching)

    # Clear Action Profiles
    for table in p4.info(return_info=True, print_info=False):
        if table['type'] in ['ACTION_PROFILE']:
            _clear(table, verbose=verbose, batching=batching)

#clear_all()

ipv4_host = p4.Ingress.ipv4_host
ipv4_host.add_with_send(dst_addr=ip_address('192.168.4.3'), port=180)
ipv4_host.add_with_send(dst_addr=ip_address('192.168.3.3'), port=148)
ipv4_host.add_with_send(dst_addr=ip_address('192.168.1.91'), port=64)

# mac_host = p4.Ingress.mac_host
# mac_host.add_with_send(dst_addr='08:c0:eb:58:92:89', port=180)
# mac_host.add_with_send(dst_addr='08:c0:eb:e3:b8:52', port=148)

bfrt.complete_operations()

# Final programming
print("""
******************* PROGRAMMING RESULTS *****************
""")
print("Table ipv4_host:")
ipv4_host.dump(table=True)
# print("Table mac_host:")
# mac_host.dump(table=True)
2083
wly_experiment/dta_codes/translator/p4src/dta_translator.p4
Normal file
File diff suppressed because it is too large
257
wly_experiment/dta_codes/translator/p4src/simple_l3.p4
Normal file
@ -0,0 +1,257 @@
/* -*- P4_16 -*- */

#include <core.p4>
#include <tna.p4>

/*************************************************************************
 ************* C O N S T A N T S    A N D   T Y P E S  *******************
**************************************************************************/
const bit<16> ETHERTYPE_TPID = 0x8100;
const bit<16> ETHERTYPE_IPV4 = 0x0800;

/* Table Sizes */
const int IPV4_HOST_SIZE = 65536;

#ifdef USE_ALPM
const int MAC_SIZE = 400*1024;
#else
const int MAC_SIZE = 12288;
#endif

/*************************************************************************
 ***********************  H E A D E R S  *********************************
 *************************************************************************/

/* Define all the headers the program will recognize */
/* The actual sets of headers processed by each gress can differ */

/* Standard ethernet header */
header ethernet_h {
    bit<48> dst_addr;
    bit<48> src_addr;
    bit<16> ether_type;
}

header vlan_tag_h {
    bit<3>  pcp;
    bit<1>  cfi;
    bit<12> vid;
    bit<16> ether_type;
}

header ipv4_h {
    bit<4>  version;
    bit<4>  ihl;
    bit<8>  diffserv;
    bit<16> total_len;
    bit<16> identification;
    bit<3>  flags;
    bit<13> frag_offset;
    bit<8>  ttl;
    bit<8>  protocol;
    bit<16> hdr_checksum;
    bit<32> src_addr;
    bit<32> dst_addr;
}

/*************************************************************************
 **************  I N G R E S S   P R O C E S S I N G   *******************
 *************************************************************************/

/***********************  H E A D E R S  ************************/

struct my_ingress_headers_t {
    ethernet_h ethernet;
    vlan_tag_h vlan_tag;
    ipv4_h     ipv4;
}

/******  G L O B A L   I N G R E S S   M E T A D A T A  *********/

struct my_ingress_metadata_t {
}

/***********************  P A R S E R  **************************/
parser IngressParser(packet_in pkt,
    /* User */
    out my_ingress_headers_t hdr,
    out my_ingress_metadata_t meta,
    /* Intrinsic */
    out ingress_intrinsic_metadata_t ig_intr_md)
{
    /* This is a mandatory state, required by Tofino Architecture */
    state start {
        pkt.extract(ig_intr_md);
        pkt.advance(PORT_METADATA_SIZE);
        transition parse_ethernet;
    }

    state parse_ethernet {
        pkt.extract(hdr.ethernet);
        transition select(hdr.ethernet.ether_type) {
            ETHERTYPE_TPID: parse_vlan_tag;
            ETHERTYPE_IPV4: parse_ipv4;
            default: accept;
        }
    }

    state parse_vlan_tag {
        pkt.extract(hdr.vlan_tag);
        transition select(hdr.vlan_tag.ether_type) {
            ETHERTYPE_IPV4: parse_ipv4;
            default: accept;
        }
    }

    state parse_ipv4 {
        pkt.extract(hdr.ipv4);
        transition accept;
    }

}

/***************** M A T C H - A C T I O N  *********************/

control Ingress(
    /* User */
    inout my_ingress_headers_t hdr,
    inout my_ingress_metadata_t meta,
    /* Intrinsic */
    in ingress_intrinsic_metadata_t ig_intr_md,
    in ingress_intrinsic_metadata_from_parser_t ig_prsr_md,
    inout ingress_intrinsic_metadata_for_deparser_t ig_dprsr_md,
    inout ingress_intrinsic_metadata_for_tm_t ig_tm_md)
{
    action send(PortId_t port) {
        ig_tm_md.ucast_egress_port = port;
#ifdef BYPASS_EGRESS
        ig_tm_md.bypass_egress = 1;
#endif
    }

    action drop() {
        ig_dprsr_md.drop_ctl = 1;
    }

    table ipv4_host {
        key = { hdr.ipv4.dst_addr : exact; }
        actions = {
            send; drop;
#ifdef ONE_STAGE
            @defaultonly NoAction;
#endif /* ONE_STAGE */
        }

#ifdef ONE_STAGE
        const default_action = NoAction();
#endif /* ONE_STAGE */

        size = IPV4_HOST_SIZE;
    }

#if defined(USE_ALPM)
    @alpm(1)
    @alpm_partitions(2048)
#endif
    table mac_host {
        key = { hdr.ethernet.dst_addr : exact; }
        actions = { send; drop; }

        default_action = send(64);
        size = MAC_SIZE;
    }

    apply {
        if (!mac_host.apply().hit) {
            ipv4_host.apply();
        }
    }
}

/*********************  D E P A R S E R  ************************/

control IngressDeparser(packet_out pkt,
    /* User */
    inout my_ingress_headers_t hdr,
    in my_ingress_metadata_t meta,
    /* Intrinsic */
    in ingress_intrinsic_metadata_for_deparser_t ig_dprsr_md)
{
    apply {
        pkt.emit(hdr);
    }
}


/*************************************************************************
 ****************  E G R E S S   P R O C E S S I N G   *******************
 *************************************************************************/

/***********************  H E A D E R S  ************************/

struct my_egress_headers_t {
}

/********  G L O B A L   E G R E S S   M E T A D A T A  *********/

struct my_egress_metadata_t {
}

/***********************  P A R S E R  **************************/

parser EgressParser(packet_in pkt,
    /* User */
    out my_egress_headers_t hdr,
    out my_egress_metadata_t meta,
    /* Intrinsic */
    out egress_intrinsic_metadata_t eg_intr_md)
{
    /* This is a mandatory state, required by Tofino Architecture */
    state start {
        pkt.extract(eg_intr_md);
        transition accept;
    }
}

/***************** M A T C H - A C T I O N  *********************/

control Egress(
    /* User */
    inout my_egress_headers_t hdr,
    inout my_egress_metadata_t meta,
    /* Intrinsic */
    in egress_intrinsic_metadata_t eg_intr_md,
    in egress_intrinsic_metadata_from_parser_t eg_prsr_md,
    inout egress_intrinsic_metadata_for_deparser_t eg_dprsr_md,
    inout egress_intrinsic_metadata_for_output_port_t eg_oport_md)
{
    apply {
    }
}

/*********************  D E P A R S E R  ************************/

control EgressDeparser(packet_out pkt,
    /* User */
    inout my_egress_headers_t hdr,
    in my_egress_metadata_t meta,
    /* Intrinsic */
    in egress_intrinsic_metadata_for_deparser_t eg_dprsr_md)
{
    apply {
        pkt.emit(hdr);
    }
}


/************ F I N A L   P A C K A G E ******************************/
Pipeline(
    IngressParser(),
    Ingress(),
    IngressDeparser(),
    EgressParser(),
    Egress(),
    EgressDeparser()
) pipe;

Switch(pipe) main;
20
wly_experiment/dta_codes/translator/pktgen.py
Normal file
@ -0,0 +1,20 @@
#!/usr/bin/env python

#from scapy.all import send, IP, ICMP
from scapy.all import *
import random
import sys

# This script just builds an IP packet (carrying a packet ID) and sends it from the source host to the corresponding destination host
if len(sys.argv) == 1:
    pktID = random.randint(1,1000)
    print("Using random pktID=%i" %pktID)
else:
    pktID = int(sys.argv[1])
    print("Using pktID=%i" %pktID)

pkt = Ether() / IP(src="192.168.1.91", dst="192.168.1.87", id=pktID, ttl=255)

print("Sending packet", pkt)
sendp(pkt, iface="ma1")
258
wly_experiment/dta_codes/translator/send_rdma_synthetic.py
Normal file
@ -0,0 +1,258 @@
#!/usr/bin/env python3

# from scapy.all import send, IP, ICMP
from scapy.all import *
# from scapy.contrib import roce
import random
import sys
import struct
import time
import binascii
import ipaddress


# dstMAC = "56:2B:95:DB:33:39"
# dstMAC = "b8:ce:f6:61:a0:f6"
# dstMAC = "ff:ff:ff:ff:ff:ff"
# dstMAC = "b8:ce:f6:61:9f:96" # host

# dstMAC = "b8:ce:f6:61:9f:96" # host13
dstMAC = "b8:ce:f6:61:9f:9a" # dpu13

srcIP = "11.11.11.1"
dstIP = "10.1.0.3"

# rocev2_port = 4791 # Default RoCEv2=4791
rocev2_port = 5000

# PSNs are carried in the Base Transport Header (BTH) of every packet. They are used to detect lost or
# out-of-order packets and, for reliable services, to associate a response packet with a given request packet.
# Each IBA QP consists of a send queue and a receive queue; likewise, an EE context has a send side and a receive side.
# There is a relationship between the PSNs on a requester's send queue and the PSNs on the responder's
# corresponding receive queue. Each half of a QP (or EE context) therefore maintains an independent PSN;
# the PSNs used on the send and receive queues of a given queue pair, or on different connections, are unrelated.


class BTH(Packet):
    ''' Field definitions for the RDMA BTH (Base Transport Header) '''

    name = "BTH"
    fields_desc = [
        ByteField("opcode", 0),
        BitField("solicitedEvent", 0, 1),
        BitField("migReq", 0, 1),
        BitField("padCount", 0, 2),
        BitField("transportHeaderVersion", 0, 4),
        XShortField("partitionKey", 0),
        XByteField("reserved1", 0),
        ThreeBytesField("destinationQP", 0),
        BitField("ackRequest", 0, 1),
        BitField("reserved2", 0, 7),
        ThreeBytesField("packetSequenceNumber", 0)
    ]


class RETH(Packet):
    ''' Field definitions for the RDMA RETH (RDMA Extended Transport Header), which carries the additional
    transport fields of RDMA operations. The RETH is only present in the first (or only) packet of an RDMA request '''

    # virtualAddress is the start address of the buffer
    name = "RETH"
    fields_desc = [
        BitField("virtualAddress", 0, 64),
        IntField("rKey", 0),
        IntField("dmaLength", 0)
    ]


class iCRC(Packet):
    ''' Field definition for the RDMA ICRC (invariant CRC), which covers the fields of the packet that do not change from source to destination '''

    name = "iCRC"
    fields_desc = [
        IntField("iCRC", 0)
    ]
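
# On-wire sizes implied by the field definitions above (matching the RoCEv2 header sizes):
#   BTH = 12B, RETH = 16B, iCRC = 4B
# Quick sanity check (a sketch):
#   assert len(bytes(BTH())) == 12 and len(bytes(RETH())) == 16 and len(bytes(iCRC())) == 4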

# The PSN is the sequence number of a RoCEv2 packet; a gap in the PSN sequence reveals a lost packet, in which case a NAK is returned
packetSequenceNumber = 0

def makeRocev2Write(payload=0xdeadbeef, address=0x0):
    ''' Craft an RDMA Write packet with a 32-bit payload '''

    global packetSequenceNumber
    # In the InfiniBand framework, isolation between specific nodes is provided by creating a "Virtual Fabric"
    # in which only those nodes can communicate; this is called partitioning. A partition is identified and
    # enforced by a partition key (P_Key), a 16-bit value in two parts:
    # The msb is the membership bit:
    #   0: limited membership
    #   1: full membership
    # The lower 15 bits are the key portion which defines the partition value
    # Only QPs configured with the same partition can communicate, and at least one of them must be a full member of the partition
    partitionKey = 0
    # Destination Queue Pair: identifies a RoCEv2 flow, analogous to the destination port (Dport) of a RoCEv2 packet
    destinationQP = 0
    # The dmaLength field of the RETH header is the length of the DMA operation, in bytes
    dmaLength = 32
    virtualAddress = address # start virtual memory address of the RDMA buffer
    rKey = 0 # remote key of the registered MR, authorizing other hosts to access the registered memory region on the local host

    iCRC_checksum = 0 #TODO: calculate this? Or ignore?

    payload = struct.pack(">I", payload)
    # virtualAddress = struct.pack(">Q", virtualAddress)

    packetSequenceNumber = packetSequenceNumber + 1

    pkt = Ether(src="b8:ce:f6:61:a0:f2", dst=dstMAC)
    pkt = pkt / IP(src=srcIP,dst=dstIP,ihl=5,flags=0b010,proto=0x11)
    pkt = pkt / UDP(sport=0xc0de,dport=rocev2_port,chksum=0)
    pkt = pkt / BTH(opcode=0b01010,partitionKey=partitionKey,destinationQP=destinationQP, packetSequenceNumber=packetSequenceNumber) # WRITE-ONLY RDMA packet
    pkt = pkt / RETH(dmaLength=dmaLength,virtualAddress=virtualAddress,rKey=rKey)
    pkt = pkt / Raw(payload)
    pkt = pkt / iCRC(iCRC=iCRC_checksum)

    return pkt


def calc_iCRC(pkt):
    ''' Calculate the iCRC checksum '''

    #pkt_icrc_mod = pkt.copy()

    print("Calculating iCRC on packet", pkt)

    print("version", pkt["IP"].version)
    print("ihl", pkt["IP"].ihl)

    #CRC part 1
    crc_part_1 = struct.pack("!Q", 0xffffffffffffffff)

    #CRC part 2
    tmp1 = (pkt["IP"].version<<4) + (pkt["IP"].ihl)
    print("tmp1: %i(0x%x)" %(tmp1, tmp1) )
    tmp2 = (pkt["IP"].flags<<3) + (pkt["IP"].frag)
    print("tmp2: %i(0x%x)" %(tmp2, tmp2) )
    print(pkt["IP"].len)
    crc_part_2 = struct.pack("!BBHH", tmp1, 0xff, pkt["IP"].len, tmp2 )

    #CRC part 3
    srcIP = int(ipaddress.ip_address(pkt["IP"].src))
    print("srcIP", srcIP)
    crc_part_3 = struct.pack("!BBHI", 0xff, pkt["IP"].proto, 0xffff, srcIP )

    #CRC part 4
    dstIP = int(ipaddress.ip_address(pkt["IP"].dst))
    print("dstIP", dstIP)
    crc_part_4 = struct.pack("!IHH", dstIP, pkt["UDP"].sport, pkt["UDP"].dport )

    #CRC part 5
    tmp3 = (pkt["BTH"].solicitedEvent<<(4+2+1)) + (pkt["BTH"].migReq<<(4+2)) + (pkt["BTH"].padCount<<4) + (pkt["BTH"].transportHeaderVersion)
    print(pkt["UDP"].len, 0xffff, pkt["BTH"].opcode, tmp3, pkt["BTH"].partitionKey)
    crc_part_5 = struct.pack("!HHBBH", pkt["UDP"].len, 0xffff, pkt["BTH"].opcode, tmp3, pkt["BTH"].partitionKey )

    #CRC part 6
    dqp_1_1B = pkt["BTH"].destinationQP>>16
    dqp_2_2B = pkt["BTH"].destinationQP&0xffff
    tmp4 = (pkt["BTH"].ackRequest<<7) + (pkt["BTH"].reserved2)
    psn_1_1B = pkt["BTH"].packetSequenceNumber>>16
    psn_2_2B = pkt["BTH"].packetSequenceNumber&0xffff
    crc_part_6 = struct.pack("!BBHBBH", 0xff, dqp_1_1B, dqp_2_2B, tmp4, psn_1_1B, psn_2_2B)
    print("crc_part_1", crc_part_1)
    print("crc_part_2", crc_part_2)
    print("crc_part_3", crc_part_3)
    print("crc_part_4", crc_part_4)
    print("crc_part_5", crc_part_5)
    print("crc_part_6", crc_part_6)

    crc_indata_full = crc_part_1+crc_part_2+crc_part_3+crc_part_4+crc_part_5+crc_part_6

    print("crc_indata_full", crc_indata_full)

    output = binascii.crc32( crc_indata_full )
    print("iCRC checksum: ", output)
    return output
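
# Note on the constants packed above (following the RoCEv2 invariant-CRC rule): the CRC is
# seeded with 64 bits of 1s (crc_part_1) and computed from the IP header onward, with the
# variant fields -- IP TOS, TTL and header checksum, the UDP checksum, and the BTH reserved
# byte -- replaced by all-1s, which is what the 0xff/0xffff fillers implement.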


def makeRocev2Send():
    ''' Craft an RDMA SEND packet '''

    global packetSequenceNumber
    partitionKey = 0
    destinationQP = 0
    rKey = 0x42069 # remote key of the registered MR, authorizing other hosts to access the registered memory region on the local host

    packetSequenceNumber = packetSequenceNumber + 1

    pkt = Ether(src="b8:ce:f6:61:a0:f2", dst=dstMAC)
    pkt = pkt / IP(src=srcIP, dst=dstIP, ihl=5, flags=0b010, proto=0x11)
    pkt = pkt / UDP(sport=0xc0de,dport=rocev2_port,chksum=0)
    # BTH opcode field: WRITE-ONLY: 0b01010, SEND-ONLY: 0b00100
    pkt = pkt / BTH(opcode=0b00100,partitionKey=partitionKey,destinationQP=destinationQP, packetSequenceNumber=packetSequenceNumber)

    #Force some field updates...
    pkt["IP"].len = 44 # 48?
    pkt["UDP"].len = 24

    iCRC_checksum = calc_iCRC(pkt)

    pkt = pkt / iCRC(iCRC=iCRC_checksum)

    pkt.show2()

    return pkt


def makeIPPacket():
    ''' Craft an IP packet '''

    pkt = Ether(src="b8:ce:f6:61:a0:f2",dst=dstMAC)
    pkt = pkt / IP(src=srcIP,dst=dstIP)
    return pkt


def makeUDPPacket():
    ''' Craft a UDP packet '''

    pkt = Ether(src="b8:ce:f6:61:a0:f2",dst=dstMAC)
    pkt = pkt / IP(src=srcIP,dst=dstIP) / UDP()
    return pkt


pkt = makeRocev2Send()
print("Sending packet", pkt)
sendp(pkt, iface="enp4s0f0")
wrpcap("rocev2_send_pkt.pcap", pkt)


'''
numFlows = 5
flowHashes = []
for flowID in range(numFlows):
    flowHash = random.randint(0,2**64-1)
    flowHashes.append(flowHash)

#Send traffic
flowID = 0
while True:
    flowID += 1
    if flowID >= numFlows:
        flowID = 0

    payload = flowID
    address = flowHashes[flowID]

    print("Transmitting telemetry data. Flow:%i, payload:%i, hash:%i" %(flowID, payload, address))

    pkt = makeRocev2Write(payload=payload, address=address)
    print("Sending packet", pkt)
    sendp(pkt, iface="enp4s0f0")

    time.sleep(0.5)
'''
422
wly_experiment/dta_codes/translator/switch_cpu.py
Normal file
@ -0,0 +1,422 @@
#This is the switch-local controller for the DTA translator
#Written by Jonatan Langlet for Direct Telemetry Access
import datetime
import ipaddress
import hashlib
import struct
import os
p4 = bfrt.dta_translator.pipe
mirror = bfrt.mirror
pre = bfrt.pre

logfile = "/root/wly_experiment/dta_results/dta_translator.log"

# Keyword arguments to the add_with_XXX() functions must be lowercase; otherwise they are not recognized and the call fails

# Static forwarding rules, matching the testbed topology
forwardingRules = [
    ("192.168.1.91", 64),  # Tofino CPU
    ("192.168.3.3", 148),  # Generator
    ("192.168.4.3", 180)   # Collector
]

# Map collector destination IPs to egress ports (make sure mcRules exist for all of these ports); used by the Key-Write and Key-Increment primitives
collectorIPtoPorts = [
    ("192.168.4.3", 180),
]

# Size of each slot in the Key-Write primitive, 8 bytes by default (4B+4B); keep this consistent with the definition in the P4 code
keywrite_slot_size_B = 8

# Size of each slot in the Postcarder primitive, 32 bytes by default (5x4B+12B), where the 12 bytes are padding
# The padding keeps the size a power of two, which the P4 implementation requires
postcarder_slot_size_B = 32

# Number of data lists
num_data_lists = 4

# Multicast rules, mapping egress port and redundancy level to a multicast group ID
mcRules = [
    {"mgid": 1,  "egressPort": 148, "redundancy": 1},
    {"mgid": 2,  "egressPort": 148, "redundancy": 2},
    {"mgid": 3,  "egressPort": 148, "redundancy": 3},
    {"mgid": 4,  "egressPort": 148, "redundancy": 4},
    {"mgid": 5,  "egressPort": 64,  "redundancy": 1},
    {"mgid": 6,  "egressPort": 64,  "redundancy": 2},
    {"mgid": 7,  "egressPort": 64,  "redundancy": 3},
    {"mgid": 8,  "egressPort": 64,  "redundancy": 4},
    {"mgid": 9,  "egressPort": 180, "redundancy": 1},
    {"mgid": 10, "egressPort": 180, "redundancy": 2},
    {"mgid": 11, "egressPort": 180, "redundancy": 3},
    {"mgid": 12, "egressPort": 180, "redundancy": 4}
]
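
# Example resolution, as performed by insertKeyWriteRules() below: a Key-Write toward the
# collector port 180 with redundancy 2 matches the entry {"mgid": 10, "egressPort": 180,
# "redundancy": 2}, so the packet is replicated twice via multicast group 10.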


def log(text):
    global logfile, datetime
    line = "%s \t DigProc: %s" %(str(datetime.datetime.now()), str(text))
    print(line)

    f = open(logfile, "a")
    f.write(line + "\n")
    f.close()

def digest_callback(dev_id, pipe_id, direction, parser_id, session, msg):
    global p4, log, Digest
    # smac = p4.Ingress.smac
    log("Received message from data plane!")
    for dig in msg:
        print(dig)

    return 0

def bindDigestCallback():
    global digest_callback, log, p4

    try:
        p4.SwitchIngressDeparser.debug_digest.callback_deregister()
    except:
        pass
    finally:
        log("Deregistering old callback function (if any)")

    #Register as callback for digests (bind to DMA?)
    log("Registering callback...")
    p4.SwitchIngressDeparser.debug_digest.callback_register(digest_callback)

    log("Bound callback to digest")

def insertForwardingRules():
    ''' Install the forwarding rules (DstIP -> egress port) '''

    global p4, log, ipaddress, forwardingRules
    log("Inserting forwarding rules...")

    for dstAddr, egrPort in forwardingRules:
        dstIP = ipaddress.ip_address(dstAddr)
        log("%s->%i" %(dstIP, egrPort))
        print(type(dstIP))
        p4.SwitchIngress.tbl_forward.add_with_forward(dstaddr=dstIP, port=egrPort)

def insertKeyWriteRules():
    ''' Install the Key-Write rules (CollectorIP <-> egress port, egress port <-> redundancy, mgid) '''

    global p4, log, ipaddress, collectorIPtoPorts, mcRules
    log("Inserting KeyWrite rules...")

    maxRedundancyLevel = 4

    for collectorIP, egrPort in collectorIPtoPorts:
        collectorIP_bin = ipaddress.ip_address(collectorIP)

        for redundancyLevel in range(1, maxRedundancyLevel+1):

            log("%s,%i,%i" %(collectorIP,egrPort,redundancyLevel))

            # Find the right multicast group ID in the mcRules list (matching on redundancy and egressPort)
            rule = [ r for r in mcRules if r["redundancy"]==redundancyLevel and r["egressPort"]==egrPort ]
            log(rule[0])
            multicastGroupID = rule[0]["mgid"]

            # multicastGroupID = 1 # Static for now. Update to match created multicast groups

            log("Adding multiwrite rule %s, N=%i - %i" % (collectorIP, redundancyLevel, multicastGroupID))

            p4.SwitchIngress.ProcessDTAPacket.tbl_Prep_KeyWrite.add_with_prep_MultiWrite(dstaddr=collectorIP_bin, redundancylevel=redundancyLevel, mcast_grp=multicastGroupID)


def getCollectorMetadata(port):
    ''' Fetch the collector's RDMA connection metadata for the given CM port '''

    global log, os

    # Directory holding the metadata
    metadata_dir = "/root/wly_experiment/dta_results/rdma_metadata/%i" % port

    log("Setting up a new RDMA connection from virtual client... port %i dir %s" % (port, metadata_dir))
    os.system("python3 /root/wly_experiment/dta_codes/translator/init_rdma_connection.py --port %i --dir %s" %(port, metadata_dir))

    log("Reading collector metadata from disk...")
    try:
        # Queue pair
        f = open("%s/tmp_qpnum" % metadata_dir, "r")
        queue_pair = int(f.read())
        f.close()

        # Initial packet sequence number
        f = open("%s/tmp_psn" % metadata_dir, "r")
        start_psn = int(f.read())
        f.close()

        # Start memory address
        f = open("%s/tmp_memaddr" % metadata_dir, "r")
        memory_start = int(f.read())
        f.close()

        # Length of the memory available for data
        f = open("%s/tmp_memlen" % metadata_dir, "r")
        memory_length = int(f.read())
        f.close()

        # Remote key (grants access to the remote host's registered memory)
        f = open("%s/tmp_rkey" % metadata_dir, "r")
        remote_key = int(f.read())
        f.close()
    except:
        # NOTE: if the read fails, the variables stay unset and the return below raises NameError
        log(" !!! !!! Failed to read RDMA metadata !!! !!! ")

    log("Collector metadata read from disk!")

    return queue_pair, start_psn, memory_start, memory_length, remote_key
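
# Metadata directory layout consumed above, one directory per CM port, as written by
# informOfMetadata() in init_rdma_connection.py:
#   /root/wly_experiment/dta_results/rdma_metadata/<port>/
#       tmp_qpnum  tmp_psn  tmp_memaddr  tmp_memlen  tmp_rkey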


psn_reg_index = 0


def setupKeyvalConnection(port=1337):
    ''' Set up the state needed for the Keyval connection (CM port 1337) '''

    global p4, log, ipaddress, collectorIPtoPorts, getCollectorMetadata, psn_reg_index, keywrite_slot_size_B

    # The port number doubles as the source queue pair number
    source_qp = port

    print("Setting up KeyVal connection...")

    # Initialize the key-value RDMA connection (first fetch the collector metadata for this port)
    queue_pair, start_psn, memory_start, memory_length, remote_key = getCollectorMetadata(port)
    print("queue_pair", queue_pair)

    for dstAddr, _ in collectorIPtoPorts:
        dstIP = ipaddress.ip_address(dstAddr)

        # How many data slots the collector has allocated, i.e. memory_length / (csum+data) (size in bytes)
        collector_num_storage_slots = int(memory_length/keywrite_slot_size_B)

        # Populate the register holding the packet sequence number
        p4.SwitchEgress.CraftRDMA.reg_rdma_sequence_number.mod(f1=start_psn, REGISTER_INDEX=psn_reg_index)

        log("Populating PSN-resynchronization lookup table for QP->regIndex mapping")
        p4.SwitchEgress.RDMARatelimit.tbl_get_qp_reg_num.add_with_set_qp_reg_num(queue_pair=source_qp, qp_reg_index=psn_reg_index)

        log("Inserting KeyWrite RDMA lookup rule for collector ip %s" %dstAddr)
        print("psn_reg_index", psn_reg_index)
        # Build the table entry carrying the collector metadata and install it
        p4.SwitchEgress.PrepareKeyWrite.tbl_getCollectorMetadataFromIP.add_with_set_server_info(dstaddr=dstIP, remote_key=remote_key, queue_pair=queue_pair, memory_address_start=memory_start, collector_num_storage_slots=collector_num_storage_slots, qp_reg_index=psn_reg_index)

        psn_reg_index += 1


def setupDatalistConnection():
    ''' Set up the state needed for the Append connections (CM ports 1338-1341) '''

    global p4, log, getCollectorMetadata, psn_reg_index, num_data_lists

    # Specify how many dataLists to connect to, and populate the ASIC with metadata from a list of (listID, rdmaCMPort) tuples
    # lists = [(1,1338),(2,1339),(3,1340),(4,1341)] # 4 lists
    # lists = [(1,1338),(2,1339),(3,1340)] # 3 lists
    # lists = [(1,1338),(2,1339)] # 2 lists
    # lists = [(1,1338)] # 1 list

    # Size of each list slot in the Append primitive, in bytes (data size)
    listSlotSize = 4

    # Start port number for the lists in the Append primitive
    list_start_port = 1338

    # for listID, port in lists:
    for listID in range(num_data_lists):
        # Derive this list's port from the start port and the list ID (range 1338 to 1338 + num_data_lists - 1)
        port = list_start_port + listID

        print("Setting up dataList connection to list %i port %i..." % (listID, port))

        # Initialize the Append RDMA connection (first fetch the collector metadata for this port)
        queue_pair, start_psn, memory_start, memory_length, remote_key = getCollectorMetadata(port)

        # The port number doubles as the source queue pair number
        source_qp = port

        # Populate the register holding the packet sequence number
        p4.SwitchEgress.CraftRDMA.reg_rdma_sequence_number.mod(f1=start_psn, REGISTER_INDEX=psn_reg_index)

        log("Populating PSN-resynchronization lookup table for QP->regIndex mapping")
        p4.SwitchEgress.RDMARatelimit.tbl_get_qp_reg_num.add_with_set_qp_reg_num(queue_pair=source_qp, qp_reg_index=psn_reg_index)

        # How many data slots the collector has allocated, i.e. memory_length / (slot data size in bytes)
        collector_num_storage_slots = int(memory_length / listSlotSize)
        psn_reg_index = int(psn_reg_index)

        log("Inserting Append-to-List RDMA lookup rule for listID %i" % listID)
        print("psn_reg_index", psn_reg_index)
        print("collector_num_storage_slots", collector_num_storage_slots)

        # Build the collector-metadata table entries and install them (split across two tables)
        p4.SwitchEgress.PrepareAppend.tbl_getCollectorMetadataFromListID_1.add_with_set_server_info_1(listid=listID, remote_key=remote_key, queue_pair=queue_pair, memory_address_start=memory_start)
        p4.SwitchEgress.PrepareAppend.tbl_getCollectorMetadataFromListID_2.add_with_set_server_info_2(listid=listID, collector_num_storage_slots=collector_num_storage_slots, qp_reg_index=psn_reg_index)
        psn_reg_index += 1


def setupPostcarderConnection(port=1336):
    ''' Set up the state needed for the Postcarder connection (CM port 1336) '''

    global p4, log, ipaddress, collectorIPtoPorts, getCollectorMetadata, psn_reg_index, postcarder_slot_size_B

    # The port number doubles as the source queue pair number
    source_qp = port

    print("Setting up Postcarder connection...")
    # Initialize the Postcarder RDMA connection (first fetch the collector metadata for this port)
    queue_pair, start_psn, memory_start, memory_length, remote_key = getCollectorMetadata(port)
    print("queue_pair", queue_pair)

    for dstAddr, _ in collectorIPtoPorts:
        dstIP = ipaddress.ip_address(dstAddr)

        # How many data slots the collector has allocated, i.e. memory_length / (slot data size in bytes, here 32B)
        collector_num_storage_slots = int(memory_length / postcarder_slot_size_B)

        # Populate the register holding the packet sequence number
        p4.SwitchEgress.CraftRDMA.reg_rdma_sequence_number.mod(f1=start_psn, REGISTER_INDEX=psn_reg_index)

        log("Populating PSN-resynchronization lookup table for QP->regIndex mapping")
        p4.SwitchEgress.RDMARatelimit.tbl_get_qp_reg_num.add_with_set_qp_reg_num(queue_pair=source_qp, qp_reg_index=psn_reg_index)

        log("Inserting Postcarder RDMA lookup rule for collector ip %s" %dstAddr)
        print("psn_reg_index", psn_reg_index)
        # Build the table entry carrying the collector metadata and install it
        p4.SwitchEgress.PreparePostcarder.tbl_getCollectorMetadataFromIP.add_with_set_server_info(dstaddr=dstIP, remote_key=remote_key, queue_pair=queue_pair, memory_address_start=memory_start, collector_num_storage_slots=collector_num_storage_slots, qp_reg_index=psn_reg_index)

        psn_reg_index += 1


def insertCollectorMetadataRules():
    ''' Insert the collectors' RDMA metadata rules into the ASIC (Postcarder, KeyVal, Append) '''

    global p4, log, ipaddress, collectorIPtoPorts, getCollectorMetadata, setupKeyvalConnection, setupDatalistConnection, setupPostcarderConnection
    log("Inserting RDMA metadata into ASIC...")

    setupPostcarderConnection()

    setupKeyvalConnection()

    setupDatalistConnection()
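
# CM port assignment used by the three setup calls above (one RDMA connection per primitive):
#   Postcarder: 1336, KeyWrite/KeyVal: 1337, Append lists: 1338 .. 1338+num_data_lists-1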


# NOTE: this might break ALL rules about multicasting. Very hacky
def configMulticasting():
    global p4, pre, log, mcRules
    log("Configuring multicast groups...")

    lastNodeID = 0

    for mcastGroup in mcRules:
        mgid = mcastGroup["mgid"]
        egressPort = mcastGroup["egressPort"]
        redundancy = mcastGroup["redundancy"]
        log("Setting up multicast %i, egress port:%i, redundancy:%i" %(mgid, egressPort, redundancy))

        nodeIDs = []
        log("Adding multicast nodes...")
        for i in range(redundancy):
            lastNodeID += 1
            log("Creating node %i" %lastNodeID)
            # pre.node.add(DEV_PORT=[egressPort], MULTICAST_NODE_ID=lastNodeID)
            pre.node.add(dev_port=[egressPort], multicast_node_id=lastNodeID)
            nodeIDs.append(lastNodeID)

        log("Creating the multicast group")
        # pre.mgid.add(MGID=mgid, MULTICAST_NODE_ID=nodeIDs, MULTICAST_NODE_L1_XID=[0]*redundancy, MULTICAST_NODE_L1_XID_VALID=[False]*redundancy)
        pre.mgid.add(mgid=mgid, multicast_node_id=nodeIDs, multicast_node_l1_xid=[0]*redundancy, multicast_node_l1_xid_valid=[False]*redundancy)


def configMirrorSessions():
    global mirror, log
    log("Configuring mirroring sessions...")

    #TODO: fix truncation length
    mirror.cfg.add_with_normal(sid=1, session_enable=True, ucast_egress_port=65, ucast_egress_port_valid=True, direction="BOTH", max_pkt_len=43) #Mirror header+Ethernet+IP


def populateTables():
    global p4, log, insertForwardingRules, insertKeyWriteRules, insertCollectorMetadataRules

    log("Populating the P4 tables...")

    insertForwardingRules()
    insertKeyWriteRules()
    insertCollectorMetadataRules()

log("Starting")
# Commented out:
# configMulticasting()
populateTables()
configMirrorSessions()
bindDigestCallback()

# log("Starting periodic injection of DTA write packet (keeping system alive)")
# os.system("watch \"sudo /home/sde/dta/translator/inject_dta.py keywrite --data 10000 --key 0 --redundancy 1\" &")

print("*** Now start the periodic WRITE function manually")

log("Bootstrap complete")
243
wly_experiment/dta_results/dta_translator.log
Normal file
@ -0,0 +1,243 @@
|
||||
2024-02-22 11:04:13.082951 DigProc: Starting
|
||||
2024-02-22 11:04:13.083089 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 11:04:13.083165 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 11:04:13.083229 DigProc: Adding multicast nodes...
|
||||
2024-02-22 11:04:13.083286 DigProc: Creating node 1
|
||||
2024-02-22 11:21:17.356589 DigProc: Starting
|
||||
2024-02-22 11:21:17.356731 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 11:21:17.356818 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 11:21:17.356886 DigProc: Adding multicast nodes...
|
||||
2024-02-22 11:21:17.356955 DigProc: Creating node 1
|
||||
2024-02-22 11:21:24.980153 DigProc: Starting
|
||||
2024-02-22 11:21:24.980283 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 11:21:24.980366 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 11:21:24.980436 DigProc: Adding multicast nodes...
|
||||
2024-02-22 11:21:24.980506 DigProc: Creating node 1
|
||||
2024-02-22 11:48:18.655489 DigProc: Starting
|
||||
2024-02-22 11:48:18.655601 DigProc: Populating the P4 tables...
|
||||
2024-02-22 11:48:18.655671 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 11:48:18.655809 DigProc: 192.168.1.91->64
|
||||
2024-02-22 12:09:16.332419 DigProc: Starting
|
||||
2024-02-22 12:09:16.332549 DigProc: Populating the P4 tables...
|
||||
2024-02-22 12:09:16.332639 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 12:09:16.332802 DigProc: 192.168.1.91->64
|
||||
2024-02-22 12:16:54.067753 DigProc: Starting
|
||||
2024-02-22 12:16:54.067868 DigProc: Populating the P4 tables...
|
||||
2024-02-22 12:16:54.067934 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 12:16:54.068060 DigProc: 192.168.1.91->64
|
||||
2024-02-22 12:26:14.020314 DigProc: Starting
|
||||
2024-02-22 12:26:14.020431 DigProc: Populating the P4 tables...
|
||||
2024-02-22 12:26:14.020498 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 12:26:14.020609 DigProc: 192.168.1.91->64
|
||||
2024-02-22 12:27:32.267278 DigProc: Starting
|
||||
2024-02-22 12:27:32.267390 DigProc: Populating the P4 tables...
|
||||
2024-02-22 12:27:32.267452 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 12:27:32.267554 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:23:43.935375 DigProc: Starting
|
||||
2024-02-22 13:23:43.935486 DigProc: Populating the P4 tables...
|
||||
2024-02-22 13:23:43.935555 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 13:23:43.935674 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:25:54.796017 DigProc: Starting
|
||||
2024-02-22 13:25:54.796130 DigProc: Populating the P4 tables...
|
||||
2024-02-22 13:25:54.796195 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 13:25:54.796305 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:26:49.000341 DigProc: Starting
|
||||
2024-02-22 13:26:49.000456 DigProc: Populating the P4 tables...
|
||||
2024-02-22 13:26:49.000534 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 13:26:49.000647 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:26:49.001081 DigProc: 192.168.1.86->3
|
||||
2024-02-22 13:26:49.001308 DigProc: 192.168.1.87->7
|
||||
2024-02-22 13:26:49.001481 DigProc: Inserting KeyWrite rules...
|
||||
2024-02-22 13:26:49.001588 DigProc: 192.168.1.87,7,1
|
||||
2024-02-22 13:26:49.001655 DigProc: {'mgid': 9, 'redundancy': 1, 'egressPort': 7}
|
||||
2024-02-22 13:26:49.001738 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
|
||||
2024-02-22 13:27:40.093491 DigProc: Starting
|
||||
2024-02-22 13:27:40.093604 DigProc: Populating the P4 tables...
|
||||
2024-02-22 13:27:40.093682 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 13:27:40.093813 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:28:18.147528 DigProc: Starting
|
||||
2024-02-22 13:28:18.147642 DigProc: Populating the P4 tables...
|
||||
2024-02-22 13:28:18.147713 DigProc: Inserting forwarding rules...
|
||||
2024-02-22 13:28:18.147815 DigProc: 192.168.1.91->64
|
||||
2024-02-22 13:28:18.148185 DigProc: 192.168.1.86->3
|
||||
2024-02-22 13:28:18.148387 DigProc: 192.168.1.87->7
|
||||
2024-02-22 13:28:18.148557 DigProc: Inserting KeyWrite rules...
|
||||
2024-02-22 13:28:18.148644 DigProc: 192.168.1.87,7,1
|
||||
2024-02-22 13:28:18.148712 DigProc: {'mgid': 9, 'redundancy': 1, 'egressPort': 7}
|
||||
2024-02-22 13:28:18.148765 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
|
||||
2024-02-22 13:34:01.046623 DigProc: Starting
|
||||
2024-02-22 13:34:01.046739 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 13:34:01.046799 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 13:34:01.046846 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:34:01.046904 DigProc: Creating node 1
|
||||
2024-02-22 13:34:53.222689 DigProc: Starting
|
||||
2024-02-22 13:34:53.222804 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 13:34:53.222879 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 13:34:53.222933 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:34:53.222981 DigProc: Creating node 1
|
||||
2024-02-22 13:34:53.223313 DigProc: Creating the multicast group
|
||||
2024-02-22 13:34:53.223821 DigProc: Setting up multicast 2, egress port:3, redundancy:2
|
||||
2024-02-22 13:34:53.223898 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:34:53.223955 DigProc: Creating node 2
|
||||
2024-02-22 13:34:53.224144 DigProc: Creating node 3
|
||||
2024-02-22 13:34:53.224340 DigProc: Creating the multicast group
|
||||
2024-02-22 13:34:53.224881 DigProc: Setting up multicast 3, egress port:3, redundancy:3
|
||||
2024-02-22 13:34:53.224965 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:34:53.225025 DigProc: Creating node 4
|
||||
2024-02-22 13:34:53.225189 DigProc: Creating node 5
|
||||
2024-02-22 13:34:53.225391 DigProc: Creating node 6
|
||||
2024-02-22 13:34:53.225554 DigProc: Creating the multicast group
|
||||
2024-02-22 13:34:53.226241 DigProc: Setting up multicast 4, egress port:3, redundancy:4
|
||||
2024-02-22 13:34:53.226317 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:34:53.226371 DigProc: Creating node 7
|
||||
2024-02-22 13:34:53.226553 DigProc: Creating node 8
|
||||
2024-02-22 13:34:53.226764 DigProc: Creating node 9
|
||||
2024-02-22 13:34:53.226925 DigProc: Creating node 10
|
||||
2024-02-22 13:34:53.227096 DigProc: Creating the multicast group
|
||||
2024-02-22 13:39:26.337460 DigProc: Starting
|
||||
2024-02-22 13:39:26.337574 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 13:39:26.337654 DigProc: Setting up multicast 1, egress port:3, redundancy:1
|
||||
2024-02-22 13:39:26.337722 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:39:26.337783 DigProc: Creating node 1
|
||||
2024-02-22 13:39:26.338168 DigProc: Creating the multicast group
|
||||
2024-02-22 13:39:26.338627 DigProc: Setting up multicast 2, egress port:3, redundancy:2
|
||||
2024-02-22 13:39:26.338736 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:39:26.338819 DigProc: Creating node 2
|
||||
2024-02-22 13:39:26.339075 DigProc: Creating node 3
|
||||
2024-02-22 13:39:26.339318 DigProc: Creating the multicast group
|
||||
2024-02-22 13:39:26.340019 DigProc: Setting up multicast 3, egress port:3, redundancy:3
|
||||
2024-02-22 13:39:26.340120 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:39:26.340210 DigProc: Creating node 4
|
||||
2024-02-22 13:39:26.340469 DigProc: Creating node 5
|
||||
2024-02-22 13:39:26.340729 DigProc: Creating node 6
|
||||
2024-02-22 13:39:26.340980 DigProc: Creating the multicast group
|
||||
2024-02-22 13:39:26.341832 DigProc: Setting up multicast 4, egress port:3, redundancy:4
|
||||
2024-02-22 13:39:26.341931 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:39:26.342013 DigProc: Creating node 7
|
||||
2024-02-22 13:39:26.342270 DigProc: Creating node 8
|
||||
2024-02-22 13:39:26.342527 DigProc: Creating node 9
|
||||
2024-02-22 13:39:26.342787 DigProc: Creating node 10
|
||||
2024-02-22 13:39:26.343040 DigProc: Creating the multicast group
|
||||
2024-02-22 13:41:43.143848 DigProc: Starting
|
||||
2024-02-22 13:41:43.143965 DigProc: Configuring mirroring sessions...
|
||||
2024-02-22 13:41:43.144056 DigProc: Setting up multicast 0, egress port:3, redundancy:1
|
||||
2024-02-22 13:41:43.144124 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:41:43.144201 DigProc: Creating node 1
|
||||
2024-02-22 13:41:43.144615 DigProc: Creating the multicast group
|
||||
2024-02-22 13:41:43.145153 DigProc: Setting up multicast 1, egress port:3, redundancy:2
|
||||
2024-02-22 13:41:43.145247 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:41:43.145333 DigProc: Creating node 2
|
||||
2024-02-22 13:41:43.145546 DigProc: Creating node 3
|
||||
2024-02-22 13:41:43.145792 DigProc: Creating the multicast group
|
||||
2024-02-22 13:41:43.146368 DigProc: Setting up multicast 2, egress port:3, redundancy:3
|
||||
2024-02-22 13:41:43.146465 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:41:43.146558 DigProc: Creating node 4
|
||||
2024-02-22 13:41:43.146776 DigProc: Creating node 5
|
||||
2024-02-22 13:41:43.147017 DigProc: Creating node 6
|
||||
2024-02-22 13:41:43.147270 DigProc: Creating the multicast group
|
||||
2024-02-22 13:41:43.148226 DigProc: Setting up multicast 3, egress port:3, redundancy:4
|
||||
2024-02-22 13:41:43.148330 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:41:43.148417 DigProc: Creating node 7
|
||||
2024-02-22 13:41:43.148674 DigProc: Creating node 8
|
||||
2024-02-22 13:41:43.148888 DigProc: Creating node 9
|
||||
2024-02-22 13:41:43.149139 DigProc: Creating node 10
|
||||
2024-02-22 13:41:43.149323 DigProc: Creating the multicast group
|
||||
2024-02-22 13:41:43.150142 DigProc: Setting up multicast 5, egress port:64, redundancy:1
|
||||
2024-02-22 13:41:43.150239 DigProc: Adding multicast nodes...
|
||||
2024-02-22 13:41:43.150311 DigProc: Creating node 11
|
||||
2024-02-22 13:41:43.150575 DigProc: Creating the multicast group
|
||||
2024-02-22 13:44:16.638186 DigProc: Starting
2024-02-22 13:44:16.638298 DigProc: Populating the P4 tables...
2024-02-22 13:44:16.638362 DigProc: Inserting forwarding rules...
2024-02-22 13:44:16.638482 DigProc: 192.168.1.91->64
2024-02-22 13:44:16.638884 DigProc: 192.168.1.86->3
2024-02-22 13:44:16.639100 DigProc: 192.168.1.87->7
2024-02-22 13:44:16.639281 DigProc: Inserting KeyWrite rules...
2024-02-22 13:44:16.639373 DigProc: 192.168.1.87,7,1
2024-02-22 13:44:16.639429 DigProc: {'egressPort': 7, 'redundancy': 1, 'mgid': 9}
2024-02-22 13:44:16.639488 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
2024-02-22 13:44:16.639647 DigProc: 192.168.1.87,7,2
2024-02-22 13:44:16.639731 DigProc: {'egressPort': 7, 'redundancy': 2, 'mgid': 10}
2024-02-22 13:44:16.639788 DigProc: Adding multiwrite rule 192.168.1.87,N=2 - 10
2024-02-22 13:44:16.639921 DigProc: 192.168.1.87,7,3
2024-02-22 13:44:16.639996 DigProc: {'egressPort': 7, 'redundancy': 3, 'mgid': 11}
2024-02-22 13:44:16.640055 DigProc: Adding multiwrite rule 192.168.1.87,N=3 - 11
2024-02-22 13:44:16.640179 DigProc: 192.168.1.87,7,4
2024-02-22 13:44:16.640244 DigProc: {'egressPort': 7, 'redundancy': 4, 'mgid': 12}
2024-02-22 13:44:16.640303 DigProc: Adding multiwrite rule 192.168.1.87,N=4 - 12
2024-02-22 13:44:16.640453 DigProc: Inserting RDMA metadata into ASIC...
2024-02-22 13:44:16.640541 DigProc: Setting up a new RDMA connection from virtual client... port 1336 dir /root/wly_experiment/dta_results/rdma_metadata/1336
2024-02-22 13:44:28.244177 DigProc: Reading collector metadata from disk...
2024-02-22 13:44:28.244859 DigProc: !!! !!! Failed to read RDMA metadata !!! !!!
2024-02-22 13:44:28.245051 DigProc: Collector metadata read from disk!
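This run populates two tables: a forwarding table keyed on collector IP (192.168.1.91->64, 192.168.1.86->3, 192.168.1.87->7) and a KeyWrite/multiwrite table that maps (collector, redundancy N) to a multicast group, with MGIDs 9-12 assigned consecutively for N=1..4. A sketch of that mapping, with the table inserts left as prints (assumption: the real script drives the P4 tables through the SDE runtime, which is not reproduced here):

forwarding = {            # collector IP -> egress dev port, as in the log
    "192.168.1.91": 64,
    "192.168.1.86": 3,
    "192.168.1.87": 7,
}


def keywrite_rules(ip: str, egress_port: int, first_mgid: int, max_n: int = 4):
    """Yield one rule per redundancy level, mirroring the log's
    '192.168.1.87,N=1 - 9' ... 'N=4 - 12' lines (MGIDs are consecutive)."""
    for n in range(1, max_n + 1):
        yield {"egressPort": egress_port, "redundancy": n,
               "mgid": first_mgid + n - 1}


for rule in keywrite_rules("192.168.1.87", forwarding["192.168.1.87"], first_mgid=9):
    # stand-in for the actual table insert
    print(f"Adding multiwrite rule 192.168.1.87,N={rule['redundancy']} - {rule['mgid']}")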
2024-02-22 14:42:03.961299 DigProc: Starting
2024-02-22 14:42:03.961408 DigProc: Populating the P4 tables...
2024-02-22 14:42:03.961465 DigProc: Inserting forwarding rules...
2024-02-22 14:42:03.961569 DigProc: 192.168.1.91->64
2024-02-22 14:42:03.961993 DigProc: 192.168.1.86->3
2024-02-22 14:42:03.962209 DigProc: 192.168.1.87->7
2024-02-22 14:42:03.962395 DigProc: Inserting KeyWrite rules...
2024-02-22 14:42:03.962497 DigProc: 192.168.1.87,7,1
2024-02-22 14:42:03.962562 DigProc: {'egressPort': 7, 'mgid': 9, 'redundancy': 1}
2024-02-22 14:42:03.962627 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
2024-02-22 14:42:03.962819 DigProc: 192.168.1.87,7,2
2024-02-22 14:42:03.962894 DigProc: {'egressPort': 7, 'mgid': 10, 'redundancy': 2}
2024-02-22 14:42:03.962954 DigProc: Adding multiwrite rule 192.168.1.87,N=2 - 10
2024-02-22 14:42:03.963111 DigProc: 192.168.1.87,7,3
2024-02-22 14:42:03.963187 DigProc: {'egressPort': 7, 'mgid': 11, 'redundancy': 3}
2024-02-22 14:42:03.963245 DigProc: Adding multiwrite rule 192.168.1.87,N=3 - 11
2024-02-22 14:42:03.963383 DigProc: 192.168.1.87,7,4
2024-02-22 14:42:03.963468 DigProc: {'egressPort': 7, 'mgid': 12, 'redundancy': 4}
2024-02-22 14:42:03.963536 DigProc: Adding multiwrite rule 192.168.1.87,N=4 - 12
2024-02-22 14:42:03.963690 DigProc: Inserting RDMA metadata into ASIC...
2024-02-22 14:42:03.963774 DigProc: Setting up a new RDMA connection from virtual client... port 1336 dir /root/wly_experiment/dta_results/rdma_metadata/1336
2024-02-22 14:42:15.564067 DigProc: Reading collector metadata from disk...
2024-02-22 14:42:15.564753 DigProc: !!! !!! Failed to read RDMA metadata !!! !!!
2024-02-22 14:42:15.564995 DigProc: Collector metadata read from disk!
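In each of these runs the "Failed to read RDMA metadata" line is followed almost immediately by a success message, which suggests the translator polls the metadata directory until the virtual client has written every file. A plausible retry sketch under that assumption (this is not the repo's code; path and filenames are taken from the diff below):

import time
from pathlib import Path

META_DIR = Path("/root/wly_experiment/dta_results/rdma_metadata/1336")
FILES = ("tmp_memlen", "tmp_psn", "tmp_qpnum", "tmp_rkey")


def wait_for_collector_metadata(retries: int = 20, delay_s: float = 0.1) -> None:
    """Poll until all metadata files exist and are non-empty."""
    print("Reading collector metadata from disk...")
    for _ in range(retries):
        if all((META_DIR / f).exists() and (META_DIR / f).read_text().strip()
               for f in FILES):
            print("Collector metadata read from disk!")
            return
        print("!!! !!! Failed to read RDMA metadata !!! !!!")
        time.sleep(delay_s)
    raise RuntimeError("collector metadata never appeared on disk")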
2024-02-22 21:10:53.156916 DigProc: Starting
2024-02-22 21:10:53.157026 DigProc: Populating the P4 tables...
2024-02-22 21:10:53.157129 DigProc: Inserting forwarding rules...
2024-02-22 21:10:53.157250 DigProc: 192.168.1.91->64
2024-02-22 21:10:53.157712 DigProc: 192.168.1.86->3
2024-02-22 21:10:53.157980 DigProc: 192.168.1.87->7
2024-02-22 21:10:53.158160 DigProc: Inserting KeyWrite rules...
2024-02-22 21:10:53.158264 DigProc: 192.168.1.87,7,1
2024-02-22 21:10:53.158339 DigProc: {'mgid': 9, 'egressPort': 7, 'redundancy': 1}
2024-02-22 21:10:53.158424 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
2024-02-22 21:10:53.158603 DigProc: 192.168.1.87,7,2
2024-02-22 21:10:53.158706 DigProc: {'mgid': 10, 'egressPort': 7, 'redundancy': 2}
2024-02-22 21:10:53.158762 DigProc: Adding multiwrite rule 192.168.1.87,N=2 - 10
2024-02-22 21:10:53.158897 DigProc: 192.168.1.87,7,3
2024-02-22 21:10:53.158989 DigProc: {'mgid': 11, 'egressPort': 7, 'redundancy': 3}
2024-02-22 21:10:53.159053 DigProc: Adding multiwrite rule 192.168.1.87,N=3 - 11
2024-02-22 21:10:53.159188 DigProc: 192.168.1.87,7,4
2024-02-22 21:10:53.159272 DigProc: {'mgid': 12, 'egressPort': 7, 'redundancy': 4}
2024-02-22 21:10:53.159335 DigProc: Adding multiwrite rule 192.168.1.87,N=4 - 12
2024-02-22 21:10:53.159495 DigProc: Inserting RDMA metadata into ASIC...
2024-02-22 21:10:53.159573 DigProc: Setting up a new RDMA connection from virtual client... port 1336 dir /root/wly_experiment/dta_results/rdma_metadata/1336
2024-02-22 21:11:04.775371 DigProc: Reading collector metadata from disk...
2024-02-22 21:11:04.776094 DigProc: !!! !!! Failed to read RDMA metadata !!! !!!
2024-02-22 21:11:04.776371 DigProc: Collector metadata read from disk!
2024-02-22 21:20:36.578074 DigProc: Starting
2024-02-22 21:20:36.578186 DigProc: Populating the P4 tables...
2024-02-22 21:20:36.578255 DigProc: Inserting forwarding rules...
2024-02-22 21:20:36.578379 DigProc: 192.168.1.91->64
2024-02-22 21:20:36.578788 DigProc: 192.168.1.86->3
2024-02-22 21:20:36.578990 DigProc: 192.168.1.87->7
2024-02-22 21:20:36.579152 DigProc: Inserting KeyWrite rules...
2024-02-22 21:20:36.579243 DigProc: 192.168.1.87,7,1
2024-02-22 21:20:36.579304 DigProc: {'egressPort': 7, 'redundancy': 1, 'mgid': 9}
2024-02-22 21:20:36.579359 DigProc: Adding multiwrite rule 192.168.1.87,N=1 - 9
2024-02-22 21:20:36.579523 DigProc: 192.168.1.87,7,2
2024-02-22 21:20:36.579595 DigProc: {'egressPort': 7, 'redundancy': 2, 'mgid': 10}
2024-02-22 21:20:36.579649 DigProc: Adding multiwrite rule 192.168.1.87,N=2 - 10
2024-02-22 21:20:36.579793 DigProc: 192.168.1.87,7,3
2024-02-22 21:20:36.579860 DigProc: {'egressPort': 7, 'redundancy': 3, 'mgid': 11}
2024-02-22 21:20:36.579924 DigProc: Adding multiwrite rule 192.168.1.87,N=3 - 11
2024-02-22 21:20:36.580053 DigProc: 192.168.1.87,7,4
2024-02-22 21:20:36.580118 DigProc: {'egressPort': 7, 'redundancy': 4, 'mgid': 12}
2024-02-22 21:20:36.580170 DigProc: Adding multiwrite rule 192.168.1.87,N=4 - 12
2024-02-22 21:20:36.580310 DigProc: Inserting RDMA metadata into ASIC...
2024-02-22 21:20:36.580390 DigProc: Setting up a new RDMA connection from virtual client... port 1336 dir /root/wly_experiment/dta_results/rdma_metadata/1336
2024-02-22 21:20:48.196762 DigProc: Reading collector metadata from disk...
2024-02-22 21:20:48.198298 DigProc: Collector metadata read from disk!
@ -0,0 +1 @@
950591785133954132
1
wly_experiment/dta_results/rdma_metadata/1336/tmp_memlen
Normal file
@ -0,0 +1 @@
1397704714
1
wly_experiment/dta_results/rdma_metadata/1336/tmp_psn
Normal file
@ -0,0 +1 @@
7304805
1
wly_experiment/dta_results/rdma_metadata/1336/tmp_qpnum
Normal file
@ -0,0 +1 @@
7562352
1
wly_experiment/dta_results/rdma_metadata/1336/tmp_rkey
Normal file
@ -0,0 +1 @@
840972884
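The four single-line files above hold the per-connection values the log's "Inserting RDMA metadata into ASIC..." step consumes; the field meanings below follow standard RDMA naming, and the unlabeled 64-bit value preceding them is plausibly the registered memory address, though its filename is not visible in this diff. A sketch of a loader for these files (assumption: the repo's own reader may be structured differently):

from dataclasses import dataclass
from pathlib import Path


@dataclass
class RdmaConnMeta:
    mem_len: int   # length of the collector's registered memory region
    psn: int       # initial packet sequence number
    qp_num: int    # queue pair number on the collector NIC
    rkey: int      # remote key authorizing RDMA writes into the region


def load(meta_dir: str) -> RdmaConnMeta:
    d = Path(meta_dir)
    return RdmaConnMeta(
        mem_len=int((d / "tmp_memlen").read_text()),
        psn=int((d / "tmp_psn").read_text()),
        qp_num=int((d / "tmp_qpnum").read_text()),
        rkey=int((d / "tmp_rkey").read_text()),
    )

# e.g. load(".../rdma_metadata/1336") -> RdmaConnMeta(mem_len=1397704714, ...)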
1
wly_experiment/start_tofino.sh
Normal file
@ -0,0 +1 @@
./../bf-sde-9.2.0/run_switchd.sh -p dta_translator
1
wly_experiment/table_rules.sh
Normal file
@ -0,0 +1 @@
./../bf-sde-9.2.0/run_bfshell.sh -b ../wly_experiment/dta_codes/translator/switch_cpu.py -i
77
wly_experiment/zlog-cfg-cur
Normal file
@ -0,0 +1,77 @@
[global]
strict init = false
buffer min = 1024
buffer max = 2MB
default format = "%d(%F %X).%us %-6V (%c:%F:%U:%L) - %m%n"
file perms = 666
fsync period = 1K

[levels]

[formats]
null = "%n"
print = "[%-10.3d(%F)]%n"
file_format = "%d(%F %X).%us %-5V %c %m%n"
console_format = "%d(%F %X).%us %c %5V - %m%n"

[rules]

BF_SYS.ERROR >stdout;console_format
BF_SYS.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_LLD.ERROR >stdout;console_format
BF_LLD.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PIPE.ERROR >stdout;console_format
BF_PIPE.ERROR "bf_drivers.log", 5M * 5 ;file_format

BF_TM.ERROR >stdout;console_format
BF_TM.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_MC.ERROR >stdout;console_format
BF_MC.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PKT.ERROR >stdout;console_format
BF_PKT.ERROR "bf_drivers.log", 5M * 5 ;file_format

BF_DVM.ERROR >stdout;console_format
BF_DVM.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PORT.ERROR >stdout;console_format
BF_PORT.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_AVAGO.ERROR >stdout;console_format
BF_AVAGO.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_DRU.ERROR >stdout;console_format
BF_DRU.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_API.ERROR >stdout;console_format
BF_API.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_SAI.ERROR >stdout;console_format
BF_SAI.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PI.ERROR >stdout;console_format
BF_PI.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PLTFM.ERROR >stdout;console_format
BF_PLTFM.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PAL.ERROR >stdout;console_format
BF_PAL.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_PM.ERROR >stdout;console_format
BF_PM.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_KNET.ERROR >stdout;console_format
BF_KNET.DEBUG "bf_drivers.log", 5M * 5 ;file_format

BF_BFRT.ERROR >stdout;console_format
BF_BFRT.ERROR "bf_drivers.log", 5M * 5 ;file_format

BF_P4RT.ERROR >stdout;console_format
BF_P4RT.DEBUG "bf_drivers.log", 5M * 5 ;file_format

*.ERROR >syslog , LOG_USER