Add working ordered_wires fuzzer. Signed-off-by: Keith Rothman <537074+litghost@users.noreply.github.com>
diff --git a/fuzzers/003-ordered_wires/Makefile b/fuzzers/003-ordered_wires/Makefile new file mode 100644 index 0000000..a9d305f --- /dev/null +++ b/fuzzers/003-ordered_wires/Makefile
# Number of specimens to build (one generate.sh invocation per specimen).
N := 1
BUILD_DIR = build_${URAY_PART}
SPECIMENS := $(addprefix $(BUILD_DIR)/specimen_,$(shell seq -f '%03.0f' $(N)))
SPECIMENS_OK := $(addsuffix /OK,$(SPECIMENS))
# Parallel-Vivado and pips-per-instance limits, forwarded to generate.sh
# (and from there to run_fuzzer.py as -p / -t).
MAX_VIVADO_PROCESS ?= 4
MAX_PIPS_INSTANCE ?= 340000

# This fuzzer only produces the specimen wire lists; there is no database
# to build or push, so these targets are deliberate no-ops.
database: $(SPECIMENS_OK)
	true

pushdb:
	true

# Build one specimen directory; the OK stamp marks successful completion.
$(SPECIMENS_OK):
	bash generate.sh $(subst /OK,,$@) -p=$(MAX_VIVADO_PROCESS) -t=$(MAX_PIPS_INSTANCE)
	touch $@

run:
	rm -rf $(BUILD_DIR) run.${URAY_PART}.ok
	$(MAKE) database
	$(MAKE) pushdb
	touch run.${URAY_PART}.ok

clean:
	rm -rf build_* run.*.ok

.PHONY: database pushdb run clean
diff --git a/fuzzers/003-ordered_wires/generate.sh b/fuzzers/003-ordered_wires/generate.sh new file mode 100644 index 0000000..c934e6e --- /dev/null +++ b/fuzzers/003-ordered_wires/generate.sh
@@ -0,0 +1,6 @@ +#!/bin/bash -x + +source ${URAY_GENHEADER} + +python3 $FUZDIR/run_fuzzer.py $2 $3 +
diff --git a/fuzzers/003-ordered_wires/get_pipscount.tcl b/fuzzers/003-ordered_wires/get_pipscount.tcl new file mode 100644 index 0000000..153d242 --- /dev/null +++ b/fuzzers/003-ordered_wires/get_pipscount.tcl
# One-shot Vivado job: open an empty PinPlanning design for the target part
# so device resources can be queried without running implementation, then
# write the total number of pips to nb_pips.txt for run_fuzzer.py to read.
create_project -force -part $::env(URAY_PART) design design
set_property design_mode PinPlanning [current_fileset]
open_io_design -name io_1

#set_param tcl.collectionResultDisplayLimit 0
# Keep Vivado from accumulating every message in memory during the query.
set_param messaging.disableStorage 1

set nbpips_fp [open nb_pips.txt w]

set pips [get_pips]
puts $nbpips_fp [llength $pips]

# Close the channel explicitly so the count is flushed to disk; the
# original relied on interpreter teardown to flush the buffered write.
close $nbpips_fp
diff --git a/fuzzers/003-ordered_wires/job.tcl b/fuzzers/003-ordered_wires/job.tcl new file mode 100644 index 0000000..ef08e4c --- /dev/null +++ b/fuzzers/003-ordered_wires/job.tcl
# Worker Vivado job, invoked by run_fuzzer.py as:
#   vivado -mode batch -source job.tcl -tclargs <blocknb> <start> <stop>
# For each pip in [start, stop) of the device pip list, record the ordered
# wires of every node downhill and uphill of the pip.
set blocknb [lindex $argv 0]
set start [expr int([lindex $argv 1])]
set stop [expr int([lindex $argv 2])]

# Open an empty PinPlanning design so device resources can be queried
# without a full implementation run; project name is unique per block.
create_project -force -part $::env(URAY_PART) $blocknb $blocknb
set_property design_mode PinPlanning [current_fileset]
open_io_design -name io_1

#set_param tcl.collectionResultDisplayLimit 0
# Keep Vivado from accumulating every message in memory during the sweep.
set_param messaging.disableStorage 1

set pips [get_pips]

# Per-block temporaries; run_fuzzer.py merges these into the final files
# and deletes them.
set dwnhill_fp [open "wires/downhill_wires_${blocknb}.txt" w]
set uphill_fp [open "wires/uphill_wires_${blocknb}.txt" w]

for { set i $start } { $i < $stop } { incr i } {
    set pip [lindex $pips $i]
    # One line per (pip, node) pair: the pip, the node, then the node's
    # wires in Vivado's order relative to that pip.
    foreach downhill_node [get_nodes -downhill -of_object $pip] {
        set ordered_downhill_wires [get_wires -from $pip -of_object $downhill_node]
        puts $dwnhill_fp "$pip $downhill_node $ordered_downhill_wires"
    }
    foreach uphill_node [get_nodes -uphill -of_object $pip] {
        set ordered_uphill_wires [get_wires -to $pip -of_object $uphill_node]
        puts $uphill_fp "$pip $uphill_node $ordered_uphill_wires"
    }

}

close $dwnhill_fp
close $uphill_fp
diff --git a/fuzzers/003-ordered_wires/run_fuzzer.py b/fuzzers/003-ordered_wires/run_fuzzer.py new file mode 100644 index 0000000..112e01e --- /dev/null +++ b/fuzzers/003-ordered_wires/run_fuzzer.py
import os
import shutil
import sys
import subprocess
import signal
from multiprocessing import Pool, Lock
from itertools import chain
import argparse

# Can be used to redirect vivado tons of output:
# stdout=subprocess.DEVNULL in subprocess.check_call

# Shared merge lock, installed into every worker process by pool_init().
# Named distinctly so it does not shadow multiprocessing.Lock (the class),
# which the original code did.
_merge_lock = None


def start_pips(argList):
    """Worker: run one Vivado instance over pips [start, stop) and merge output.

    argList is a (blockID, start, stop, total) tuple (Pool.map passes a
    single argument).  The Vivado job writes per-block wire lists under
    wires/; once it finishes they are appended to the final uphill/downhill
    files under the shared lock, then deleted to bound disk usage.
    """
    blockID, start, stop, total = argList
    print("Running instance :" + str(blockID) + " / " + str(total))
    # ${URAY_VIVADO} / $FUZDIR are expanded by the shell from the
    # environment, hence shell=True with a literal command string.
    subprocess.check_call(
        "${URAY_VIVADO} -mode batch -source $FUZDIR/job.tcl -tclargs " +
        str(blockID) + " " + str(start) + " " + str(stop),
        shell=True)

    uphill_wires = "wires/uphill_wires_{}.txt".format(blockID)
    downhill_wires = "wires/downhill_wires_{}.txt".format(blockID)

    # Serialize appends so concurrent workers do not interleave their
    # output.  The context manager guarantees release even if a copy
    # fails (the original acquire/release pair could deadlock the pool).
    with _merge_lock:
        with open("uphill_wires.txt", "a") as wfd, \
                open(uphill_wires, "r") as fd:
            shutil.copyfileobj(fd, wfd)

        with open("downhill_wires.txt", "a") as wfd, \
                open(downhill_wires, "r") as fd:
            shutil.copyfileobj(fd, wfd)

    os.remove(uphill_wires)
    os.remove(downhill_wires)


def get_nb_pips():
    """Run a one-shot Vivado job that writes the device pip count, read it back.

    Returns the total number of pips as an int (first line of nb_pips.txt).
    """
    print("Fetching total number of pips")
    subprocess.check_call(
        "${URAY_VIVADO} -mode batch -source $FUZDIR/get_pipscount.tcl",
        shell=True)
    # with-statement closes the handle (the original leaked it).
    with open("nb_pips.txt", "r") as countfile:
        return int(countfile.readline())


def pool_init(lock):
    """Pool initializer: install the shared merge lock in each worker."""
    global _merge_lock
    _merge_lock = lock


def run_pool(itemcount, nbBlocks, blocksize, nbParBlock, workFunc):
    """Split itemcount items into blocks and map workFunc over them in a Pool.

    Each workFunc call receives a (blockID, start, stop, total) tuple
    covering items [start, stop).  When itemcount is not an exact multiple
    of blocksize, one extra partial block covers the remainder.

    Returns the number of blocks actually dispatched.
    """
    intitemcount = blocksize * nbBlocks
    modBlocks = itemcount - intitemcount
    lastRun = modBlocks != 0
    if lastRun:
        nbBlocks = nbBlocks + 1

    print("Items Count: " + str(itemcount) + " - Number of blocks: " +
          str(nbBlocks) + " - Parallel blocks: " + str(nbParBlock) +
          " - Blocksize: " + str(blocksize) + " - Modulo Blocks: " +
          str(modBlocks))

    blockId = range(0, nbBlocks)
    startI = range(0, intitemcount, blocksize)
    stopI = range(blocksize, intitemcount + 1, blocksize)
    totalBlock = [nbBlocks] * nbBlocks

    # The trailing partial block [intitemcount, itemcount) is appended to
    # the start/stop sequences.
    if lastRun:
        startI = chain(startI, [intitemcount])
        stopI = chain(stopI, [itemcount])

    mpLock = Lock()

    argList = zip(blockId, startI, stopI, totalBlock)

    with Pool(
            processes=nbParBlock, initializer=pool_init,
            initargs=(mpLock, )) as pool:
        pool.map(workFunc, argList)

    return nbBlocks


# ==========================================================================
# ===== FPGA Logic Items data ==============================================
# For Artix 7 50T:
#    - Total pips: 22002368
#    - Total tiles: 18055
#    - Total nodes: 1953452
# For Kintex 7 70T:
#    - Total pips: 29424910
#    - Total tiles: 24453
#    - Total nodes: 2663055
# For Zynq 7 z010:
#    - Total pips: 12462138
#    - Total tiles: 13440
#    - Total nodes: 1122477
# =========================================================================
# Dividing by about 64 over 4 core is not optimized but a default to run
# on most computer
# =========================================================================


def main(argv):
    """Entry point: compute the block split and fan pips jobs out to Vivado."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p",
        "--nbPar",
        help="Number of parallel instances of Vivado",
        type=int,
        default=4)
    parser.add_argument(
        "-t",
        "--sizePipsBlock",
        help="Define the number of pips to process per instance",
        type=int,
        default=340000)
    args = parser.parse_args()

    nbParBlock = args.nbPar
    blockPipsSize = args.sizePipsBlock

    pipscount = get_nb_pips()
    # Floor division: number of *full* blocks; run_pool adds the partial one.
    nbPipsBlock = pipscount // blockPipsSize

    # exist_ok avoids the check-then-create race of the original.
    os.makedirs("wires", exist_ok=True)

    print(" nbPar: " + str(nbParBlock) + " blockPipsSize: " +
          str(blockPipsSize))

    pipsFileCount = run_pool(pipscount, nbPipsBlock, blockPipsSize,
                             nbParBlock, start_pips)

    print("Work done !")
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))