diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index e2ba47ec29..91104653f6 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -22,4 +22,5 @@ jobs: export FINN_ROOT=$(pwd) export FINN_BUILD_DIR=/tmp/finn_gha export FINN_INST_NAME=finn_gha + export FINN_SKIP_XRT_DOWNLOAD=1 ./run-docker.sh quicktest diff --git a/AUTHORS.rst b/AUTHORS.rst index 5a11497fc8..5ad2b26ac2 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -34,3 +34,6 @@ Contributors * Shashwat Khandelwal (@shashwat1198) * Ian Colbert (@i-colbert) * Rachit Garg (@rstar900) +* Christoph Berganski (@iksnagreb) +* Jonas Kuehle (@vopade) +* Aditya S (@Adityasrinivas24) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 2ceb1f4195..5126ed3ff4 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -31,6 +31,8 @@ FROM ubuntu:jammy-20230126 LABEL maintainer="Jakoba Petri-Koenig , Yaman Umuroglu " ARG XRT_DEB_VERSION="xrt_202220.2.14.354_22.04-amd64-xrt" +ARG SKIP_XRT +ARG LOCAL_XRT WORKDIR /workspace @@ -78,15 +80,19 @@ RUN cd verilator && \ make install # install XRT -RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb -RUN apt install -y /tmp/$XRT_DEB_VERSION.deb -RUN rm /tmp/$XRT_DEB_VERSION.deb +RUN if [ -z "$LOCAL_XRT" ] && [ -z "$SKIP_XRT" ];then \ + wget -U 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17' "https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb" -O /tmp/$XRT_DEB_VERSION.deb; fi + +COPY requirements.txt $XRT_DEB_VERSION.* /tmp/ + +RUN if [ -z "$SKIP_XRT" ];then \ + apt install -y /tmp/$XRT_DEB_VERSION.deb && \ + rm /tmp/$XRT_DEB_VERSION.deb; fi # versioned Python package requirements for FINN compiler # these are given in requirements.txt -COPY requirements.txt . -RUN pip install -r requirements.txt -RUN rm requirements.txt +RUN pip install -r /tmp/requirements.txt +RUN rm /tmp/requirements.txt # install PyTorch RUN pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 @@ -126,6 +132,9 @@ RUN pip install tokenize-rt==4.2.1 # pyverilator RUN pip install tclwrapper==0.0.1 +# assure that we have the right setuptools version +RUN pip install setuptools==68.2.2 + # extra environment variables for FINN compiler ENV VIVADO_IP_CACHE "/tmp/vivado_ip_cache" diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index 61c8f78665..c7500bcaa6 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -86,7 +86,7 @@ if [ -f "$VITIS_PATH/settings64.sh" ];then source $XILINX_XRT/setup.sh gecho "Found XRT at $XILINX_XRT" else - recho "XRT not found on $XILINX_XRT, did the installation fail?" + recho "XRT not found on $XILINX_XRT, did you skip the download or did the installation fail?" exit -1 fi else diff --git a/fetch-repos.sh b/fetch-repos.sh index 073c052d67..2033973f2a 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
QONNX_COMMIT="fd61cfeebbdaba351abf7e9d54cd785d7776fa4f" -FINN_EXP_COMMIT="de99347e936d51715f5356a1b6c64e37b91c23c2" +FINN_EXP_COMMIT="0724be21111a21f0d81a072fccc1c446e053f851" BREVITAS_COMMIT="84f42259ec869eb151af4cb8a8b23ad925f493db" PYVERILATOR_COMMIT="ce0a08c20cb8c1d1e84181d6f392390f846adbd1" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" diff --git a/finn-rtllib/fifo/hdl/Q_srl.v b/finn-rtllib/fifo/hdl/Q_srl.v index 11cef604e0..d1ce33c41f 100644 --- a/finn-rtllib/fifo/hdl/Q_srl.v +++ b/finn-rtllib/fifo/hdl/Q_srl.v @@ -74,7 +74,8 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); parameter depth = 16; // - greatest #items in queue (2 <= depth <= 256) parameter width = 16; // - width of data (i_d, o_d) - parameter addrwidth = $clog2(depth); + localparam countwidth = $clog2(depth + 1); + localparam addrwidth = $clog2(depth); input clock; input reset; @@ -89,10 +90,10 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); input o_r; // - output stream ready wire o_b; // - output stream back-pressure - output [addrwidth:0] count; // - output number of elems in queue - output [addrwidth:0] maxcount; // - maximum observed count since reset + output [countwidth-1:0] count; // - output number of elems in queue + output [countwidth-1:0] maxcount; // - maximum observed count since reset - reg [addrwidth:0] maxcount_reg; // - maximum count seen until now + reg [countwidth-1:0] maxcount_reg; // - maximum count seen until now reg [addrwidth-1:0] addr, addr_, a_; // - SRL16 address // for data output reg shift_en_; // - SRL16 shift enable diff --git a/finn-rtllib/mvu/mvu_4sx4u.sv b/finn-rtllib/mvu/mvu_4sx4u.sv index 0ac2628ee5..ccb25380c8 100644 --- a/finn-rtllib/mvu/mvu_4sx4u.sv +++ b/finn-rtllib/mvu/mvu_4sx4u.sv @@ -36,8 +36,9 @@ module mvu_4sx4u #( int unsigned SIMD, int unsigned ACCU_WIDTH, - int unsigned VERSION = 1, + int unsigned VERSION = 1, // Version 1 (DSP48E1) *must* commit to NARROW_WEIGHTS bit SIGNED_ACTIVATIONS = 0, + bit NARROW_WEIGHTS = 0, // Weights from [-7:7] rather than [-8:7] bit FORCE_BEHAVIORAL = 0 )( // Global Control @@ -62,6 +63,55 @@ module mvu_4sx4u #( `endif FORCE_BEHAVIORAL; + //----------------------------------------------------------------------- + // Determine Lane Configuration + initial begin + if(!NARROW_WEIGHTS && (VERSION == 1)) begin + $error("%m: Need NARROW_WEIGHTS for DSP48E1."); + $finish; + end + end + + /** + * Lane Slicing + * Assumptions: + * - Internal lane widths differ, at most, by a single bit. + * - The rightmost lane (#0) has the maximum internal width. + * - The leftmost lane (#3) extends into the wide DSP accumulation path and + * is constrained by ACCU_WIDTH rather than the next lane. It doesn't have + * an external high extension. + * - The second-to-leftmost lane (#2) has the minimum internal width and, hence, + * the maximum external high extension. + */ + typedef int unsigned lane_offset_v[4:0]; + function lane_offset_v sliceLanes(); + unique case(VERSION) + 1: begin + return NARROW_WEIGHTS? + lane_offset_v'{ ACCU_WIDTH+21, 21, 14, 7, 0 } : + lane_offset_v'{ 0, 0, 0, 0, 0 }; // not supported + end + 2: begin + return NARROW_WEIGHTS?
+ lane_offset_v'{ ACCU_WIDTH+23, 23, 16, 8, 0 } : + lane_offset_v'{ ACCU_WIDTH+22, 22, 15, 8, 0 }; + end + endcase + endfunction : sliceLanes + localparam lane_offset_v OFFSETS = sliceLanes(); + + function int unsigned lo_width(input int unsigned i); + return OFFSETS[i+1] - OFFSETS[i]; + endfunction : lo_width + function int unsigned hi_width(input int unsigned i); + return 1 + $clog2(2**(ACCU_WIDTH-lo_width(i)-1)+SIMD); + endfunction : hi_width + localparam int unsigned LO_WIDTH_MAX = OFFSETS[1] - OFFSETS[0]; + localparam int unsigned HI_WIDTH_MAX = hi_width(2); + + localparam int unsigned A_WIDTH = 23 + 2*VERSION; // Width of A datapath + + // Compute the count of descendants for all nodes in the reduction trees. typedef int unsigned leave_load_t[2*SIMD-1]; function leave_load_t init_leave_loads(); automatic leave_load_t res; @@ -79,8 +129,6 @@ module mvu_4sx4u #( assign vld = L[5]; // Stages #1 - #3: DSP Lanes + cross-lane canaries duplicated with SIMD parallelism - localparam int unsigned D[4:0] = '{ ACCU_WIDTH+22, 22, 15, 8, 0 }; // Lane offsets - localparam int unsigned PIPE_COUNT = (PE+3)/4; for(genvar c = 0; c < PIPE_COUNT; c++) begin : genPipes @@ -88,7 +136,7 @@ module mvu_4sx4u #( localparam int unsigned PE_END = PE < 4*(c+1)? PE : 4*(c+1); localparam int unsigned PE_REM = 4*(c+1) - PE_END; - uwire [57:0] p3[SIMD]; + uwire [47:0] p3[SIMD]; uwire signed [ 1:0] h3[SIMD][3]; for(genvar s = 0; s < SIMD; s++) begin : genSIMD @@ -98,10 +146,10 @@ module mvu_4sx4u #( logic [26:0] dd; logic [ 1:0] xx[3:1]; if(1) begin : blkVectorize - uwire [3:0] ww[PE_END - PE_BEG]; + uwire signed [3:0] ww[PE_END - PE_BEG]; for(genvar pe = 0; pe < PE_END - PE_BEG; pe++) begin assign ww[pe] = w[PE_BEG + pe][s]; - if(pe) begin + if(pe > 0) begin if(BEHAVIORAL) assign xx[pe + PE_REM] = zero? 0 : ww[pe] * a[s]; `ifndef VERILATOR else begin @@ -123,8 +171,19 @@ module mvu_4sx4u #( dd = '0; aa = '0; for(int unsigned pe = 0; pe < PE_END - PE_BEG; pe++) begin - dd[D[pe + PE_REM]+:3] = ww[pe]; - aa[D[pe + PE_REM]+ 3] = ww[pe][3]; + automatic int unsigned ofs = OFFSETS[pe + PE_REM]; + dd[ofs+:3] = ww[pe]; + assert(!NARROW_WEIGHTS || rst || !en || zero || (ww[pe] != -8)) else begin + $warning("%m: Weight of -8 violates NARROW_WEIGHTS commitment."); + end + + // The sign of the weights is generally put on the subtracted A port. + // However, when coinciding with the actual sign bit position of the + // multiplier input path, it also goes onto the D input. This prevents + // sign extensions that may happen when a DSP primitive is auto-promoted + // to a newer generation. + if(ofs+3 == A_WIDTH-1) dd[ofs+3] = ww[pe][3]; + else aa[ofs+3] = ww[pe][3]; end end end : blkVectorize @@ -135,6 +194,7 @@ module mvu_4sx4u #( // rst can be only applied to AD and zero only to B // with the same effect as zeroing both. if(BEHAVIORAL) begin : genBehav + // Stage #1: Input Refine logic signed [17:0] B1 = 0; always_ff @(posedge clk) begin @@ -142,7 +202,7 @@ module mvu_4sx4u #( else if(en) B1 <= bb; end - logic signed [26:0] AD1 = 0; + logic signed [A_WIDTH-1:0] AD1 = 0; always_ff @(posedge clk) begin if(rst) AD1 <= 0; else if(en) AD1 <= dd - aa; @@ -429,14 +489,14 @@ module mvu_4sx4u #( X1 <= xx; X2 <= X1; foreach(X3[i]) begin - X3[i] <= X2[i] + (L[3]?
2'h0 : pp[OFFSETS[i]+:2]); end end end // Derive actual cross-lane overflows for(genvar i = 0; i < 3; i++) begin - assign h3[s][i] = pp[D[i+1]+:2] - X3[i+1]; + assign h3[s][i] = pp[OFFSETS[i+1]+:2] - X3[i+1]; end assign p3[s] = pp; @@ -445,48 +505,59 @@ module mvu_4sx4u #( // Stage #4: Cross-SIMD Reduction // Count leaves reachable from each node - localparam leave_load_t LEAVE_LOAD = SIMD > 1 ? init_leave_loads() : '{ default: 1}; // SIMD=1 requires no adder tree, so zero-ing out, otherwise init_leave_loads ends up in infinite loop + localparam leave_load_t LEAVE_LOAD = SIMD > 1 ? init_leave_loads() : '{ default: 1 }; // SIMD=1 requires no adder tree, so zero-ing out, otherwise init_leave_loads ends up in infinite loop - uwire signed [ACCU_WIDTH -1:0] up4; - uwire signed [ACCU_WIDTH -8:0] hi4[3]; - uwire [$clog2(SIMD)+7:0] lo4[3]; + uwire signed [ACCU_WIDTH-1:0] up4; + uwire signed [ HI_WIDTH_MAX-1:0] hi4[3]; + uwire [$clog2(SIMD)+LO_WIDTH_MAX-1:0] lo4[3]; for(genvar i = 0; i < 4; i++) begin - localparam int unsigned LO_WIDTH = D[i+1] - D[i]; - localparam int unsigned HI_WIDTH = ACCU_WIDTH - LO_WIDTH; // Conclusive high part accumulation - if(i >= PE_REM && i < 3) begin : genHi - // Adder Tree across all SIMD high contributions, each from [-1:1] - uwire signed [2*SIMD-2:0][$clog2(1+SIMD):0] tree; - for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = h3[s][i]; - for(genvar n = 0; n < SIMD-1; n++) begin - // Sum truncated to actual maximum bit width at this node - uwire signed [$clog2(1+LEAVE_LOAD[n]):0] s = $signed(tree[2*n+1]) + $signed(tree[2*n+2]); - assign tree[n] = s; - end + if(i < 3) begin : genHi + if(i < PE_REM) assign hi4[i] = '0; + else begin + localparam int unsigned HI_WIDTH = hi_width(i); + + // Adder Tree across all SIMD high contributions, each from [-1:1] + uwire signed [2*SIMD-2:0][$clog2(1+SIMD):0] tree; + for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = h3[s][i]; + for(genvar n = 0; n < SIMD-1; n++) begin + // Sum truncated to actual maximum bit width at this node + uwire signed [$clog2(1+LEAVE_LOAD[n]):0] s = $signed(tree[2*n+1]) + $signed(tree[2*n+2]); + assign tree[n] = s; + end + + // High Sideband Accumulation + logic signed [HI_WIDTH-1:0] Hi4 = 0; + always_ff @(posedge clk) begin + if(rst) Hi4 <= 0; + else if(en) begin + automatic logic signed [HI_WIDTH:0] h = $signed(L[4]? 0 : Hi4) + $signed(tree[0]); + assert(h[HI_WIDTH] == h[HI_WIDTH-1]) else begin + $error("%m: Accumulation overflow for ACCU_WIDTH=%0d", ACCU_WIDTH); + $stop; + end + Hi4 <= h; + end + end + assign hi4[i] = Hi4; - // High Sideband Accumulation - logic signed [HI_WIDTH-1:0] Hi4 = 0; - always_ff @(posedge clk) begin - if(rst) Hi4 <= 0; - else if(en) Hi4 <= (L[4]? 
0 : Hi4) + $signed(tree[0]); end - assign hi4[i] = Hi4; end : genHi - else if (i < 3) begin : genHiZero - assign hi4[i] = '0; - end : genHiZero - // Conclusive low part accumulation - if(i >= PE_REM) begin : blkLo + // Conclusive low part accumulation (all unsigned arithmetic) + if(i < PE_REM) assign lo4[i] = '0; + else begin : genLo + localparam int unsigned LO_WIDTH = lo_width(i); + // Adder Tree across all SIMD low contributions localparam int unsigned ROOT_WIDTH = $clog2(1 + SIMD*(2**LO_WIDTH-1)); uwire [2*SIMD-2:0][ROOT_WIDTH-1:0] tree; - for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = p3[s][D[i]+:LO_WIDTH]; + for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = p3[s][OFFSETS[i]+:LO_WIDTH]; for(genvar n = 0; n < SIMD-1; n++) begin // Sum truncated to actual maximum bit width at this node localparam int unsigned NODE_WIDTH = $clog2(1 + LEAVE_LOAD[n]*(2**LO_WIDTH-1)); - uwire [NODE_WIDTH-1:0] s = $signed(tree[2*n+1]) + $signed(tree[2*n+2]); + uwire [NODE_WIDTH-1:0] s = tree[2*n+1] + tree[2*n+2]; assign tree[n] = s; end @@ -498,10 +569,7 @@ module mvu_4sx4u #( if(i == 3) assign up4 = Lo4; else assign lo4[i] = Lo4; - end : blkLo - else begin : blkLoZero - assign lo4[i] = '0; - end : blkLoZero + end : genLo end @@ -511,9 +579,9 @@ module mvu_4sx4u #( if(rst) Res5 <= '{ default: 0 }; else if(en) begin Res5[3] <= up4 - hi4[2]; - Res5[2] <= $signed({ hi4[2], {(D[3] - D[2]){1'b0}} }) + $signed({ 1'b0, lo4[2] }) - hi4[1]; - Res5[1] <= $signed({ hi4[1], {(D[2] - D[1]){1'b0}} }) + $signed({ 1'b0, lo4[1] }) - hi4[0]; - Res5[0] <= $signed({ hi4[0], {(D[1] - D[0]){1'b0}} }) + $signed({ 1'b0, lo4[0] }); + Res5[2] <= $signed({ hi4[2], {(lo_width(2)){1'b0}} }) + $signed({ 1'b0, lo4[2] }) - hi4[1]; + Res5[1] <= $signed({ hi4[1], {(lo_width(1)){1'b0}} }) + $signed({ 1'b0, lo4[1] }) - hi4[0]; + Res5[0] <= $signed({ hi4[0], {(lo_width(0)){1'b0}} }) + $signed({ 1'b0, lo4[0] }); end end diff --git a/finn-rtllib/mvu/mvu_8sx8u_dsp48.sv b/finn-rtllib/mvu/mvu_8sx8u_dsp48.sv index fbf48784f0..414c4b0be0 100644 --- a/finn-rtllib/mvu/mvu_8sx8u_dsp48.sv +++ b/finn-rtllib/mvu/mvu_8sx8u_dsp48.sv @@ -91,7 +91,7 @@ module mvu_8sx8u_dsp48 #( localparam int unsigned PE_END = PE < 2*(c+1)? PE : 2*(c+1); localparam int unsigned PE_REM = 2*(c+1) - PE_END; - uwire [57:0] p3[SIMD]; + uwire [47:0] p3[SIMD]; uwire signed [ 1:0] h3[SIMD]; for(genvar s = 0; s < SIMD; s++) begin : genSIMD @@ -447,13 +447,30 @@ module mvu_8sx8u_dsp48 #( // Count leaves reachable from each node localparam leave_load_t LEAVE_LOAD = SIMD > 1 ? init_leave_loads() : '{ default: 0}; // SIMD=1 requires no adder tree, so zero-ing out, otherwise init_leave_loads ends up in infinite loop - uwire signed [ACCU_WIDTH -1:0] up4; - uwire signed [ACCU_WIDTH -SINGLE_PROD_WIDTH:0] hi4; - uwire [$clog2(SIMD)+SINGLE_PROD_WIDTH-1:0] lo4; + // Range of Cross-lane Contribution Tracked in Hi4 + /* + * - Assumption: ACCU_WIDTH bounds right lane value at any point in time. 
+ * - The value x beyond the lane boundary is hence bounded by: + * -2^(w-1) <= x <= 2^(w-1)-1 with w = ACCU_WIDTH - D[1] + * - This value decomposes into the tracked overflow h and the overflow l + * from the low SIMD lane reduction with: + * 0 <= l <= SIMD + * - From x = l + h follows: + * h = x - l + * -2^(w-1) - SIMD <= h <= 2^(w-1)-1 + * - This required bit width of the two's complement representation of this + * signed value is determined by its lower bound to be at least: + * 1 + $clog2(2^(w-1)+SIMD) + */ + localparam int unsigned HI_WIDTH = 1 + $clog2(2**(ACCU_WIDTH-D[1]-1)+SIMD); + + uwire signed [ACCU_WIDTH -1:0] up4; + uwire signed [HI_WIDTH -1:0] hi4; + uwire [$clog2(SIMD)+D[1]-1:0] lo4; // Conclusive high part accumulation if(PE_REM == 0) begin : genHi - localparam int unsigned HI_WIDTH = ACCU_WIDTH - D[1]; + // Adder Tree across all SIMD high contributions, each from [-1:1] uwire signed [2*SIMD-2:0][$clog2(1+SIMD):0] tree; for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = h3[s]; @@ -466,8 +483,15 @@ module mvu_8sx8u_dsp48 #( // High Sideband Accumulation logic signed [HI_WIDTH-1:0] Hi4 = 0; always_ff @(posedge clk) begin - if(rst) Hi4 <= 0; - else if(en) Hi4 <= (L[4]? 0 : Hi4) + $signed(tree[0]); + if(rst) Hi4 <= 0; + else if(en) begin + automatic logic signed [HI_WIDTH:0] h = $signed(L[4]? 0 : Hi4) + $signed(tree[0]); + assert(h[HI_WIDTH] == h[HI_WIDTH-1]) else begin + $error("%m: Accumulation overflow for ACCU_WIDTH=%0d", ACCU_WIDTH); + $stop; + end + Hi4 <= h; + end end assign hi4 = Hi4; end : genHi @@ -479,14 +503,14 @@ module mvu_8sx8u_dsp48 #( localparam int unsigned LO_WIDTH = D[i+1] - D[i]; // Conclusive low part accumulation if(i >= PE_REM) begin : blkLo - // Adder Tree across all SIMD low contributions + // Adder Tree across all SIMD low contributions (all unsigned arithmetic) localparam int unsigned ROOT_WIDTH = $clog2(1 + SIMD*(2**LO_WIDTH-1)); uwire [2*SIMD-2:0][ROOT_WIDTH-1:0] tree; for(genvar s = 0; s < SIMD; s++) assign tree[SIMD-1+s] = p3[s][D[i]+:LO_WIDTH]; for(genvar n = 0; n < SIMD-1; n++) begin // Sum truncated to actual maximum bit width at this node localparam int unsigned NODE_WIDTH = $clog2(1 + LEAVE_LOAD[n]*(2**LO_WIDTH-1)); - uwire [NODE_WIDTH-1:0] s = $signed(tree[2*n+1]) + $signed(tree[2*n+2]); + uwire [NODE_WIDTH-1:0] s = tree[2*n+1] + tree[2*n+2]; assign tree[n] = s; end diff --git a/finn-rtllib/mvu/mvu_vvu_axi.sv b/finn-rtllib/mvu/mvu_vvu_axi.sv index 6498530113..35325abdf9 100644 --- a/finn-rtllib/mvu/mvu_vvu_axi.sv +++ b/finn-rtllib/mvu/mvu_vvu_axi.sv @@ -55,6 +55,7 @@ module mvu_vvu_axi #( int unsigned ACTIVATION_WIDTH, int unsigned WEIGHT_WIDTH, int unsigned ACCU_WIDTH, + bit NARROW_WEIGHTS = 0, bit SIGNED_ACTIVATIONS = 0, bit PUMPED_COMPUTE = 0, @@ -306,8 +307,22 @@ module mvu_vvu_axi #( .last(dsp_last), .zero(dsp_zero), .w(dsp_w), .a(dsp_a), .vld(dsp_vld), .p(dsp_p) ); - "mvu_4sx4u": - mvu_4sx4u #(.PE(PE), .SIMD(DSP_SIMD), .ACCU_WIDTH(ACCU_WIDTH), .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL)) core ( + "mvu_4sx4u_dsp48e1": + mvu_4sx4u #( + .PE(PE), .SIMD(DSP_SIMD), + .ACCU_WIDTH(ACCU_WIDTH), .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), .NARROW_WEIGHTS(NARROW_WEIGHTS), + .VERSION(1), .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL) + ) core ( + .clk(dsp_clk), .rst, .en(dsp_en), + .last(dsp_last), .zero(dsp_zero), .w(dsp_w), .a(dsp_a), + .vld(dsp_vld), .p(dsp_p) + ); + "mvu_4sx4u_dsp48e2": + mvu_4sx4u #( + .PE(PE), .SIMD(DSP_SIMD), + .ACCU_WIDTH(ACCU_WIDTH), .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), 
.NARROW_WEIGHTS(NARROW_WEIGHTS), + .VERSION(2), .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL) + ) core ( .clk(dsp_clk), .rst, .en(dsp_en), .last(dsp_last), .zero(dsp_zero), .w(dsp_w), .a(dsp_a), .vld(dsp_vld), .p(dsp_p) diff --git a/finn-rtllib/mvu/mvu_vvu_axi_wrapper.v b/finn-rtllib/mvu/mvu_vvu_axi_wrapper.v index 50c15c1b02..4edf676008 100644 --- a/finn-rtllib/mvu/mvu_vvu_axi_wrapper.v +++ b/finn-rtllib/mvu/mvu_vvu_axi_wrapper.v @@ -42,6 +42,7 @@ module $MODULE_NAME_AXI_WRAPPER$ #( parameter ACTIVATION_WIDTH = $ACTIVATION_WIDTH$, parameter WEIGHT_WIDTH = $WEIGHT_WIDTH$, parameter ACCU_WIDTH = $ACCU_WIDTH$, + parameter NARROW_WEIGHTS = $NARROW_WEIGHTS$, parameter SIGNED_ACTIVATIONS = $SIGNED_ACTIVATIONS$, parameter SEGMENTLEN = $SEGMENTLEN$, parameter FORCE_BEHAVIORAL = $FORCE_BEHAVIORAL$, @@ -77,7 +78,7 @@ module $MODULE_NAME_AXI_WRAPPER$ #( mvu_vvu_axi #( .IS_MVU(IS_MVU), .COMPUTE_CORE(COMPUTE_CORE), .PUMPED_COMPUTE(PUMPED_COMPUTE), .MW(MW), .MH(MH), .PE(PE), .SIMD(SIMD), - .ACTIVATION_WIDTH(ACTIVATION_WIDTH), .WEIGHT_WIDTH(WEIGHT_WIDTH), .ACCU_WIDTH(ACCU_WIDTH), + .ACTIVATION_WIDTH(ACTIVATION_WIDTH), .WEIGHT_WIDTH(WEIGHT_WIDTH), .ACCU_WIDTH(ACCU_WIDTH), .NARROW_WEIGHTS(NARROW_WEIGHTS), .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), .SEGMENTLEN(SEGMENTLEN), .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL) ) inst ( .ap_clk(ap_clk), diff --git a/finn-rtllib/mvu/tb/mvu_axi_tb.sv b/finn-rtllib/mvu/tb/mvu_axi_tb.sv index 4ed7b4bf5f..f16c40db34 100644 --- a/finn-rtllib/mvu/tb/mvu_axi_tb.sv +++ b/finn-rtllib/mvu/tb/mvu_axi_tb.sv @@ -37,18 +37,18 @@ module mvu_axi_tb(); // Matrix & parallelism config localparam bit IS_MVU = 1; localparam string COMPUTE_CORE = "mvu_4sx4u"; - localparam int unsigned MW = 120; - localparam int unsigned MH = 40; - localparam int unsigned SIMD = 20; - localparam int unsigned PE = 10; - localparam int unsigned SEGMENTLEN = 2.0; - localparam bit FORCE_BEHAVIORAL = 1; + localparam int unsigned MW = 96; + localparam int unsigned MH = 32; + localparam int unsigned SIMD = 48; + localparam int unsigned PE = 16; + localparam int unsigned SEGMENTLEN = 2; + localparam bit FORCE_BEHAVIORAL = 0; localparam bit M_REG_LUT = 1; // Bit-width config localparam int unsigned ACTIVATION_WIDTH = 4; localparam int unsigned WEIGHT_WIDTH = 4; localparam int unsigned ACCU_WIDTH = ACTIVATION_WIDTH+WEIGHT_WIDTH+$clog2(MW); - localparam bit SIGNED_ACTIVATIONS = 0; + localparam bit SIGNED_ACTIVATIONS = 1; // Simulation constants localparam int unsigned NF = MH/PE; localparam int unsigned SF = MW/SIMD; @@ -70,7 +70,7 @@ module mvu_axi_tb(); uwire ap_clk = clk; - // Generate activations + // Generate shared Activations typedef logic [SIMD-1:0][ACTIVATION_WIDTH-1:0] activation_t; typedef activation_t activation_vector_t[SF]; @@ -82,148 +82,174 @@ module mvu_axi_tb(); activation_vector_t ACTIVATIONS = init_ACTIVATIONS(); - struct { - activation_t dat; - logic vld; - logic rdy; - } activations; - - initial begin - activations.vld = 0; - activations.dat = 'X; - @(posedge clk iff ap_rst_n); - - for (int i=0; i= 0; - @(posedge clk); - end while (!(activations.vld === 1 && activations.rdy === 1)); + // Run parallel instances across DSP versions and NARROW_WEIGHTS + bit [2:1][1:0] done = { 2: 2'b00, 1: 2'b01 }; // [ver][narrow] + always_comb begin + if(&done) begin + $display("Test completed."); + $finish; end - - activations.vld <= 0; - activations.dat <= 'x; end - // Generate weights - typedef logic [PE-1:0][SIMD-1:0][WEIGHT_WIDTH-1:0] weight_t; - typedef weight_t weight_matrix_t[NF][SF]; - - function weight_matrix_t init_WEIGHTS; 
- automatic weight_matrix_t res; - std::randomize(res); - return res; - endfunction : init_WEIGHTS; - - weight_matrix_t WEIGHTS = init_WEIGHTS(); - - struct { - weight_t dat; - logic vld; - logic rdy; - } weights; - - initial begin - weights.vld = 0; - weights.dat = 'X; - @(posedge clk iff ap_rst_n); - - weights.vld <= 1; - for (int i=0; i1 - // Hence, we need to 'untangle' the input stream, i.e. [..][SIMD*PE][..] --> [..][PE][SIMD][..] - // Note that for each 'SIMD' (S) and 'PE' (P) element, we have something like: - // (S_0, P_0), ..., (S_0, P_i), (S_1, P_0), ..., (S_1, P_i), ..., (S_i, P_i) which we need to 'untangle' to - // (S_0, P_0), ..., (S_i, P_0), (S_0, P_1), ..., (S_i,, P_1), ..., (S_i, P_i) - for (int i = 0; i < NF; i++) begin - for (int j = 0; j < SF; j++) begin - for (int k = 0; k < PE; k++) begin - for (int l = 0; l < SIMD; l++) begin - if (SIGNED_ACTIVATIONS) - res[i][k] = $signed(res[i][k]) + $signed(a[j][l]) * $signed(w[i][j][k][l]); - else - res[i][k] = $signed(res[i][k]) + $signed({1'b0, a[j][l]}) * $signed(w[i][j][k][l]); + // Instance-specifc Weights (may be narrow) + typedef logic [PE-1:0][SIMD-1:0][WEIGHT_WIDTH-1:0] weight_t; + typedef weight_t weight_matrix_t[NF][SF]; + + function weight_matrix_t init_WEIGHTS; + automatic weight_matrix_t res; + std::randomize(res); + if(narrow) begin // increment all weights of -8 + for(int unsigned nf = 0; nf < NF; nf++) begin + for(int unsigned sf = 0; sf < SF; sf++) begin + for(int unsigned pe = 0; pe < PE; pe++) begin + for(int unsigned simd = 0; simd < SIMD; simd++) begin + if(res[nf][sf][pe][simd] == (1 << (WEIGHT_WIDTH-1))) begin + res[nf][sf][pe][simd]++; + end + end + end end end end + return res; + endfunction : init_WEIGHTS; + + weight_matrix_t WEIGHTS = init_WEIGHTS(); + + // Weight Feed + struct { + weight_t dat; + logic vld; + logic rdy; + } weights; + + initial begin + weights.vld = 0; + weights.dat = 'X; + @(posedge clk iff ap_rst_n); + + weights.vld <= 1; + for(int unsigned i = 0; i < NF; i++) begin + for(int unsigned j = 0; j < SF; j++) begin + weights.dat <= WEIGHTS[i][j]; + @(posedge clk iff weights.rdy); + end + end + weights.vld <= 0; + weights.dat <= 'x; end - return res; - endfunction : check_output; - output_vector_t GOLDEN_OUTPUT = check_output(ACTIVATIONS, WEIGHTS); - - int unsigned NF_CNT = 0; - initial begin - outputs.rdy = 0; - while (NF_CNT < NF) begin - // Loop until both rdy & vld are asserted - do begin - outputs.rdy <= $urandom()%7 >= 0; - @(posedge clk iff ap_rst_n); - end while (!(outputs.rdy === 1 && outputs.vld === 1)); - - // Compare produced outputs against golden outputs - foreach(outputs.dat[i]) begin - assert ($signed(outputs.dat[i]) == $signed(GOLDEN_OUTPUT[NF_CNT][i])) $display(">>> [t=%0t] Test succeeded (NF=%0d)! Computed / GOLDEN = %0d / %0d", $time, NF_CNT, $signed(outputs.dat[i]), $signed(GOLDEN_OUTPUT[NF_CNT][i])); - else begin - $error(">>> [t=%0t] TEST failed (NF=%0d)! 
Computed / GOLDEN = %0d / %0d", $time, NF_CNT, $signed(outputs.dat[i]), $signed(GOLDEN_OUTPUT[NF_CNT][i])); - $stop; + // Function to compute golden output + // a: [SF][SIMD-1:0][ACTIVATION_WIDTH-1:0] + // a: [SF][PE*SIMD-1:0][ACTIVATION_WIDTH-1:0] + // w: [NF][SF][PE-1:0][SIMD-1:0][WEIGHT_WIDTH-1:0] + typedef logic signed [PE-1:0][ACCU_WIDTH-1:0] output_t; + typedef output_t output_vector_t [NF]; + + struct { + output_t dat; + logic vld; + logic rdy; + } outputs; + + function output_vector_t check_output(activation_vector_t a, weight_matrix_t w); + automatic output_vector_t res = '{default: 0}; + // The input stream will have the channels interleaved for VVU when PE>1 + // Hence, we need to 'untangle' the input stream, i.e. [..][SIMD*PE][..] --> [..][PE][SIMD][..] + // Note that for each 'SIMD' (S) and 'PE' (P) element, we have something like: + // (S_0, P_0), ..., (S_0, P_i), (S_1, P_0), ..., (S_1, P_i), ..., (S_i, P_i) which we need to 'untangle' to + // (S_0, P_0), ..., (S_i, P_0), (S_0, P_1), ..., (S_i,, P_1), ..., (S_i, P_i) + for (int i = 0; i < NF; i++) begin + for (int j = 0; j < SF; j++) begin + for (int k = 0; k < PE; k++) begin + for (int l = 0; l < SIMD; l++) begin + if (SIGNED_ACTIVATIONS) + res[i][k] = $signed(res[i][k]) + $signed(a[j][l]) * $signed(w[i][j][k][l]); + else + res[i][k] = $signed(res[i][k]) + $signed({1'b0, a[j][l]}) * $signed(w[i][j][k][l]); + end + end + end + end + return res; + endfunction : check_output; + + output_vector_t GOLDEN_OUTPUT = check_output(ACTIVATIONS, WEIGHTS); + initial begin + outputs.rdy = 0; + @(posedge clk iff ap_rst_n); + + for(int unsigned nf = 0; nf < NF; nf++) begin + while($urandom()%13 == 0) @(posedge clk); + outputs.rdy <= 1; + @(posedge clk iff outputs.vld); + outputs.rdy <= 0; + + // Compare produced outputs against golden outputs + foreach(outputs.dat[i]) begin + assert ($signed(outputs.dat[i]) == $signed(GOLDEN_OUTPUT[nf][i])) begin + $display(">>> [t=%0t] Test succeeded (nf=%0d)! Computed / GOLDEN = %0d / %0d", $time, nf, $signed(outputs.dat[i]), $signed(GOLDEN_OUTPUT[nf][i])); + end + else begin + $error(">>> [t=%0t] TEST failed (nf=%0d)! Computed / GOLDEN = %0d / %0d", $time, nf, $signed(outputs.dat[i]), $signed(GOLDEN_OUTPUT[nf][i])); + $stop; + end end end - NF_CNT += 1; + done[ver][narrow] = 1; end - $finish; - end - - // Instantiate DUT - mvu_vvu_axi #( - .IS_MVU(IS_MVU), - .COMPUTE_CORE(COMPUTE_CORE), - .MW(MW), - .MH(MH), - .PE(PE), - .SIMD(SIMD), - .ACTIVATION_WIDTH(ACTIVATION_WIDTH), - .WEIGHT_WIDTH(WEIGHT_WIDTH), - .ACCU_WIDTH(ACCU_WIDTH), - .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), - .SEGMENTLEN(SEGMENTLEN), - .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL), - .M_REG_LUT(M_REG_LUT) - ) - dut ( - .ap_clk, .ap_rst_n, .s_axis_weights_tdata({ {WEIGHT_WIDTH_BA_DELTA{1'b0}}, weights.dat }), .s_axis_weights_tvalid(weights.vld), - .s_axis_weights_tready(weights.rdy), .s_axis_input_tdata({ {ACTIVATION_WIDTH_BA_DELTA{1'b0}}, activations.dat }), .s_axis_input_tvalid(activations.vld), - .s_axis_input_tready(activations.rdy), .m_axis_output_tdata(outputs.dat), .m_axis_output_tvalid(outputs.vld), - .m_axis_output_tready(outputs.rdy) - ); + // Instantiate DUT + mvu_vvu_axi #( + .IS_MVU(IS_MVU), + .COMPUTE_CORE(ver == 1? 
"mvu_4sx4u_dsp48e1" : "mvu_4sx4u_dsp48e2"), + .MW(MW), + .MH(MH), + .PE(PE), + .SIMD(SIMD), + .ACTIVATION_WIDTH(ACTIVATION_WIDTH), + .WEIGHT_WIDTH(WEIGHT_WIDTH), + .ACCU_WIDTH(ACCU_WIDTH), + .NARROW_WEIGHTS(narrow), + .SIGNED_ACTIVATIONS(SIGNED_ACTIVATIONS), + .SEGMENTLEN(SEGMENTLEN), + .FORCE_BEHAVIORAL(FORCE_BEHAVIORAL), + .M_REG_LUT(M_REG_LUT) + ) + dut ( + .ap_clk, .ap_rst_n, .s_axis_weights_tdata({ {WEIGHT_WIDTH_BA_DELTA{1'b0}}, weights.dat }), .s_axis_weights_tvalid(weights.vld), + .s_axis_weights_tready(weights.rdy), .s_axis_input_tdata({ {ACTIVATION_WIDTH_BA_DELTA{1'b0}}, activations.dat }), .s_axis_input_tvalid(activations.vld), + .s_axis_input_tready(activations.rdy), .m_axis_output_tdata(outputs.dat), .m_axis_output_tvalid(outputs.vld), + .m_axis_output_tready(outputs.rdy) + ); + + end : genNarrowWide + end : genVersion endmodule : mvu_axi_tb diff --git a/finn-rtllib/thresholding/hdl/thresholding_axi.sv b/finn-rtllib/thresholding/hdl/thresholding_axi.sv index 5c7182b214..39756e5c2b 100644 --- a/finn-rtllib/thresholding/hdl/thresholding_axi.sv +++ b/finn-rtllib/thresholding/hdl/thresholding_axi.sv @@ -39,8 +39,9 @@ *****************************************************************************/ module thresholding_axi #( - int unsigned N, // output precision - int unsigned K, // input/threshold precision + int unsigned N, // output precision + int unsigned WI, // input precision + int unsigned WT, // threshold precision int unsigned C = 1, // Channels int unsigned PE = 1, // Processing Parallelism, requires C = k*PE @@ -96,7 +97,7 @@ module thresholding_axi #( //- AXI Stream - Input -------------- output logic s_axis_tready, input logic s_axis_tvalid, - input logic [((PE*K+7)/8)*8-1:0] s_axis_tdata, + input logic [((PE*WI+7)/8)*8-1:0] s_axis_tdata, //- AXI Stream - Output ------------- input logic m_axis_tready, @@ -109,13 +110,13 @@ module thresholding_axi #( uwire cfg_en; uwire cfg_we; uwire [ADDR_BITS-3:0] cfg_a; - uwire [K -1:0] cfg_d; + uwire [WT -1:0] cfg_d; uwire cfg_rack; - uwire [K -1:0] cfg_q; + uwire [WT -1:0] cfg_q; if(USE_AXILITE) begin uwire [ADDR_BITS-1:0] cfg_a0; - axi4lite_if #(.ADDR_WIDTH(ADDR_BITS), .DATA_WIDTH(32), .IP_DATA_WIDTH(K)) axi ( + axi4lite_if #(.ADDR_WIDTH(ADDR_BITS), .DATA_WIDTH(32), .IP_DATA_WIDTH(WT)) axi ( .aclk(ap_clk), .aresetn(ap_rst_n), .awready(s_axilite_AWREADY), .awvalid(s_axilite_AWVALID), .awaddr(s_axilite_AWADDR), .awprot('x), @@ -143,10 +144,42 @@ module thresholding_axi #( assign cfg_d = 'x; end + //----------------------------------------------------------------------- + // Cast Inputs into Threshold Data Type + uwire [PE-1:0][WT-1:0] idat; + for(genvar pe = 0; pe < PE; pe++) begin + if(WT == WI) begin : genCopy + assign idat[pe] = s_axis_tdata[pe*WI+:WI]; + end : genCopy + else begin + initial begin + if(FPARG) begin + $error("%m: Can't cast floating-point type."); + $finish; + end + end + + if(WT > WI) begin : genWiden + assign idat[pe] = { {(WT-WI){SIGNED? s_axis_tdata[(pe+1)*WI-1] : 1'b0}}, s_axis_tdata[pe*WI+:WI] }; + end : genWiden + else begin : genNarrow + // Saturate for clipping inputs + if(!SIGNED) begin + assign idat[pe] = |s_axis_tdata[pe*WI+WT+:WI-WT]? '1 : s_axis_tdata[pe*WI+:WT]; + end + else begin + assign idat[pe] = + (s_axis_tdata[pe*WI+WT+:WI-WT] == '1) || (s_axis_tdata[pe*WI+WT+:WI-WT] == '0)? 
s_axis_tdata[pe*WI+:WT] : + {s_axis_tdata[(pe+1)*WI-1], {(WT-1){!s_axis_tdata[(pe+1)*WI-1]}}}; + end + end : genNarrow + end + end + //----------------------------------------------------------------------- // Kernel Implementation thresholding #( - .N(N), .K(K), .C(C), .PE(PE), + .N(N), .K(WT), .C(C), .PE(PE), .SIGNED(SIGNED), .FPARG(FPARG), .BIAS(BIAS), .THRESHOLDS_PATH(THRESHOLDS_PATH), .USE_CONFIG(USE_AXILITE), .DEPTH_TRIGGER_URAM(DEPTH_TRIGGER_URAM), .DEPTH_TRIGGER_BRAM(DEPTH_TRIGGER_BRAM), @@ -157,7 +190,7 @@ module thresholding_axi #( .cfg_en, .cfg_we, .cfg_a, .cfg_d, .cfg_rack, .cfg_q, - .irdy(s_axis_tready), .ivld(s_axis_tvalid), .idat(s_axis_tdata), + .irdy(s_axis_tready), .ivld(s_axis_tvalid), .idat, .ordy(m_axis_tready), .ovld(m_axis_tvalid), .odat(m_axis_tdata) ); diff --git a/finn-rtllib/thresholding/hdl/thresholding_template_wrapper.v b/finn-rtllib/thresholding/hdl/thresholding_template_wrapper.v index f35db156f6..49a1f2bd8b 100644 --- a/finn-rtllib/thresholding/hdl/thresholding_template_wrapper.v +++ b/finn-rtllib/thresholding/hdl/thresholding_template_wrapper.v @@ -33,8 +33,9 @@ */ module $MODULE_NAME_AXI_WRAPPER$ #( - parameter N = $N$, // output precision - parameter K = $M$, // input/threshold precision + parameter N = $N$, // output precision + parameter WI = $WI$, // input precision + parameter WT = $WT$, // threshold precision parameter C = $C$, // Channels parameter PE = $PE$, // Processing Parallelism, requires C = k*PE @@ -87,7 +88,7 @@ module $MODULE_NAME_AXI_WRAPPER$ #( //- AXI Stream - Input -------------- output in0_V_TREADY, input in0_V_TVALID, - input [((PE*K+7)/8)*8-1:0] in0_V_TDATA, + input [((PE*WI+7)/8)*8-1:0] in0_V_TDATA, //- AXI Stream - Output ------------- input out_V_TREADY, @@ -96,7 +97,7 @@ module $MODULE_NAME_AXI_WRAPPER$ #( ); thresholding_axi #( - .N(N), .K(K), .C(C), .PE(PE), + .N(N), .WI(WI), .WT(WT), .C(C), .PE(PE), .SIGNED(SIGNED), .FPARG(FPARG), .BIAS(BIAS), diff --git a/finn-rtllib/thresholding/sim/thresholding_axi_tb.sv b/finn-rtllib/thresholding/sim/thresholding_axi_tb.sv index 429fb7776f..cfd875f5c4 100644 --- a/finn-rtllib/thresholding/sim/thresholding_axi_tb.sv +++ b/finn-rtllib/thresholding/sim/thresholding_axi_tb.sv @@ -110,7 +110,7 @@ module thresholding_axi_tb #( uwire ovld; uwire [PE-1:0][N-1:0] odat; - thresholding_axi #(.N(N), .K(K), .C(C), .PE(PE), .SIGNED(0), .USE_AXILITE(1)) dut ( + thresholding_axi #(.N(N), .WI(K), .WT(K), .C(C), .PE(PE), .SIGNED(0), .USE_AXILITE(1)) dut ( .ap_clk(clk), .ap_rst_n(!rst), // Configuration diff --git a/notebooks/advanced/3_folding.ipynb b/notebooks/advanced/3_folding.ipynb index 8c7b97d6c6..e9527a2ef7 100644 --- a/notebooks/advanced/3_folding.ipynb +++ b/notebooks/advanced/3_folding.ipynb @@ -159,6 +159,7 @@ "metadata": {}, "outputs": [], "source": [ + "from functools import partial\n", "from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer\n", "from finn.analysis.fpgadataflow.res_estimation import res_estimation" ] @@ -216,7 +217,7 @@ "metadata": {}, "outputs": [], "source": [ - "res_dict = model.analysis(res_estimation)\n", + "res_dict = model.analysis(partial(res_estimation, fpgapart=\"xc7z020clg400-1\"))\n", "res_dict" ] }, @@ -363,7 +364,7 @@ "metadata": {}, "outputs": [], "source": [ - "res_dict_updated = model.analysis(res_estimation)\n", + "res_dict_updated = model.analysis(partial(res_estimation, fpgapart=\"xc7z020clg400-1\"))\n", "res_dict_updated" ] }, @@ -567,7 +568,7 @@ "from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers\n", 
"\n", "model_updated = model_updated.transform(InsertDWC())\n", - "model_updated = model_updated.transform(SpecializeLayers())\n", + "model_updated = model_updated.transform(SpecializeLayers(\"xc7z020clg400-1\"))\n", "model_updated = model_updated.transform(GiveUniqueNodeNames())" ] }, @@ -596,7 +597,7 @@ "outputs": [], "source": [ "model_dwc = ModelWrapper(\"cybsec_DWC.onnx\")\n", - "res_dict_dwc = model_dwc.analysis(res_estimation)\n", + "res_dict_dwc = model_dwc.analysis(partial(res_estimation, fpgapart=\"xc7z020clg400-1\"))\n", "res_dict_dwc" ] }, diff --git a/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb b/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb index 5c2f10310f..1eaaeb138a 100644 --- a/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb +++ b/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb @@ -277,7 +277,7 @@ "metadata": {}, "outputs": [], "source": [ - "model = ModelWrapper(export_onnx_path_cleaned)\n", + "model = ModelWrapper(export_onnx_path_converted)\n", "input_dict = {\"global_in\": nph.to_array(input_tensor)}\n", "output_dict = oxe.execute_onnx(model, input_dict)\n", "produced_finn = output_dict[list(output_dict.keys())[0]]\n", diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 3141d54ddf..8b8cff8ee9 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -282,6 +282,12 @@ "metadata": {}, "outputs": [], "source": [ + "from finn.util.basic import pynq_part_map\n", + "# change this if you have a different PYNQ board, see list above\n", + "pynq_board = \"Pynq-Z1\"\n", + "fpga_part = pynq_part_map[pynq_board]\n", + "target_clk_ns = 10\n", + "\n", "import finn.transformation.fpgadataflow.convert_to_hw_layers as to_hw\n", "from finn.transformation.fpgadataflow.create_dataflow_partition import (\n", " CreateDataflowPartition,\n", @@ -314,7 +320,7 @@ "# save the dataflow partition with a different name for easier access\n", "# and specialize the layers to HLS variants\n", "dataflow_model = ModelWrapper(dataflow_model_filename)\n", - "dataflow_model = dataflow_model.transform(SpecializeLayers())\n", + "dataflow_model = dataflow_model.transform(SpecializeLayers(fpga_part))\n", "dataflow_model.save(build_dir + \"/end2end_cnv_w1a1_dataflow_model.onnx\")" ] }, @@ -432,12 +438,9 @@ "metadata": {}, "outputs": [], "source": [ - "test_pynq_board = \"Pynq-Z1\"\n", - "target_clk_ns = 10\n", - "\n", "from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild\n", "model = ModelWrapper(build_dir+\"/end2end_cnv_w1a1_folded.onnx\")\n", - "model = model.transform(ZynqBuild(platform = test_pynq_board, period_ns = target_clk_ns))" + "model = model.transform(ZynqBuild(platform = pynq_board, period_ns = target_clk_ns))" ] }, { diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index bbaa74dbff..675ba23d2d 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -547,6 +547,36 @@ "thresh_node_inst.set_nodeattr(\"preferred_impl_style\", \"hls\")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll define two helper variables that describe the Xilinx FPGA part name and the PYNQ board name that we are targeting." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# print the names of the supported PYNQ boards\n", + "from finn.util.basic import pynq_part_map\n", + "print(pynq_part_map.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# change this if you have a different PYNQ board, see list above\n", + "pynq_board = \"Pynq-Z1\"\n", + "fpga_part = pynq_part_map[pynq_board]\n", + "target_clk_ns = 10" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -561,7 +591,7 @@ "outputs": [], "source": [ "from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers\n", - "model = model.transform(SpecializeLayers())\n", + "model = model.transform(SpecializeLayers(fpga_part))\n", "\n", "model.save(build_dir+\"/tfc_w1_a1_specialize_layers.onnx\")\n", "showInNetron(build_dir+\"/tfc_w1_a1_specialize_layers.onnx\")" @@ -687,32 +717,7 @@ "source": [ "## 3. Hardware Build \n", "\n", - "We're finally ready to start generating hardware from our network. Depending on whether you want to target a Zynq or Alveo platform, FINN offers two transformations to build the accelerator, integrate into an appropriate shell and build a bitfile. These are `ZynqBuild` and `VitisBuild` for Zynq and Alveo, respectively. In this notebook we'll demonstrate the `ZynqBuild` as these boards are more common and it's much faster to complete bitfile generation for the smaller FPGAs found on them.\n", - "\n", - "As we will be dealing with FPGA synthesis tools in these tasks, we'll define two helper variables that describe the Xilinx FPGA part name and the PYNQ board name that we are targeting." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# print the names of the supported PYNQ boards\n", - "from finn.util.basic import pynq_part_map\n", - "print(pynq_part_map.keys())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# change this if you have a different PYNQ board, see list above\n", - "pynq_board = \"Pynq-Z1\"\n", - "fpga_part = pynq_part_map[pynq_board]\n", - "target_clk_ns = 10" + "We're finally ready to start generating hardware from our network. Depending on whether you want to target a Zynq or Alveo platform, FINN offers two transformations to build the accelerator, integrate into an appropriate shell and build a bitfile. These are `ZynqBuild` and `VitisBuild` for Zynq and Alveo, respectively. In this notebook we'll demonstrate the `ZynqBuild` as these boards are more common and it's much faster to complete bitfile generation for the smaller FPGAs found on them." 
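To tie the notebook changes above together, here is a minimal sketch of the flow they now share: resolve the FPGA part from the board name, specialize layers for that part, estimate resources with the part threaded through functools.partial, and hand the board name to ZynqBuild. The imports and call signatures are taken from these diffs; the model file name is a placeholder, and the snippet is illustrative rather than a drop-in notebook cell.

from functools import partial
from qonnx.core.modelwrapper import ModelWrapper
from finn.analysis.fpgadataflow.res_estimation import res_estimation
from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers
from finn.util.basic import pynq_part_map

pynq_board = "Pynq-Z1"                 # any key of pynq_part_map
fpga_part = pynq_part_map[pynq_board]  # the Xilinx part behind that board
target_clk_ns = 10

model = ModelWrapper("dataflow_model.onnx")  # placeholder file name
# SpecializeLayers now needs the part so HLS/RTL layer variants can be chosen per device.
model = model.transform(SpecializeLayers(fpga_part))
# res_estimation now takes fpgapart as well; partial() adapts it to the one-argument
# callback signature that ModelWrapper.analysis() expects.
res_dict = model.analysis(partial(res_estimation, fpgapart=fpga_part))
# ZynqBuild stitches the accelerator into a Zynq shell and runs bitfile generation;
# it is keyed on the board name rather than the bare part.
model = model.transform(ZynqBuild(platform=pynq_board, period_ns=target_clk_ns))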
] }, { diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb index a07a8d2254..aacd12ef05 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb @@ -396,7 +396,7 @@ "child_model = child_model.transform(InsertDWC()) \n", "child_model = child_model.transform(InsertFIFO(create_shallow_fifos=True))\n", "# DWC and FIFOs need to be specialized to either HLS or RTL variants\n", - "child_model = child_model.transform(SpecializeLayers())\n", + "child_model = child_model.transform(SpecializeLayers(test_fpga_part))\n", "child_model.save(build_dir + \"/test.onnx\");\n", "child_model = child_model.transform(GiveUniqueNodeNames())\n", "child_model = child_model.transform(PrepareIP(test_fpga_part, target_clk_ns))\n", diff --git a/requirements.txt b/requirements.txt index c2973f9432..d4ca45cb37 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,6 @@ psutil==5.9.4 pyscaffold==4.4 scipy==1.10.1 setupext-janitor>=1.1.2 -setuptools==68.2.2 sigtools==4.0.1 toposort==1.7.0 vcdvcd==1.0.5 diff --git a/run-docker.sh b/run-docker.sh index e732492728..88fabff2fa 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -100,6 +100,8 @@ SCRIPTPATH=$(dirname "$SCRIPT") : ${NVIDIA_VISIBLE_DEVICES=""} : ${DOCKER_BUILDKIT="1"} : ${FINN_SINGULARITY=""} +: ${FINN_SKIP_XRT_DOWNLOAD=""} +: ${FINN_XRT_PATH=""} DOCKER_INTERACTIVE="" @@ -181,14 +183,27 @@ if [ "$FINN_SKIP_DEP_REPOS" = "0" ]; then ./fetch-repos.sh fi +# If xrt path given, copy .deb file to this repo +# Be aware that we assume a certain name of the xrt deb version +if [ -d "$FINN_XRT_PATH" ];then + cp $FINN_XRT_PATH/$XRT_DEB_VERSION.deb . + export LOCAL_XRT=1 +fi + # Build the FINN Docker image if [ "$FINN_DOCKER_PREBUILT" = "0" ] && [ -z "$FINN_SINGULARITY" ]; then # Need to ensure this is done within the finn/ root folder: OLD_PWD=$(pwd) cd $SCRIPTPATH - docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --tag=$FINN_DOCKER_TAG $FINN_DOCKER_BUILD_EXTRA . + docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --build-arg SKIP_XRT=$FINN_SKIP_XRT_DOWNLOAD --build-arg LOCAL_XRT=$LOCAL_XRT --tag=$FINN_DOCKER_TAG $FINN_DOCKER_BUILD_EXTRA . cd $OLD_PWD fi + +# Remove local xrt.deb file from repo +if [ ! 
-z "$LOCAL_XRT" ];then + rm $XRT_DEB_VERSION.deb +fi + # Launch container with current directory mounted # important to pass the --init flag here for correct Vivado operation, see: # https://stackoverflow.com/questions/55733058/vivado-synthesis-hangs-in-docker-container-spawned-by-jenkins diff --git a/src/finn/analysis/fpgadataflow/post_synth_res.py b/src/finn/analysis/fpgadataflow/post_synth_res.py index 7b65b60fa7..f7a3e6e2ba 100644 --- a/src/finn/analysis/fpgadataflow/post_synth_res.py +++ b/src/finn/analysis/fpgadataflow/post_synth_res.py @@ -58,14 +58,13 @@ def post_synth_res(model, override_synth_report_filename=None): else: raise Exception("Please run synthesis first") - # TODO build these indices based on table headers instead of harcoding restype_to_ind_default = { "LUT": 2, "SRL": 5, "FF": 6, "BRAM_36K": 7, "BRAM_18K": 8, - "DSP48": 9, + "DSP": 10, } restype_to_ind_vitis = { "LUT": 4, @@ -74,13 +73,36 @@ def post_synth_res(model, override_synth_report_filename=None): "BRAM_36K": 9, "BRAM_18K": 10, "URAM": 11, - "DSP48": 12, + "DSP": 12, } - if model.get_metadata_prop("platform") == "alveo": - restype_to_ind = restype_to_ind_vitis + # format: (human_readable_name_in_report, canonical_name) + res_types_to_search = [ + ("Total LUTs", "LUT"), + ("SRLs", "SRL"), + ("FFs", "FF"), + ("RAMB36", "BRAM_36K"), + ("RAMB18", "BRAM_18K"), + ("URAM", "URAM"), + ("DSP Blocks", "DSP"), + ] + + # try to infer resource type to table index by + # looking at the names in headings + header_row = root.findall(".//*[@contents='Instance']/..") + if header_row != []: + headers = [x.attrib["contents"] for x in list(header_row[0])] + restype_to_ind = {} + for res_type_name, res_type in res_types_to_search: + if res_type_name in headers: + restype_to_ind[res_type] = headers.index(res_type_name) else: - restype_to_ind = restype_to_ind_default + # could not infer resource types from header + # fall back to default indices + if model.get_metadata_prop("platform") == "alveo": + restype_to_ind = restype_to_ind_vitis + else: + restype_to_ind = restype_to_ind_default def get_instance_stats(inst_name): row = root.findall(".//*[@contents='%s']/.." % inst_name) diff --git a/src/finn/analysis/fpgadataflow/res_estimation.py b/src/finn/analysis/fpgadataflow/res_estimation.py index a6be1f1f53..fb12eed837 100644 --- a/src/finn/analysis/fpgadataflow/res_estimation.py +++ b/src/finn/analysis/fpgadataflow/res_estimation.py @@ -31,7 +31,7 @@ from finn.util.fpgadataflow import is_hls_node, is_rtl_node -def res_estimation(model): +def res_estimation(model, fpgapart): """Estimates the resources needed for the given model. Ensure that all nodes have unique names (by calling the GiveUniqueNodeNames transformation) prior to calling this analysis pass to ensure all nodes are @@ -43,12 +43,12 @@ def res_estimation(model): for node in model.graph.node: if is_hls_node(node) or is_rtl_node(node): inst = registry.getCustomOp(node) - res_dict[node.name] = inst.node_res_estimation() + res_dict[node.name] = inst.node_res_estimation(fpgapart) return res_dict -def res_estimation_complete(model): +def res_estimation_complete(model, fpgapart): """Estimates the resources needed for the given model and all values for resource-related switches. 
Ensure that all nodes have unique names (by calling the GiveUniqueNodeNames @@ -66,21 +66,21 @@ def res_estimation_complete(model): orig_restype = inst.get_nodeattr("resType") res_dict[node.name] = [] inst.set_nodeattr("resType", "dsp") - res_dict[node.name].append(inst.node_res_estimation()) + res_dict[node.name].append(inst.node_res_estimation(fpgapart)) inst.set_nodeattr("resType", "lut") - res_dict[node.name].append(inst.node_res_estimation()) + res_dict[node.name].append(inst.node_res_estimation(fpgapart)) inst.set_nodeattr("resType", orig_restype) elif op_type.startswith("ConvolutionInputGenerator"): orig_ramstyle = inst.get_nodeattr("ram_style") res_dict[node.name] = [] inst.set_nodeattr("ram_style", "block") - res_dict[node.name].append(inst.node_res_estimation()) + res_dict[node.name].append(inst.node_res_estimation(fpgapart)) inst.set_nodeattr("ram_style", "distributed") - res_dict[node.name].append(inst.node_res_estimation()) + res_dict[node.name].append(inst.node_res_estimation(fpgapart)) inst.set_nodeattr("ram_style", "ultra") - res_dict[node.name].append(inst.node_res_estimation()) + res_dict[node.name].append(inst.node_res_estimation(fpgapart)) inst.set_nodeattr("ram_style", orig_ramstyle) else: - res_dict[node.name] = [inst.node_res_estimation()] + res_dict[node.name] = [inst.node_res_estimation(fpgapart)] return res_dict diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 443d2df54c..ecc1d28c53 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -34,6 +34,7 @@ import warnings from copy import deepcopy from distutils.dir_util import copy_tree +from functools import partial from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount @@ -62,6 +63,7 @@ aggregate_dict_keys, op_and_param_counts, ) +from finn.analysis.fpgadataflow.post_synth_res import post_synth_res from finn.analysis.fpgadataflow.res_estimation import ( res_estimation, res_estimation_complete, @@ -469,11 +471,15 @@ def step_generate_estimate_reports(model: ModelWrapper, cfg: DataflowBuildConfig estimate_layer_cycles = model.analysis(exp_cycles_per_layer) with open(report_dir + "/estimate_layer_cycles.json", "w") as f: json.dump(estimate_layer_cycles, f, indent=2) - estimate_layer_resources = model.analysis(res_estimation) + estimate_layer_resources = model.analysis( + partial(res_estimation, fpgapart=cfg._resolve_fpga_part()) + ) estimate_layer_resources["total"] = aggregate_dict_keys(estimate_layer_resources) with open(report_dir + "/estimate_layer_resources.json", "w") as f: json.dump(estimate_layer_resources, f, indent=2) - estimate_layer_resources_complete = model.analysis(res_estimation_complete) + estimate_layer_resources_complete = model.analysis( + partial(res_estimation_complete, fpgapart=cfg._resolve_fpga_part()) + ) with open(report_dir + "/estimate_layer_config_alternatives.json", "w") as f: json.dump(estimate_layer_resources_complete, f, indent=2) # need to call AnnotateCycles before dataflow_performance @@ -540,7 +546,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): if cfg.auto_fifo_depths: if cfg.auto_fifo_strategy == "characterize": model = model.transform(InsertDWC()) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(cfg._resolve_fpga_part())) model = model.transform(GiveUniqueNodeNames()) model = 
model.transform( PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) @@ -558,7 +564,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): create_shallow_fifos=True, ) ) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(cfg._resolve_fpga_part())) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) elif cfg.auto_fifo_strategy == "largefifo_rtlsim": @@ -590,7 +596,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): # need to make sure all FIFOs are created so that their depth can be # set by ApplyConfig, so create_shallow_fifos=True model = model.transform(InsertFIFO(create_shallow_fifos=True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(cfg._resolve_fpga_part())) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) if cfg.folding_config_file is not None: @@ -801,6 +807,11 @@ def step_synthesize_bitfile(model: ModelWrapper, cfg: DataflowBuildConfig): model.get_metadata_prop("vivado_synth_rpt"), report_dir + "/post_synth_resources.xml", ) + + post_synth_resources = model.analysis(post_synth_res) + with open(report_dir + "/post_synth_resources.json", "w") as f: + json.dump(post_synth_resources, f, indent=2) + vivado_pynq_proj_dir = model.get_metadata_prop("vivado_pynq_proj") timing_rpt = ( "%s/finn_zynq_link.runs/impl_1/top_wrapper_timing_summary_routed.rpt" @@ -825,6 +836,10 @@ def step_synthesize_bitfile(model: ModelWrapper, cfg: DataflowBuildConfig): model.get_metadata_prop("vivado_synth_rpt"), report_dir + "/post_synth_resources.xml", ) + + post_synth_resources = model.analysis(post_synth_res) + with open(report_dir + "/post_synth_resources.json", "w") as f: + json.dump(post_synth_resources, f, indent=2) else: raise Exception("Unrecognized shell_flow_type: " + str(cfg.shell_flow_type)) print("Bitfile written into " + bitfile_dir) diff --git a/src/finn/custom_op/fpgadataflow/hls/matrixvectoractivation_hls.py b/src/finn/custom_op/fpgadataflow/hls/matrixvectoractivation_hls.py index 94f8cc0845..cae1c30eb6 100644 --- a/src/finn/custom_op/fpgadataflow/hls/matrixvectoractivation_hls.py +++ b/src/finn/custom_op/fpgadataflow/hls/matrixvectoractivation_hls.py @@ -118,7 +118,7 @@ def lut_estimation(self): c0 + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) + c2 ) - def dsp_estimation(self): + def dsp_estimation(self, fpgapart): # multiplication P = self.get_nodeattr("PE") res_type = self.get_nodeattr("resType") diff --git a/src/finn/custom_op/fpgadataflow/hls/streamingdatawidthconverter_hls.py b/src/finn/custom_op/fpgadataflow/hls/streamingdatawidthconverter_hls.py index d1f58d3e87..4619a1756b 100644 --- a/src/finn/custom_op/fpgadataflow/hls/streamingdatawidthconverter_hls.py +++ b/src/finn/custom_op/fpgadataflow/hls/streamingdatawidthconverter_hls.py @@ -78,12 +78,6 @@ def strm_decl(self): self.get_instream_width(), self.hls_sname(), self.hls_sname() ) ) - if self.needs_lcm(): - self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> intermediate ("intermediate");'.format( - self.get_iowidth_lcm() - ) - ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream> out_{} ("out_{}");'.format( self.get_outstream_width(), self.hls_sname(), self.hls_sname() diff --git a/src/finn/custom_op/fpgadataflow/hls/vectorvectoractivation_hls.py b/src/finn/custom_op/fpgadataflow/hls/vectorvectoractivation_hls.py index 
3e10b640c5..f9ba68e6b6 100644 --- a/src/finn/custom_op/fpgadataflow/hls/vectorvectoractivation_hls.py +++ b/src/finn/custom_op/fpgadataflow/hls/vectorvectoractivation_hls.py @@ -112,7 +112,7 @@ def lut_estimation(self): c0 + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) + c2 ) - def dsp_estimation(self): + def dsp_estimation(self, fpgapart): # multiplication P = self.get_nodeattr("PE") res_type = self.get_nodeattr("resType") diff --git a/src/finn/custom_op/fpgadataflow/hwcustomop.py b/src/finn/custom_op/fpgadataflow/hwcustomop.py index 57c0fec067..b40b8f3074 100644 --- a/src/finn/custom_op/fpgadataflow/hwcustomop.py +++ b/src/finn/custom_op/fpgadataflow/hwcustomop.py @@ -136,7 +136,7 @@ def get_rtlsim(self): sim = PyVerilator(rtlsim_so) return sim - def node_res_estimation(self): + def node_res_estimation(self, fpgapart): """Returns summarized resource estimation of BRAMs and LUTs of the node as a dictionary.""" ret = dict() @@ -145,7 +145,7 @@ def node_res_estimation(self): ret["LUT"] = self.lut_estimation() ret["URAM"] = self.uram_estimation() ret["URAM_efficiency"] = self.uram_efficiency_estimation() - ret["DSP"] = self.dsp_estimation() + ret["DSP"] = self.dsp_estimation(fpgapart) return ret def bram_efficiency_estimation(self): @@ -173,7 +173,7 @@ def lut_estimation(self): HWCustomOp class but has to be filled by every node""" return 0 - def dsp_estimation(self): + def dsp_estimation(self, fpgapart): """Function for DSP resource estimation, is member function of HWCustomOp class but has to be filled by every node""" return 0 diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 7bbe4c04e9..1c86ae7b7a 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -159,8 +159,8 @@ def execute_node(self, context, graph): if result.ndim == 4: # NCHW to NHWC result = result.transpose((0, 2, 3, 1)) - - context[node.output[0]] = result + oshape = context[node.output[0]].shape + context[node.output[0]] = result.reshape(oshape) def verify_node(self): info_messages = [] diff --git a/src/finn/custom_op/fpgadataflow/rtl/matrixvectoractivation_rtl.py b/src/finn/custom_op/fpgadataflow/rtl/matrixvectoractivation_rtl.py index d48b3a918d..3e81aa93e0 100644 --- a/src/finn/custom_op/fpgadataflow/rtl/matrixvectoractivation_rtl.py +++ b/src/finn/custom_op/fpgadataflow/rtl/matrixvectoractivation_rtl.py @@ -32,7 +32,7 @@ from finn.custom_op.fpgadataflow.matrixvectoractivation import MVAU from finn.custom_op.fpgadataflow.rtlbackend import RTLBackend -from finn.util.basic import get_rtlsim_trace_depth, make_build_dir +from finn.util.basic import get_dsp_block, get_rtlsim_trace_depth, make_build_dir from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy try: @@ -55,10 +55,7 @@ def __init__(self, onnx_node, **kwargs): super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): - my_attrs = { - # Flag to indicate if Versal device is targeted - "is_versal": ("i", False, 0, {0, 1}), - } + my_attrs = {} my_attrs.update(MVAU.get_nodeattr_types(self)) my_attrs.update(RTLBackend.get_nodeattr_types(self)) return my_attrs @@ -137,11 +134,12 @@ def execute_node(self, context, graph): def lut_estimation(self): return 0 - def dsp_estimation(self): + def dsp_estimation(self, fpgapart): # multiplication P = self.get_nodeattr("PE") Q = self.get_nodeattr("SIMD") - if self.get_nodeattr("is_versal"): + dsp_block = 
get_dsp_block(fpgapart) + if dsp_block == "DSP58": mult_dsp = P * np.ceil(Q / 3) else: mult_dsp = np.ceil(P / 4) * Q @@ -161,14 +159,24 @@ def instantiate_ip(self, cmd): ] for f in sourcefiles: cmd.append("add_files -norecurse %s" % (f)) - cmd.append( - "create_bd_cell -type hier -reference %s /%s/%s" - % ( - self.get_nodeattr("gen_top_module"), - self.onnx_node.name, - self.onnx_node.name, + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode == "internal_decoupled": + cmd.append( + "create_bd_cell -type hier -reference %s /%s/%s" + % ( + self.get_nodeattr("gen_top_module"), + self.onnx_node.name, + self.onnx_node.name, + ) + ) + else: + cmd.append( + "create_bd_cell -type hier -reference %s %s" + % ( + self.get_nodeattr("gen_top_module"), + self.onnx_node.name, + ) ) - ) def _resolve_segment_len(self, clk): # Insert pipeline registers in the DSP58 chain to meet target clock frequency @@ -186,7 +194,7 @@ def _resolve_segment_len(self, clk): dsp_chain_len = critical_path_dsps if critical_path_dsps < max_chain_len else max_chain_len return dsp_chain_len - def _resolve_impl_style(self, fpgapart): + def _resolve_impl_style(self, dsp_block): # Based on target device and activation/weight-width, choose the # supported RTL compute core assert ( @@ -198,15 +206,15 @@ def _resolve_impl_style(self, fpgapart): act_width = self.get_input_datatype(0).bitwidth() weight_width = self.get_input_datatype(1).bitwidth() - is_versal_family = self.get_nodeattr("is_versal") - if is_versal_family: + if dsp_block == "DSP58": return "mvu_vvu_8sx9_dsp58" else: - act_width = self.get_input_datatype(0).bitwidth() - weight_width = self.get_input_datatype(1).bitwidth() - if (act_width == 4 and weight_width == 4) and not (is_versal_family): - return "mvu_4sx4u" + if act_width <= 4 and weight_width <= 4: + if dsp_block == "DSP48E1": + return "mvu_4sx4u_dsp48e1" + elif dsp_block == "DSP48E2": + return "mvu_4sx4u_dsp48e2" else: return "mvu_8sx8u_dsp48" @@ -216,6 +224,11 @@ def generate_hdl(self, model, fpgapart, clk): self.generate_params(model, code_gen_dir) template_path, code_gen_dict = self.prepare_codegen_default(fpgapart, clk) + # determine if weights are narrow range and add parameter to code gen dict + weights = model.get_initializer(self.onnx_node.input[1]) + wdt = self.get_weight_datatype() + narrow_weights = 0 if np.min(weights) == wdt.min() else 1 + code_gen_dict["$NARROW_WEIGHTS$"] = str(narrow_weights) # add general parameters to dictionary code_gen_dict["$MODULE_NAME_AXI_WRAPPER$"] = [self.get_verilog_top_module_name()] # save top module name so we can refer to it after this node has been renamed @@ -248,9 +261,10 @@ def generate_hdl(self, model, fpgapart, clk): def prepare_codegen_default(self, fpgapart, clk): template_path = os.environ["FINN_ROOT"] + "/finn-rtllib/mvu/mvu_vvu_axi_wrapper.v" + dsp_block = get_dsp_block(fpgapart) code_gen_dict = {} code_gen_dict["$IS_MVU$"] = [str(1)] - code_gen_dict["$COMPUTE_CORE$"] = [self._resolve_impl_style(fpgapart)] + code_gen_dict["$COMPUTE_CORE$"] = [self._resolve_impl_style(dsp_block)] code_gen_dict["$MW$"] = [str(self.get_nodeattr("MW"))] code_gen_dict["$MH$"] = [str(self.get_nodeattr("MH"))] code_gen_dict["$PE$"] = [str(self.get_nodeattr("PE"))] diff --git a/src/finn/custom_op/fpgadataflow/rtl/streamingfifo_rtl.py b/src/finn/custom_op/fpgadataflow/rtl/streamingfifo_rtl.py index dfae607622..f8f27cb647 100644 --- a/src/finn/custom_op/fpgadataflow/rtl/streamingfifo_rtl.py +++ b/src/finn/custom_op/fpgadataflow/rtl/streamingfifo_rtl.py @@ -95,7 +95,7 @@ def 
generate_hdl(self, model, fpgapart, clk): code_gen_dict["$TOP_MODULE_NAME$"] = topname # make instream width a multiple of 8 for axi interface in_width = self.get_instream_width_padded() - count_width = int(self.get_nodeattr("depth") - 1).bit_length() + count_width = int(self.get_nodeattr("depth")).bit_length() code_gen_dict["$COUNT_RANGE$"] = "[{}:0]".format(count_width - 1) code_gen_dict["$IN_RANGE$"] = "[{}:0]".format(in_width - 1) code_gen_dict["$OUT_RANGE$"] = "[{}:0]".format(in_width - 1) diff --git a/src/finn/custom_op/fpgadataflow/rtl/thresholding_rtl.py b/src/finn/custom_op/fpgadataflow/rtl/thresholding_rtl.py index 67b41d0165..c31f90af0b 100644 --- a/src/finn/custom_op/fpgadataflow/rtl/thresholding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/rtl/thresholding_rtl.py @@ -167,41 +167,47 @@ def prepare_codegen_rtl_values(self, model): their key value(s) in the RTL template files""" code_gen_dict = {} - # TODO check for sortedness and size here? thresholds = model.get_initializer(self.onnx_node.input[1]) bias = self.get_nodeattr("ActVal") # activation bias value output_data_type = self.get_nodeattr("outputDataType") # output precision input_data_type = self.get_nodeattr("inputDataType") # input/threshold precision o_bitwidth = DataType[output_data_type].bitwidth() + t_path = self.get_nodeattr("code_gen_dir_ipgen") + if self.get_nodeattr("runtime_writeable_weights") == 1: + thresh_file_name = f"{t_path}/memblock.dat" + self.make_weight_file(thresholds, "decoupled", thresh_file_name) + # The RTL expects 2^N-1 thresholds, but narrow range quantization will result in - # one less threshold, prepending a dummy threshold and reducing bias by 1 to compensate. + # one less threshold, prepending a dummy threshold (minimal possible value determined by + # input data type) and decrease the bias by 1. 
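The comment above, together with its continuation just below, describes how the RTL thresholding core is always fed 2**o_bitwidth - 1 steps even when narrow-range quantization delivers one step fewer. A minimal standalone sketch of that padding arithmetic, using a hypothetical 3-bit output and placeholder threshold values rather than the real node attributes:

import numpy as np

o_bitwidth = 3                                  # hypothetical output precision
expected_thresholds = 2**o_bitwidth - 1         # RTL expects 7 steps
thresholds = np.arange(6).reshape(1, 6)         # narrow-range case: only 6 steps present
bias = 0                                        # placeholder ActVal
n_thres_steps = thresholds.shape[1]
min_val = -8                                    # stand-in for the datatype minimum (e.g. INT4)
if expected_thresholds != n_thres_steps:
    thresholds = np.insert(thresholds, 0, min_val, axis=1)  # prepend dummy step
    bias = bias - 1                             # compensate for the extra threshold
    n_thres_steps += 1
print(thresholds.shape, bias, n_thres_steps)    # (1, 7) -1 7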
+ # Additionally, increase number of threshold steps to reflect new shape expected_thresholds = 2**o_bitwidth - 1 n_thres_steps = self.get_nodeattr("numSteps") - if expected_thresholds != n_thres_steps and DataType[input_data_type].signed() is not True: - min_val = np.amin(thresholds, axis=1) + wdt = self.get_weight_datatype() + if expected_thresholds != n_thres_steps: + min_val = wdt.min() thresholds = np.insert(thresholds, 0, min_val, axis=1) bias = bias - 1 + n_thres_steps += 1 # add dummy dimension as final dimension (that's what gets packed with next call) - thresholds = np.expand_dims(thresholds, axis=-1) - wdt = self.get_weight_datatype() + t_expand = np.expand_dims(thresholds, axis=-1) bw_hexdigit = roundup_to_integer_multiple(wdt.bitwidth(), 4) t_packed = pack_innermost_dim_as_hex_string( - thresholds, + t_expand, wdt, bw_hexdigit, prefix="", ) - t_path = self.get_nodeattr("code_gen_dir_ipgen") pe = self.get_nodeattr("PE") num_channels = self.get_nodeattr("NumChannels") # number of channels # If a single threshold value is found, broadcast the value - expected_shape = (num_channels, n_thres_steps) - if t_packed.shape == (1, 1): - t_packed = np.broadcast_to(t_packed, expected_shape) + if t_packed.shape[0] == 1: + t_packed = np.broadcast_to(t_packed, (pe, expected_thresholds)) + num_channels = pe channel_fold = int(num_channels / pe) @@ -235,9 +241,10 @@ def prepare_codegen_rtl_values(self, model): i_bitwidth = DataType[input_data_type].bitwidth() code_gen_dict["$N$"] = [str(o_bitwidth)] # output precision - convert bitwidth to string - code_gen_dict["$M$"] = [ - str(i_bitwidth) - ] # input/threshold precision - convert bitwidth to string + code_gen_dict["$WT$"] = [ + str(wdt.bitwidth()) + ] # threshold precision - convert bitwidth to string + code_gen_dict["$WI$"] = [str(i_bitwidth)] # input precision - convert bitwidth to string code_gen_dict["$C$"] = [str(num_channels)] # number of channels code_gen_dict["$BIAS$"] = [str(bias)] # activation bias value code_gen_dict["$PE$"] = [str(pe)] # requires C = M*PE @@ -255,7 +262,6 @@ def prepare_codegen_rtl_values(self, model): o_bits = 1 + math.ceil( math.log2(-bias if -bias >= 2 ** (o_bitwidth - 1) else 2**o_bitwidth + bias) ) - code_gen_dict["$O_BITS$"] = [str(int(o_bits))] rt_weights = self.get_nodeattr("runtime_writeable_weights") @@ -322,10 +328,6 @@ def generate_hdl(self, model, fpgapart, clk): # by PyVerilator and IPI generation self.set_nodeattr("gen_top_module", code_gen_dict["$TOP_MODULE$"][0]) - weights = model.get_initializer(self.onnx_node.input[1]) - weights_fname = f"{code_gen_dir}/memblock.dat" - self.make_weight_file(weights, "decoupled", weights_fname) - for rtl_file_path in self.get_rtl_file_paths(): # read in original RTL template file template_data = self.get_rtl_template_data(rtl_file_path) @@ -513,26 +515,32 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): * weight_file_name : filename for the weight file to be generated """ - threshold_tensor = self.get_hw_compatible_threshold_tensor(weights) - tdt = self.get_weight_datatype() - assert np.vectorize(tdt.allowed)( - threshold_tensor - ).all(), "Thresholds can't be expressed with type %s" % str(tdt) - + thresholds = weights pe = self.get_nodeattr("PE") ch = self.get_nodeattr("NumChannels") + output_data_type = self.get_nodeattr("outputDataType") # output precision + o_bitwidth = DataType[output_data_type].bitwidth() + # The RTL expects 2^N-1 thresholds, but narrow range quantization will result in + # one less threshold, prepending a dummy 
threshold (minimal possible value determined by + # input data type) and decrease the bias by 1. + # Additionally, increase number of threshold steps to reflect new shape + expected_thresholds = 2**o_bitwidth - 1 n_thres_steps = self.get_nodeattr("numSteps") + wdt = self.get_weight_datatype() + if expected_thresholds != n_thres_steps: + min_val = wdt.min() + thresholds = np.insert(thresholds, 0, min_val, axis=1) + n_thres_steps += 1 # If a single threshold value is found, broadcast the value - n_thres_steps = self.get_nodeattr("numSteps") - expected_shape = (ch, n_thres_steps) - if weights.shape == (1, 1): - weights = np.broadcast_to(weights, expected_shape) - - width_padded = roundup_to_integer_multiple(weights.shape[1], 4) - weight_padded = np.zeros((weights.shape[0], width_padded)) - weight_padded[: weights.shape[0], :n_thres_steps] = weights - weight_stream = [] + if thresholds.shape[0] == 1: + thresholds = np.broadcast_to(thresholds, (pe, expected_thresholds)) + ch = pe + + width_padded = roundup_to_integer_multiple(thresholds.shape[1], 2**o_bitwidth) + thresh_padded = np.zeros((thresholds.shape[0], width_padded)) + thresh_padded[: thresholds.shape[0], :n_thres_steps] = thresholds + thresh_stream = [] wdt = self.get_weight_datatype() bw_hexdigit = roundup_to_integer_multiple(wdt.bitwidth(), 32) padding = np.zeros(width_padded, dtype=np.int32) @@ -542,18 +550,18 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): for fold in range(cf): for c in range(2 ** (pe - 1).bit_length()): if (c == 0 or c % pe != 0) and c < pe: - for w in weight_padded[chan_ind]: - w_packed = pack_innermost_dim_as_hex_string( - [w], wdt, bw_hexdigit, prefix="" + for t in thresh_padded[chan_ind]: + t_packed = pack_innermost_dim_as_hex_string( + [t], wdt, bw_hexdigit, prefix="" ).item() - weight_stream.append(w_packed) + thresh_stream.append(t_packed) chan_ind += 1 else: for z in padding: - w_packed = pack_innermost_dim_as_hex_string( + t_packed = pack_innermost_dim_as_hex_string( [z], wdt, bw_hexdigit, prefix="" ).item() - weight_stream.append(w_packed) + thresh_stream.append(t_packed) with open(weight_file_name, "w") as f: - for val in weight_stream: + for val in thresh_stream: f.write(val + "\n") diff --git a/src/finn/custom_op/fpgadataflow/rtl/vectorvectoractivation_rtl.py b/src/finn/custom_op/fpgadataflow/rtl/vectorvectoractivation_rtl.py index 27fc9f10a1..32943d86cf 100644 --- a/src/finn/custom_op/fpgadataflow/rtl/vectorvectoractivation_rtl.py +++ b/src/finn/custom_op/fpgadataflow/rtl/vectorvectoractivation_rtl.py @@ -33,9 +33,8 @@ from finn.custom_op.fpgadataflow.rtlbackend import RTLBackend from finn.custom_op.fpgadataflow.vectorvectoractivation import VVAU -from finn.util.basic import get_rtlsim_trace_depth, make_build_dir +from finn.util.basic import get_rtlsim_trace_depth, is_versal, make_build_dir from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy -from finn.util.fpgadataflow import is_versal try: from pyverilator import PyVerilator @@ -143,7 +142,7 @@ def execute_node(self, context, graph): def lut_estimation(self): return 0 - def dsp_estimation(self): + def dsp_estimation(self, fpgapart): P = self.get_nodeattr("PE") Q = self.get_nodeattr("SIMD") return int(P * np.ceil(Q / 3)) @@ -162,14 +161,25 @@ def instantiate_ip(self, cmd): ] for f in sourcefiles: cmd.append("add_files -norecurse %s" % (f)) - cmd.append( - "create_bd_cell -type hier -reference %s /%s/%s" - % ( - self.get_nodeattr("gen_top_module"), - self.onnx_node.name, - self.onnx_node.name, + + 
mem_mode = self.get_nodeattr("mem_mode") + if mem_mode == "internal_decoupled": + cmd.append( + "create_bd_cell -type hier -reference %s /%s/%s" + % ( + self.get_nodeattr("gen_top_module"), + self.onnx_node.name, + self.onnx_node.name, + ) + ) + else: + cmd.append( + "create_bd_cell -type hier -reference %s %s" + % ( + self.get_nodeattr("gen_top_module"), + self.onnx_node.name, + ) ) - ) def generate_hdl(self, model, fpgapart, clk): # Generate params as part of IP preparation @@ -177,6 +187,11 @@ def generate_hdl(self, model, fpgapart, clk): self.generate_params(model, code_gen_dir) template_path, code_gen_dict = self.prepare_codegen_default(fpgapart, clk) + # determine if weights are narrow range and add parameter to code gen dict + weights = model.get_initializer(self.onnx_node.input[1]) + wdt = self.get_weight_datatype() + narrow_weights = 0 if np.min(weights) == wdt.min() else 1 + code_gen_dict["$NARROW_WEIGHTS$"] = str(narrow_weights) # add general parameters to dictionary code_gen_dict["$MODULE_NAME_AXI_WRAPPER$"] = [self.get_verilog_top_module_name()] # save top module name so we can refer to it after this node has been renamed diff --git a/src/finn/custom_op/fpgadataflow/thresholding.py b/src/finn/custom_op/fpgadataflow/thresholding.py index dde813a293..12cb76be4e 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding.py +++ b/src/finn/custom_op/fpgadataflow/thresholding.py @@ -242,6 +242,7 @@ def execute_node(self, context, graph): node = self.onnx_node inp_values = context[node.input[0]] th_val = context[node.input[1]] + out_bias = self.get_nodeattr("ActVal") # MT expects inputs to be in the shape (N,C,H,W) or (N, C) # if 4D then input values in context are (N,H,W,C) and need to # be transposed. @@ -249,16 +250,13 @@ def execute_node(self, context, graph): is_4d = len(inp_values.shape) == 4 if is_4d: inp_values = np.transpose(inp_values, (0, 3, 1, 2)) - y = multithreshold(inp_values, th_val) + y = multithreshold(inp_values, th_val, out_bias=out_bias) if is_4d: y = y.transpose(0, 2, 3, 1) act = DataType[self.get_nodeattr("outputDataType")] if act == DataType["BIPOLAR"]: # binary to bipolar y = 2 * y - 1 - else: - # signed offset - y += act.min() context[node.output[0]] = y def calc_tmem(self): diff --git a/src/finn/qnn-data/cpp/npy2vectorstream.hpp b/src/finn/qnn-data/cpp/npy2vectorstream.hpp new file mode 100644 index 0000000000..cd26401ebf --- /dev/null +++ b/src/finn/qnn-data/cpp/npy2vectorstream.hpp @@ -0,0 +1,72 @@ +#include +#include "cnpy.h" +#include "hls_stream.h" +#include "ap_int.h" +#include +#include +#include + +#ifdef DEBUG +#define DEBUG_NPY2VECTORSTREAM(x) std::cout << "[npy2vectorstream] " << x << std::endl; +#define DEBUG_VECTORSTREAM2NPY(x) std::cout << "[vectorstream2npy] " << x << std::endl; +#else +#define DEBUG_NPY2VECTORSTREAM(x) ; +#define DEBUG_VECTORSTREAM2NPY(x) ; +#endif + +template +void npy2vectorstream(const char * npy_path, hls::stream> & out_stream, bool reverse_inner = true, size_t numReps = 1) { + for (size_t rep = 0; rep < numReps; rep++) { + cnpy::NpyArray arr = cnpy::npy_load(npy_path); + DEBUG_NPY2VECTORSTREAM("word_size " << arr.word_size << " num_vals " << arr.num_vals) + if (arr.word_size != sizeof(NpyT)) { + throw "Npy array word size and specified NpyT size do not match"; + } + NpyT* loaded_data = arr.data(); + size_t outer_dim_elems = 1; + for (size_t dim = 0; dim < arr.shape.size() - 1; dim++) { + outer_dim_elems *= arr.shape[dim]; + } + size_t inner_dim_elems = arr.shape[arr.shape.size() - 1]; + DEBUG_NPY2VECTORSTREAM("n_outer " 
<< outer_dim_elems << " n_inner " << inner_dim_elems) + for (size_t outer_elem = 0; outer_elem < outer_dim_elems; outer_elem++) { + hls::vector vec; + for (size_t ii = 0; ii < inner_dim_elems; ii++) { + NpyT elemNpy = loaded_data[outer_elem * inner_dim_elems + ii]; + ElemT elem = loaded_data[outer_elem * inner_dim_elems + ii]; + DEBUG_NPY2VECTORSTREAM("npy2 elem = " << elem << ", loaded data = " << loaded_data[outer_elem * inner_dim_elems + ii]) + vec[ii] = elem; + } + out_stream << vec; + } + } +} + +template +void vectorstream2npy(hls::stream> & in_stream, const std::vector & shape, const char * npy_path, bool reverse_inner = false, size_t numReps = 1, size_t multi_pixel_out = 1) { + for(size_t rep = 0; rep < numReps; rep++) { + std::vector data_to_save; + size_t outer_dim_elems = 1; + for(size_t dim = 0; dim < shape.size()-1; dim++) { + outer_dim_elems *= shape[dim]; + } + size_t inner_dim_elems = shape[shape.size()-1] / multi_pixel_out; + DEBUG_VECTORSTREAM2NPY("n_outer " << outer_dim_elems << " n_inner " << inner_dim_elems << " n_multi_pixel_out " << multi_pixel_out) + for(size_t outer_elem = 0; outer_elem < outer_dim_elems; outer_elem++) { + for(size_t ii_multi_pixel_out = 0; ii_multi_pixel_out < multi_pixel_out; ii_multi_pixel_out++) { + // loop over multi_pixel_out blocks of inner_dim_elems separately, + // so that reverse_inner is not applied across multiple pixels + hls::vector elems; + in_stream >> elems; + for(size_t ii = 0; ii < inner_dim_elems; ii++) { + size_t i = ii_multi_pixel_out*inner_dim_elems; + i += reverse_inner ? inner_dim_elems-ii-1 : ii; + NpyT npyt = (NpyT) elems[i]; + DEBUG_VECTORSTREAM2NPY("elems[i] = " << elems[i] << ", NpyT = " << npyt) + data_to_save.push_back(npyt); + } + } + } + cnpy::npy_save(npy_path, &data_to_save[0], shape, "w"); + } +} diff --git a/src/finn/qnn-data/test_ext_weights/specialize_layers_config.json b/src/finn/qnn-data/test_ext_weights/specialize_layers_config.json new file mode 100644 index 0000000000..3218c2d89a --- /dev/null +++ b/src/finn/qnn-data/test_ext_weights/specialize_layers_config.json @@ -0,0 +1,30 @@ +{ + "Defaults": {}, + "Thresholding_0": { + "preferred_impl_style": "rtl" + }, + "MVAU_0": { + "preferred_impl_style": "rtl" + }, + "Thresholding_1": { + "preferred_impl_style": "rtl" + }, + "MVAU_1": { + "preferred_impl_style": "hls" + }, + "Thresholding_2": { + "preferred_impl_style": "rtl" + }, + "MVAU_2": { + "preferred_impl_style": "rtl" + }, + "Thresholding_3": { + "preferred_impl_style": "rtl" + }, + "MVAU_3": { + "preferred_impl_style": "rtl" + }, + "LabelSelect_0": { + "preferred_impl_style": "hls" + } +} diff --git a/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json b/src/finn/qnn-data/test_ext_weights/tfc-w2a2-extw.json similarity index 66% rename from src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json rename to src/finn/qnn-data/test_ext_weights/tfc-w2a2-extw.json index 9fe22443dc..29484e2940 100644 --- a/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json +++ b/src/finn/qnn-data/test_ext_weights/tfc-w2a2-extw.json @@ -3,22 +3,31 @@ "Thresholding_rtl_0": { "PE": 49 }, - "MVAU_hls_0": { + "MVAU_rtl_0": { "PE": 16, "SIMD": 49, "ram_style": "block" }, - "MVAU_hls_1": { + "Thresholding_rtl_1": { + "PE": 16 + }, + "MVAU_hls_0": { "PE": 8, "SIMD": 8, "mem_mode": "external" }, - "MVAU_hls_2": { + "Thresholding_rtl_2": { + "PE": 8 + }, + "MVAU_rtl_1": { "PE": 8, "SIMD": 8, "mem_mode": "external" }, - "MVAU_hls_3": { + "Thresholding_rtl_3": { + "PE": 8 + }, + "MVAU_rtl_2": { "PE": 10, "SIMD": 8, "ram_style": 
"distributed" diff --git a/src/finn/transformation/fpgadataflow/annotate_resources.py b/src/finn/transformation/fpgadataflow/annotate_resources.py index f07a5186d5..ee2da2094c 100644 --- a/src/finn/transformation/fpgadataflow/annotate_resources.py +++ b/src/finn/transformation/fpgadataflow/annotate_resources.py @@ -26,8 +26,8 @@ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - import qonnx.custom_op.registry as registry +from functools import partial from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.base import Transformation @@ -49,15 +49,16 @@ class AnnotateResources(Transformation): chosen mode (e.g. HLSSynthIP for hls) was previously run. """ - def __init__(self, mode, override_res_dict=None): + def __init__(self, mode, fpgapart, override_res_dict=None): super().__init__() self.mode = mode + self.fpgapart = fpgapart self.res_dict = override_res_dict def apply(self, model): graph = model.graph if self.mode == "estimate": - res_fxn = res_estimation + res_fxn = partial(res_estimation, fpgapart=self.fpgapart) elif self.mode == "hls": res_fxn = hls_synth_res_estimation elif self.mode == "synth": @@ -77,7 +78,9 @@ def apply(self, model): # recurse into model to manually annotate per-layer resources sdp_model_filename = getCustomOp(node).get_nodeattr("model") sdp_model = ModelWrapper(sdp_model_filename) - sdp_model = sdp_model.transform(AnnotateResources(self.mode, self.res_dict)) + sdp_model = sdp_model.transform( + AnnotateResources(self.mode, self.fpgapart, self.res_dict) + ) sdp_dict = sdp_model.get_metadata_prop("res_total_" + self.mode) sdp_dict = eval(sdp_dict) # save transformed model diff --git a/src/finn/transformation/fpgadataflow/convert_to_hw_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hw_layers.py index 897d714bf8..e14181b140 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hw_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hw_layers.py @@ -199,10 +199,16 @@ def apply(self, model): thl_in_shape = model.get_tensor_shape(thl_input) thl_thres_shape = model.get_tensor_shape(thl_threshold) idt = model.get_tensor_datatype(thl_input) - + tdt = model.get_tensor_datatype(thl_threshold) # skip conversion for layers with float input if not idt.is_integer(): continue + assert tdt.is_integer(), ( + node.name + + """: MultiThreshold cannot be converted + because thresholds are float type. 
Input data type is integer, + please run RoundAndClipThresholds to convert thresholds to integer.""" + ) # check layout of inputs/outputs, and convert if needed # check layout and convert if necessary @@ -253,7 +259,7 @@ def apply(self, model): PE=pe, numSteps=thl_thres_shape[1], inputDataType=idt.name, - weightDataType=idt.name, + weightDataType=tdt.name, outputDataType=odt.name, numInputVectors=list(thl_in_shape[:-1]), ActVal=actval, diff --git a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py index fc2047b08e..63ce2d3cbf 100644 --- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py +++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py @@ -322,7 +322,7 @@ def apply(self, model): prep_transforms = [ InsertIODMA(self.axi_port_width), InsertDWC(), - SpecializeLayers(), + SpecializeLayers(self.fpga_part), Floorplan(), CreateDataflowPartition(partition_model_dir=self.partition_model_dir), ] @@ -338,7 +338,7 @@ def apply(self, model): dataflow_model_filename = sdp_node.get_nodeattr("model") kernel_model = ModelWrapper(dataflow_model_filename) kernel_model = kernel_model.transform(InsertFIFO()) - kernel_model = kernel_model.transform(SpecializeLayers()) + kernel_model = kernel_model.transform(SpecializeLayers(self.fpga_part)) kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix)) kernel_model.save(dataflow_model_filename) kernel_model = kernel_model.transform(PrepareIP(self.fpga_part, self.period_ns)) diff --git a/src/finn/transformation/fpgadataflow/specialize_layers.py b/src/finn/transformation/fpgadataflow/specialize_layers.py index e71d6c23a4..dbcadd1df5 100644 --- a/src/finn/transformation/fpgadataflow/specialize_layers.py +++ b/src/finn/transformation/fpgadataflow/specialize_layers.py @@ -26,18 +26,18 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
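For reference, the AnnotateResources change above threads the new fpgapart argument into per-node estimation by binding it with functools.partial, so the rest of the pass keeps calling a one-argument function. A small sketch of that binding pattern with stand-in names, not the real FINN analysis signatures:

from functools import partial

def res_estimation(node_name, fpgapart):        # stand-in, not FINN's real signature
    return {"node": node_name, "fpgapart": fpgapart}

res_fxn = partial(res_estimation, fpgapart="xc7z020clg400-1")
print(res_fxn("MVAU_rtl_0"))                    # fpgapart is already bound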
+import numpy as np import warnings from onnx import helper -from qonnx.core.datatype import DataType from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.base import Transformation from finn.custom_op.fpgadataflow.hls import custom_op as hls_variants from finn.custom_op.fpgadataflow.rtl import custom_op as rtl_variants -from finn.util.fpgadataflow import is_versal +from finn.util.basic import get_dsp_block, is_versal -def _determine_impl_style(node, fpgapart): +def _determine_impl_style(node, fpgapart, model): optype = node.op_type # check if there is an HLS or RTL variant or both @@ -45,8 +45,8 @@ def _determine_impl_style(node, fpgapart): rtl_variant = optype + "_rtl" in rtl_variants.keys() # check if user has specified a preferred_impl_style - inst = getCustomOp(node) - impl_style = inst.get_nodeattr("preferred_impl_style") + node_inst = getCustomOp(node) + impl_style = node_inst.get_nodeattr("preferred_impl_style") # if impl_style not set, for "simple" layers always try # to use rtl variant if available @@ -55,23 +55,19 @@ def _determine_impl_style(node, fpgapart): return _dwc_determine_impl_style(node) if rtl_variant: if optype == "MVAU": - inp_width_fit = ( - DataType[getCustomOp(node).get_nodeattr("inputDataType")].bitwidth() >= 4 - ) - weight_width_fit = ( - DataType[getCustomOp(node).get_nodeattr("weightDataType")].bitwidth() >= 4 - ) - if inp_width_fit and weight_width_fit and _mvu_rtl_possible(node): + idt = node_inst.get_input_datatype() + wdt = node_inst.get_weight_datatype() + inp_width_fit = idt.bitwidth() >= 4 + weight_width_fit = wdt.bitwidth() >= 4 + if inp_width_fit and weight_width_fit and _mvu_rtl_possible(node, fpgapart, model): return "rtl" else: return "hls" elif optype == "VVAU": - inp_width_fit = ( - DataType[getCustomOp(node).get_nodeattr("inputDataType")].bitwidth() >= 4 - ) - weight_width_fit = ( - DataType[getCustomOp(node).get_nodeattr("weightDataType")].bitwidth() >= 4 - ) + idt = node_inst.get_input_datatype() + wdt = node_inst.get_weight_datatype() + inp_width_fit = idt.bitwidth() >= 4 + weight_width_fit = wdt.bitwidth() >= 4 if inp_width_fit and weight_width_fit and _vvu_rtl_possible(node, fpgapart): return "rtl" else: @@ -136,7 +132,7 @@ def _determine_impl_style(node, fpgapart): # user setting can be fulfilled return "rtl" elif optype == "MVAU": - if _mvu_rtl_possible(node): + if _mvu_rtl_possible(node, fpgapart, model): return "rtl" else: warn_str = """There is no RTL variant for %s. The node will automatically be @@ -232,31 +228,43 @@ def _swg_hls_possible(node): return False -def _mvu_rtl_possible(n): +def _mvu_rtl_possible(n, fpgapart, model): # Checks whether RTL-based MVU is supported # Currently, for DSP48 we only support computations up to # 8sx8u (8-bit signed weights x 8-bit (un)signed activations) - # and for DSP58 we support up to 8sx9s. Next to that, - # embedded thresholding functionality is not supported and - # neither binaryxnormode computation. 
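The specialization logic in this file now consults the get_dsp_block helper (added to finn.util.basic further down in this diff) to pick the DSP primitive available on the target part. A brief usage sketch; the UltraScale+ part string is only an illustrative example:

from finn.util.basic import get_dsp_block

print(get_dsp_block("xc7z020clg400-1"))             # 7-series Zynq -> "DSP48E1"
print(get_dsp_block("xcvm1802-vsvd1760-2MP-e-S"))   # Versal -> "DSP58"
print(get_dsp_block("xczu7ev-ffvc1156-2-e"))        # e.g. an UltraScale+ part -> "DSP48E2"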
- inp_width_in_range = ( - DataType[getCustomOp(n).get_nodeattr("inputDataType")].bitwidth() <= 8 - ) or ( - DataType[getCustomOp(n).get_nodeattr("inputDataType")].bitwidth() == 9 - and DataType[getCustomOp(n).get_nodeattr("inputDataType")].min() < 0 - ) - weight_width_in_range = DataType[getCustomOp(n).get_nodeattr("weightDataType")].bitwidth() <= 8 - signed_weights = DataType[getCustomOp(n).get_nodeattr("weightDataType")].min() < 0 - no_activation = getCustomOp(n).get_nodeattr("noActivation") == 1 - not_binaryxnor_mode = getCustomOp(n).get_nodeattr("binaryXnorMode") == 0 + # and for DSP58 we support up to 8sx9s. + # Please note, DSP48E1 does only support narrow range for weights + # Next to that, embedded thresholding functionality is not supported + # and neither binaryxnormode computation. + node_inst = getCustomOp(n) + # first check if no Activation or binary xnor mode and return False + # immediately if one of them is True + no_activation = node_inst.get_nodeattr("noActivation") == 0 + not_binaryxnor_mode = node_inst.get_nodeattr("binaryXnorMode") == 1 + if no_activation or not_binaryxnor_mode: + return False - return ( - inp_width_in_range - and weight_width_in_range - and signed_weights - and no_activation - and not_binaryxnor_mode - ) + # check if weights are signed, if not return False + wdt = node_inst.get_weight_datatype() + if not wdt.signed(): + return False + + # check which dsp block is available on fpga + dsp_block = get_dsp_block(fpgapart) + # check if weights are narrow + weights = model.get_initializer(n.input[1]) + narrow_weights = False if np.min(weights) == wdt.min() else True + # if non narrow weights and only DSP48E1 available return False + if not narrow_weights and dsp_block == "DSP48E1": + return False + + # if none of the above constraints have been triggered + # we now check if input and weight data types are in range + idt = node_inst.get_input_datatype() + inp_width_in_range = (idt.bitwidth() <= 8) or (idt.bitwidth() == 9 and idt.signed()) + weight_width_in_range = wdt.bitwidth() <= 8 + + return inp_width_in_range and weight_width_in_range def _vvu_rtl_possible(n, fpgapart): @@ -264,30 +272,25 @@ def _vvu_rtl_possible(n, fpgapart): # Currently, we only support RTL-VVU on DSP58 up to 8sx9s inputs # (8-bit signed weights x (9-bit signed OR 8-bit (un)signed) activations). # Next to that, embedded thresholding functionality is not supported. 
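As the reworked _mvu_rtl_possible above spells out, the DSP48E1 variant is only offered when the weights are narrow range, i.e. the most negative code of the weight datatype is unused. A standalone sketch of that gate with a placeholder weight tensor and a hard-coded INT4 minimum instead of the real DataType query:

import numpy as np

wdt_min = -8                                # stand-in for DataType["INT4"].min()
weights = np.array([[-8, 3], [5, -2]])      # placeholder initializer that uses the minimum code
narrow_weights = bool(np.min(weights) != wdt_min)
dsp_block = "DSP48E1"                       # what get_dsp_block returns for a 7-series part
mvu_rtl_possible = narrow_weights or dsp_block != "DSP48E1"
print(narrow_weights, mvu_rtl_possible)     # False False -> falls back to the HLS variant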
- in_width_in_range = ( - DataType[getCustomOp(n).get_nodeattr("inputDataType")].bitwidth() <= 8 - ) or ( - DataType[getCustomOp(n).get_nodeattr("inputDataType")].bitwidth() == 9 - and DataType[getCustomOp(n).get_nodeattr("inputDataType")].min() < 0 - ) - weight_width_in_range = DataType[getCustomOp(n).get_nodeattr("weightDataType")].bitwidth() <= 8 - signed_weights = DataType[getCustomOp(n).get_nodeattr("weightDataType")].min() < 0 - is_versal_family = is_versal(fpgapart) - no_activation = getCustomOp(n).get_nodeattr("noActivation") == 1 + node_inst = getCustomOp(n) + if not node_inst.get_nodeattr("noActivation"): + return False + if not is_versal(fpgapart): + return False + + idt = node_inst.get_input_datatype() + wdt = node_inst.get_weight_datatype() + in_width_in_range = (idt.bitwidth() <= 8) or (idt.bitwidth() == 9 and idt.min() < 0) + weight_width_in_range = wdt.bitwidth() <= 8 + signed_weights = wdt.min() < 0 - return ( - in_width_in_range - and weight_width_in_range - and signed_weights - and is_versal_family - and no_activation - ) + return in_width_in_range and weight_width_in_range and signed_weights class SpecializeLayers(Transformation): """Specialize all layers to either HLS or RTL variants""" - def __init__(self, fpgapart=""): + def __init__(self, fpgapart): super().__init__() self.fpgapart = fpgapart @@ -300,7 +303,7 @@ def apply(self, model): if not node.domain == "finn.custom_op.fpgadataflow": continue node_ind += 1 - impl_style = _determine_impl_style(node, self.fpgapart) + impl_style = _determine_impl_style(node, self.fpgapart, model) optype = node.op_type + "_" + impl_style new_node = helper.make_node( @@ -313,9 +316,6 @@ def apply(self, model): for attribute in node.attribute: if attribute.name != "preferred_impl_style": new_node.attribute.append(attribute) - if new_node.op_type == "MVAU_rtl": - is_versal_family = is_versal(self.fpgapart) - getCustomOp(new_node).set_nodeattr("is_versal", is_versal_family) graph.node.insert(node_ind, new_node) # remove old nodes graph.node.remove(node) diff --git a/src/finn/transformation/fpgadataflow/vitis_build.py b/src/finn/transformation/fpgadataflow/vitis_build.py index da7624b8ff..157d81cf35 100644 --- a/src/finn/transformation/fpgadataflow/vitis_build.py +++ b/src/finn/transformation/fpgadataflow/vitis_build.py @@ -383,7 +383,7 @@ def __init__( def apply(self, model): _check_vitis_envvars() # prepare at global level, then break up into kernels - prep_transforms = [InsertIODMA(512), InsertDWC(), SpecializeLayers()] + prep_transforms = [InsertIODMA(512), InsertDWC(), SpecializeLayers(self.fpga_part)] for trn in prep_transforms: model = model.transform(trn) model = model.transform(GiveUniqueNodeNames()) @@ -405,7 +405,7 @@ def apply(self, model): dataflow_model_filename = sdp_node.get_nodeattr("model") kernel_model = ModelWrapper(dataflow_model_filename) kernel_model = kernel_model.transform(InsertFIFO()) - kernel_model = kernel_model.transform(SpecializeLayers()) + kernel_model = kernel_model.transform(SpecializeLayers(self.fpga_part)) kernel_model = kernel_model.transform(RemoveUnusedTensors()) kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix)) kernel_model.save(dataflow_model_filename) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 1995d9f06a..91c191962f 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -288,3 +288,20 @@ def memutil(req_mem_spec, primitive_spec): eff = (req_width * req_depth) / (count * prim_width * prim_depth) waste = (count * prim_width * prim_depth) - 
(req_width * req_depth) return (count, eff, waste) + + +def is_versal(fpgapart): + """Returns whether board is part of the Versal family""" + return ( + fpgapart[0:4] in ["xcvc", "xcve", "xcvp", "xcvm", "xqvc", "xqvm"] + or fpgapart[0:5] == "xqrvc" + ) + + +def get_dsp_block(fpgapart): + if is_versal(fpgapart): + return "DSP58" + elif fpgapart[2] == "7": + return "DSP48E1" + else: + return "DSP48E2" diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py index 7698850029..6a72d38058 100644 --- a/src/finn/util/data_packing.py +++ b/src/finn/util/data_packing.py @@ -295,9 +295,14 @@ def npy_to_rtlsim_input(input_file, input_dtype, pad_to_nbits, reverse_inner=Tru inp = np.load(input_file) else: raise Exception("input_file must be ndarray or filename for .npy") - if inp.shape[-1] == 1 and input_dtype.is_integer(): + if ( + inp.shape[-1] == 1 + and input_dtype.is_integer() + and input_dtype.get_canonical_name() != "BIPOLAR" + ): + mask = (1 << input_dtype.bitwidth()) - 1 packed_data = inp.flatten().astype(input_dtype.to_numpy_dt()) - packed_data = [int(x) for x in packed_data] + packed_data = [int(x) & mask for x in packed_data] else: packed_data = pack_innermost_dim_as_hex_string( inp, input_dtype, pad_to_nbits, reverse_inner=reverse_inner diff --git a/src/finn/util/fpgadataflow.py b/src/finn/util/fpgadataflow.py index 3d3d343cd4..aae438fac2 100644 --- a/src/finn/util/fpgadataflow.py +++ b/src/finn/util/fpgadataflow.py @@ -69,11 +69,3 @@ def is_rtl_node(node): is_node = True return is_node - - -def is_versal(fpgapart): - """Returns whether board is part of the Versal family""" - return ( - fpgapart[0:4] in ["xcvc", "xcve", "xcvp", "xcvm", "xqvc", "xqvm"] - or fpgapart[0:5] == "xqrvc" - ) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 556ba1d187..81c6316ec1 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -95,7 +95,6 @@ MoveScalarLinearPastInvariants, ) from finn.util.basic import get_finn_root, make_build_dir, test_board_map -from finn.util.fpgadataflow import is_fpgadataflow_node from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -131,12 +130,12 @@ def fold_tfc(model): fcl_inst.set_nodeattr("SIMD", simd) fcl_inst.set_nodeattr("ram_style", ramstyle) fcl_inst.set_nodeattr("mem_mode", "internal_decoupled") + fcl_inst.set_nodeattr("resType", "lut") # set parallelism for input quantizer to be same as first layer's SIMD inp_qnt_node = model.get_nodes_by_op_type("Thresholding_rtl")[0] inp_qnt = getCustomOp(inp_qnt_node) inp_qnt.set_nodeattr("PE", 49) - # TODO: update PYNQ driver to support runtime writeable weights for RTL Thresholding - # inp_qnt.set_nodeattr("runtime_writeable_weights", 1) + inp_qnt.set_nodeattr("runtime_writeable_weights", 1) return model @@ -156,6 +155,7 @@ def fold_lfc(model): fcl_inst.set_nodeattr("ram_style", ramstyle) fcl_inst.set_nodeattr("runtime_writeable_weights", 1) fcl_inst.set_nodeattr("mem_mode", "internal_decoupled") + fcl_inst.set_nodeattr("resType", "lut") # set parallelism for input quantizer to be same as first layer's SIMD inp_qnt_node = model.get_nodes_by_op_type("Thresholding_rtl")[0] inp_qnt = getCustomOp(inp_qnt_node) @@ -182,12 +182,14 @@ def fold_cnv_large(model): fcl_inst.set_nodeattr("PE", pe) fcl_inst.set_nodeattr("SIMD", simd) fcl_inst.set_nodeattr("mem_mode", "internal_decoupled") + fcl_inst.set_nodeattr("resType", "lut") - swg_layers = model.get_nodes_by_op_type("ConvolutionInputGenerator_hls") 
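Returning to the data_packing change above: masking with (1 << bitwidth) - 1 turns negative integer activations into their two's-complement bit pattern before packing, which a plain int() conversion would not do. A tiny illustration of the arithmetic, assuming a hypothetical 4-bit signed input type:

bitwidth = 4
mask = (1 << bitwidth) - 1          # 0b1111
for x in [-3, -1, 0, 5]:
    print(x, "->", int(x) & mask)   # -3 -> 13, -1 -> 15, 0 -> 0, 5 -> 5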
+ swg_layers = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") for i in range(len(swg_layers)): swg_inst = getCustomOp(swg_layers[i]) simd = folding[i][1] swg_inst.set_nodeattr("SIMD", simd) + swg_inst.set_nodeattr("ram_style", "distributed") return model @@ -198,11 +200,11 @@ def fold_cnv_small(model): (8, 3, "distributed"), (16, 16, "distributed"), (8, 16, "auto"), - (8, 16, "block"), + (8, 16, "distributed"), (4, 8, "auto"), (1, 8, "auto"), - (1, 2, "distributed"), - (2, 2, "block"), + (1, 2, "block"), + (2, 2, "auto"), (5, 1, "distributed"), ] for fcl, (pe, simd, ramstyle) in zip(fc_layers, folding): @@ -211,12 +213,18 @@ def fold_cnv_small(model): fcl_inst.set_nodeattr("SIMD", simd) fcl_inst.set_nodeattr("ram_style", ramstyle) fcl_inst.set_nodeattr("mem_mode", "internal_decoupled") + fcl_inst.set_nodeattr("resType", "lut") - swg_layers = model.get_nodes_by_op_type("ConvolutionInputGenerator_hls") + swg_layers = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") for i in range(len(swg_layers)): swg_inst = getCustomOp(swg_layers[i]) simd = folding[i][1] swg_inst.set_nodeattr("SIMD", simd) + swg_inst.set_nodeattr("ram_style", "distributed") + inp_qnt_node = model.get_nodes_by_op_type("Thresholding_rtl")[0] + inp_qnt = getCustomOp(inp_qnt_node) + inp_qnt.set_nodeattr("depth_trigger_uram", 32000) + inp_qnt.set_nodeattr("depth_trigger_bram", 32000) return model @@ -597,16 +605,10 @@ def test_convert_to_hw_layers(self, topology, wbits, abits, board): assert len(model.get_nodes_by_op_type(op_type)) == exp_count def test_specialize_layers(self, topology, wbits, abits, board): + build_data = get_build_env(board, target_clk_ns) prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "convert_to_hw_layers") model = load_test_checkpoint_or_skip(prev_chkpt_name) - # set preferred impl style to hls for all layers - force_hls_boards = ["Pynq-Z1", "U250"] - if topology == "cnv" and wbits == 2 and abits == 2 and board in force_hls_boards: - for node in model.graph.node: - if is_fpgadataflow_node(node): - inst = getCustomOp(node) - inst.set_nodeattr("preferred_impl_style", "hls") - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(build_data["part"])) model = model.transform(GiveUniqueNodeNames()) model.save(get_checkpoint_name(topology, wbits, abits, "specialize_layers")) exp_layer_counts = { @@ -636,19 +638,9 @@ def test_specialize_layers(self, topology, wbits, abits, board): ("StreamingMaxPool_hls", 2), ("LabelSelect_hls", 1), ], - "cnv-2-2": [ - ("Transpose", 1), - ("Thresholding_hls", 1), - ("ConvolutionInputGenerator_hls", 6), - ("MVAU_hls", 9), - ("StreamingMaxPool_hls", 2), - ("LabelSelect_hls", 1), - ], } if topology == "tfc" and wbits == 1 and abits == 1: exp_key = "tfc-1-1" - elif topology == "cnv" and wbits == 2 and abits == 2 and board in force_hls_boards: - exp_key = "cnv-2-2" else: exp_key = topology exp_layer_counts = exp_layer_counts[exp_key] @@ -706,7 +698,7 @@ def test_ipgen(self, topology, wbits, abits, board): build_data = get_build_env(board, target_clk_ns) if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fold") + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "minimize_bit_width") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(build_data["part"], target_clk_ns)) @@ -719,8 +711,8 @@ def 
test_set_fifo_depths(self, topology, wbits, abits, board): prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "ipgen_" + board) model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(board, target_clk_ns)["part"] - if topology == "cnv" and wbits == 2 and abits == 2 and board == "Pynq-Z1": - # Enabling swg_exception for this single test case. Disabling the exception results in + if topology == "cnv" and abits == 2 and board == "Pynq-Z1": + # Enabling swg_exception for these test cases. Disabling the exception results in # a design that exceeds the resources of the Pynq-Z1 board. In future this should be # revisited and handled correctly as the swg_exception is poorly justified. model = model.transform( @@ -740,7 +732,7 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, board): model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(board, target_clk_ns)["part"] model = model.transform(InsertDWC()) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(AnnotateCycles()) perf = model.analysis(dataflow_performance) @@ -807,7 +799,7 @@ def test_build(self, topology, wbits, abits, board): prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + board) model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(build_data["build_fxn"]) - model = model.transform(AnnotateResources("synth")) + model = model.transform(AnnotateResources("synth", build_data["part"])) model.save(get_checkpoint_name(topology, wbits, abits, "build_" + board)) @pytest.mark.slow diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py index cbf89c2eae..01d995c147 100644 --- a/tests/end2end/test_end2end_mobilenet_v1.py +++ b/tests/end2end/test_end2end_mobilenet_v1.py @@ -58,11 +58,15 @@ import finn.transformation.fpgadataflow.convert_to_hw_layers as to_hw import finn.transformation.streamline.absorb as absorb import finn.transformation.streamline.reorder as reorder +from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance from finn.core.onnx_exec import execute_onnx +from finn.transformation.fpgadataflow.annotate_cycles import AnnotateCycles from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.create_dataflow_partition import ( CreateDataflowPartition, ) +from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.minimize_accumulator_width import ( MinimizeAccumulatorWidth, ) @@ -70,14 +74,22 @@ MinimizeWeightBitWidth, ) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.fpgadataflow.set_fifo_depths import ( + InsertAndSetFIFODepths, + RemoveShallowFIFOs, + SplitLargeFIFOs, +) from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.transformation.streamline.collapse_repeated import CollapseRepeatedMul from 
finn.transformation.streamline.round_thresholds import RoundAndClipThresholds -from finn.util.basic import alveo_default_platform, alveo_part_map, get_finn_root +from finn.util.basic import get_finn_root from finn.util.pytorch import NormalizePreProc +from finn.util.pyverilator import verilator_fifosim from finn.util.test import ( crop_center, get_test_model_trained, @@ -87,11 +99,9 @@ build_dir = os.environ["FINN_BUILD_DIR"] -test_board = "U250" -test_platform = alveo_default_platform[test_board] -test_fpga_part = alveo_part_map[test_board] +# Select Versal device such that RTL VVU (i.e. DSP58) can be enabled +fpga_part = "xcvm1802-vsvd1760-2MP-e-S" target_clk_ns = 3 -large_fifo_ram_style = "ultra" extra_fold = 1 first_layer_res_type = "dsp" @@ -218,7 +228,6 @@ def test_end2end_mobilenet_lowering(): @pytest.mark.end2end -@pytest.mark.xfail def test_end2end_mobilenet_convert_to_hw_layers(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_lowered.onnx") model = model.transform(to_hw.InferPool()) @@ -237,38 +246,50 @@ def test_end2end_mobilenet_convert_to_hw_layers(): @pytest.mark.end2end def test_end2end_mobilenet_specialize_layers(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_hw_layers.onnx") - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) model.save(build_dir + "/end2end_mobilenet_specialize_layers.onnx") @pytest.mark.end2end -def test_end2end_mobilenet_folding(): +def test_end2end_mobilenet_create_dataflow_partition(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_specialize_layers.onnx") + parent_model = model.transform(CreateDataflowPartition()) + parent_model.save(build_dir + "/end2end_mobilenet_dataflow_parent.onnx") + sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] + sdp_node = getCustomOp(sdp_node) + dataflow_model_filename = sdp_node.get_nodeattr("model") + dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) + dataflow_model = dataflow_model.transform(RemoveUnusedTensors()) + dataflow_model.save(build_dir + "/end2end_mobilenet_dataflow_model.onnx") + + +@pytest.mark.end2end +def test_end2end_mobilenet_folding(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_dataflow_model.onnx") # optional extra folding to use fewer resources # applied while setting the attributes on each node assert extra_fold in [1, 2, 4] # set up folding for the conv layers impl'd by MVAUs # each value is PE for a layer - fc_layers = model.get_nodes_by_op_type("MVAU_hls") - fc_layers += model.get_nodes_by_op_type("MVAU_rtl") + fc_layers = model.get_nodes_by_op_type("MVAU_rtl") # each tuple is (PE, SIMD, ram_style) for a layer folding = [ - (32, 3, "block"), + (16, 3, "block"), + (8, 16, "distributed"), + (8, 16, "distributed"), + (16, 16, "distributed"), + (8, 16, "distributed"), + (16, 16, "distributed"), + (8, 16, "block"), + (16, 16, "block"), (16, 16, "block"), (16, 16, "block"), - (32, 16, "block"), (16, 16, "block"), - (32, 16, "block"), (16, 16, "block"), - (32, 16, "block"), - (32, 16, "block"), - (32, 16, "block"), - (32, 16, "block"), - (32, 16, "block"), + (8, 16, "block"), (16, 16, "block"), - (32, 16, "block"), (4, 4, "block"), ] for fcl, (pe, simd, ramstyle) in zip(fc_layers, folding): @@ -276,26 +297,46 @@ def test_end2end_mobilenet_folding(): fcl_inst.set_nodeattr("PE", pe // extra_fold) 
fcl_inst.set_nodeattr("SIMD", simd) fcl_inst.set_nodeattr("ram_style", ramstyle) - # first layer uses 8-bit weights & activations - # control its compute resource type explicitly - getCustomOp(fc_layers[0]).set_nodeattr("resType", first_layer_res_type) # set up folding for the depthwise conv layers impl'd by VVAUs # each value is PE for a layer - vvau_layers = model.get_nodes_by_op_type("VVAU_hls") - vvau_layers += model.get_nodes_by_op_type("VVAU_rtl") - folding = [32, 32, 64, 16, 32, 8, 16, 16, 16, 16, 16, 4, 8] - for vvau, pe in zip(vvau_layers, folding): + vvau_layers = model.get_nodes_by_op_type("VVAU_rtl") + pe_simd_fold = [ + [16, 3], + [8, 3], + [16, 3], + [4, 3], + [8, 3], + [2, 3], + [4, 3], + [4, 3], + [4, 3], + [4, 3], + [4, 3], + [1, 3], + [2, 3], + ] + for vvau, pe_simd in zip(vvau_layers, pe_simd_fold): + pe, simd = pe_simd vvau_inst = getCustomOp(vvau) vvau_inst.set_nodeattr("PE", pe // extra_fold) + vvau_inst.set_nodeattr("SIMD", simd) # set SIMD in preceeding ConvInputGen to same value convinputgen = model.find_direct_predecessors(vvau)[0] convinputgen_inst = getCustomOp(convinputgen) convinputgen_inst.set_nodeattr("SIMD", pe // extra_fold) + # Enable parallel_window mode for SIMD parallelism VVU + convinputgen_inst.set_nodeattr("parallel_window", 1) # set SIMD in preceeding FMPadding to same value padding = model.find_direct_predecessors(convinputgen)[0] - if padding.op_type == "FMPadding_hls": + if padding.op_type == "FMPadding_rtl": padding_inst = getCustomOp(padding) padding_inst.set_nodeattr("SIMD", pe // extra_fold) + # Set folding Thresholding layers + thresholding_layers = model.get_nodes_by_op_type("Thresholding_rtl") + folding = [2, 2, 4, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + for thresholding, pe in zip(thresholding_layers, folding): + thresholding_inst = getCustomOp(thresholding) + thresholding_inst.set_nodeattr("PE", pe) # adjust final pooling layer + its inpgen pool_node = model.get_nodes_by_op_type("Pool_hls")[0] pool_inst = getCustomOp(pool_node) @@ -312,29 +353,16 @@ def test_end2end_mobilenet_minimize_bit_width(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_folded.onnx") model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(MinimizeWeightBitWidth()) - model = model.save(build_dir + "/end2end_mobilenet_minimize_bitwidth.onnx") - - -@pytest.mark.end2end -def test_end2end_mobilenet_create_dataflow_partition(): - model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_minimize_bitwidth.onnx") - parent_model = model.transform(CreateDataflowPartition()) - parent_model.save(build_dir + "/end2end_mobilenet_dataflow_parent.onnx") - sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] - sdp_node = getCustomOp(sdp_node) - dataflow_model_filename = sdp_node.get_nodeattr("model") - dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) - dataflow_model = dataflow_model.transform(RemoveUnusedTensors()) - dataflow_model.save(build_dir + "/end2end_mobilenet_dataflow_model.onnx") + model.save(build_dir + "/end2end_mobilenet_minimize_bitwidth.onnx") @pytest.mark.slow @pytest.mark.vivado @pytest.mark.end2end -@pytest.mark.xfail def test_end2end_mobilenet_cppsim(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_minimize_bitwidth.onnx") x = np.load(build_dir + "/end2end_mobilenet_input.npy") + x = x.transpose(0, 2, 3, 1) # Convert NCHW to NHWC inp_name = model.graph.input[0].name out_name = model.graph.output[0].name 
inp_dict = {inp_name: x} @@ -358,7 +386,159 @@ def test_end2end_mobilenet_cppsim(): # check result with golden values golden = np.load(build_dir + "/end2end_mobilenet_golden_top5.npy") - golden_prob = np.load(build_dir + "/end2end_mobilenet_golden_top5_prob.npy") + # golden_prob = np.load(build_dir + "/end2end_mobilenet_golden_top5_prob.npy") assert (golden == res_cppsim).all() - assert np.isclose(golden_prob, res_cppsim_prob).all() + # assert np.isclose(golden_prob, res_cppsim_prob[0, 0, 0, :5]).all() + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_ipgen(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_minimize_bitwidth.onnx") + model = model.transform(PrepareIP(fpga_part, target_clk_ns)) + model = model.transform(HLSSynthIP()) + model.save(build_dir + "/end2end_mobilenet_hw_ipgen.onnx") + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_rtlsim(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_hw_ipgen.onnx") + # use critical path estimate to set rtlsim liveness threshold + # (very conservative) + model = model.transform(AnnotateCycles()) + estimate_network_performance = model.analysis(dataflow_performance) + os.environ["LIVENESS_THRESHOLD"] = str( + int(estimate_network_performance["critical_path_cycles"]) + ) + x = np.load(build_dir + "/end2end_mobilenet_input.npy") + x = x.transpose(0, 2, 3, 1) # Convert NCHW to NHWC + inp_name = model.graph.input[0].name + out_name = model.graph.output[0].name + inp_dict = {inp_name: x} + # rtlsim + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(PrepareRTLSim()) + model.save(build_dir + "/end2end_mobilenet_rtlsim.onnx") + ret_rtlsim = execute_onnx(model, inp_dict, True) + res_rtlsim = ret_rtlsim[out_name] + np.save(build_dir + "/end2end_mobilenet_result_rtlsim.npy", res_rtlsim) + a0 = np.load(build_dir + "/end2end_mobilenet_topk_scale.npy") + res_rtlsim_prob = ret_rtlsim[model.graph.node[-2].output[0]] * a0 + np.save(build_dir + "/end2end_mobilenet_result_rtlsim_prob.npy", res_rtlsim_prob) + + # check result with golden values + golden = np.load(build_dir + "/end2end_mobilenet_golden_top5.npy") + # golden_prob = np.load(build_dir + "/end2end_mobilenet_golden_top5_prob.npy") + + assert (golden == res_rtlsim).all() + # assert np.isclose(golden_prob, res_rtlsim_prob[0, 0, 0, :5]).all() + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_set_fifo_depths(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_hw_ipgen.onnx") + model = model.transform( + InsertAndSetFIFODepths( + fpga_part, + target_clk_ns, + swg_exception=False, + vivado_ram_style="auto", + force_python_sim=False, + ) + ) + # perform FIFO splitting and shallow FIFO removal only after the final config + # json file has been written. otherwise, since these transforms may add/remove + # FIFOs, we get name mismatch problems when trying to reuse the final config. + model = model.transform(SplitLargeFIFOs()) + model = model.transform(RemoveShallowFIFOs()) + # after FIFOs are ready to go, call PrepareIP and HLSSynthIP again + # this will only run for the new nodes (e.g. 
FIFOs and DWCs) + model = model.transform(PrepareIP(fpga_part, target_clk_ns)) + model = model.transform(HLSSynthIP()) + model.save(build_dir + "/end2end_mobilenet_set_fifo_depths.onnx") + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_stitched_ip(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_set_fifo_depths.onnx") + model = model.transform( + CreateStitchedIP( + fpga_part, + target_clk_ns, + vitis=False, + signature=None, + ) + ) + model.save(build_dir + "/end2end_mobilenet_stitched_ip.onnx") + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_stitched_ip_rtlsim(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_stitched_ip.onnx") + # use critical path estimate to set rtlsim liveness threshold + # (very conservative) + model = model.transform(AnnotateCycles()) + estimate_network_performance = model.analysis(dataflow_performance) + os.environ["LIVENESS_THRESHOLD"] = str( + int(estimate_network_performance["critical_path_cycles"]) + ) + # Prepare input + x = np.load(build_dir + "/end2end_mobilenet_input.npy") + x = x.transpose(0, 2, 3, 1) # Convert NCHW to NHWC + inp_name = model.graph.input[0].name + out_name = model.graph.output[0].name + inp_dict = {inp_name: x} + + # set top-level prop for stitched-ip rtlsim and launch + model.set_metadata_prop("exec_mode", "rtlsim") + ret_rtlsim_ip = execute_onnx(model, inp_dict, True) + res_rtlsim_ip = ret_rtlsim_ip[out_name] + np.save(build_dir + "/end2end_mobilenet_result_rtlsim_ip.npy", res_rtlsim_ip) + a0 = np.load(build_dir + "/end2end_mobilenet_topk_scale.npy") + res_rtlsim_ip_prob = ret_rtlsim_ip[model.graph.node[-2].output[0]] * a0 + np.save(build_dir + "/end2end_mobilenet_result_cppsim_prob.npy", res_rtlsim_ip_prob) + + # check result with golden values + golden = np.load(build_dir + "/end2end_mobilenet_golden_top5.npy") + # golden_prob = np.load(build_dir + "/end2end_mobilenet_golden_top5_prob.npy") + + assert (golden == res_rtlsim_ip).all() + # assert np.isclose(golden_prob, res_rtlsim_ip_prob[0, 0, 0, :5]).all() + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.end2end +def test_end2end_mobilenet_rtlsim_performance(): + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_stitched_ip.onnx") + report_dir = build_dir + "/report" + os.makedirs(report_dir, exist_ok=True) + # multi-in/out streams currently not supported in our C++ verilator driver + rtlsim_bs = 1 + + rtlsim_perf_dict = verilator_fifosim(model, rtlsim_bs) + # keep keys consistent between the Python and C++-styles + cycles = rtlsim_perf_dict["cycles"] + clk_ns = float(model.get_metadata_prop("clk_ns")) + fclk_mhz = 1 / (clk_ns * 0.001) + runtime_s = (cycles * clk_ns) * (10**-9) + rtlsim_perf_dict["runtime[ms]"] = runtime_s * 1000 + rtlsim_perf_dict["throughput[images/s]"] = rtlsim_bs / runtime_s + rtlsim_perf_dict["fclk[mhz]"] = fclk_mhz + for key, val in rtlsim_perf_dict.items(): + if "max_count" in key: + del rtlsim_perf_dict[key] + # estimate stable-state throughput based on latency+throughput + rtlsim_perf_dict["stable_throughput[images/s]"] = rtlsim_perf_dict["throughput[images/s]"] + + model.save(build_dir + "/end2end_mobilenet_rtlsim_performance.onnx") diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index bac343bedf..29d2f58e66 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -60,7 +60,7 @@ def get_checkpoint_name(step): # checkpoint for 
build step is an entire dir return build_dir + "/end2end_ext_weights_build" elif step == "download": - return onnx_dir_local + "/tfc-w1a1.onnx" + return onnx_dir_local + "/tfc-w2a2.onnx" else: # other checkpoints are onnx files return build_dir + "/end2end_ext_weights_%s.onnx" % (step) @@ -82,14 +82,17 @@ def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) test_data = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/test_ext_weights" - folding_config_file = test_data + "/tfc-w1a1-extw.json" + folding_config_file = test_data + "/tfc-w2a2-extw.json" + specialize_layers_config_file = test_data + "/specialize_layers_config.json" output_dir = make_build_dir("test_end2end_ext_weights_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, verbose=True, + standalone_thresholds=True, folding_config_file=folding_config_file, + specialize_layers_config_file=specialize_layers_config_file, synth_clk_period_ns=target_clk_ns, - board="Pynq-Z1", + board="ZCU104", shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, diff --git a/tests/fpgadataflow/test_convert_to_hw_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hw_1d_conv_layer.py index c5d0281203..6d3929109f 100644 --- a/tests/fpgadataflow/test_convert_to_hw_1d_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hw_1d_conv_layer.py @@ -143,10 +143,10 @@ def test_convert_to_hw_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_m inst.set_nodeattr("preferred_impl_style", "hls") if depthwise is True: new_model = new_model.transform(to_hw.InferVectorVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) else: new_model = new_model.transform(to_hw.InferQuantizedMatrixVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) # set folding parameters for MVAU if new_model.get_nodes_by_op_type("MVAU_hls"): fc_node = new_model.get_nodes_by_op_type("MVAU_hls")[0] diff --git a/tests/fpgadataflow/test_convert_to_hw_channelwise_layer.py b/tests/fpgadataflow/test_convert_to_hw_channelwise_layer.py index 4b063f8505..ac02008ff2 100644 --- a/tests/fpgadataflow/test_convert_to_hw_channelwise_layer.py +++ b/tests/fpgadataflow/test_convert_to_hw_channelwise_layer.py @@ -121,7 +121,7 @@ def test_convert_to_hw_channelwise_layer(pdt, idt, onnx_op_name, scalar_param, e assert (y_produced == y_expected).all() assert model.graph.node[1].op_type == "ChannelwiseOp" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_convert_to_hw_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hw_conv_fc_transition.py index f7b3c55c2a..f9b5dff56c 100755 --- a/tests/fpgadataflow/test_convert_to_hw_conv_fc_transition.py +++ b/tests/fpgadataflow/test_convert_to_hw_conv_fc_transition.py @@ -204,7 +204,7 @@ def test_convert_to_hw_conv_fc_transition(conv_config, depthwise, use_reshape): if is_fpgadataflow_node(node): inst = getCustomOp(node) inst.set_nodeattr("preferred_impl_style", "hls") - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) new_model = new_model.transform(GiveUniqueNodeNames()) new_model = 
new_model.transform(InferDataLayouts()) diff --git a/tests/fpgadataflow/test_convert_to_hw_conv_layer.py b/tests/fpgadataflow/test_convert_to_hw_conv_layer.py index 61f8af7806..122997e412 100644 --- a/tests/fpgadataflow/test_convert_to_hw_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hw_conv_layer.py @@ -131,10 +131,10 @@ def test_convert_to_hw_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mode inst.set_nodeattr("preferred_impl_style", "hls") if depthwise is True: new_model = new_model.transform(to_hw.InferVectorVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) else: new_model = new_model.transform(to_hw.InferQuantizedMatrixVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) # set folding parameters for MVAU if new_model.get_nodes_by_op_type("MVAU_hls"): fc_node = new_model.get_nodes_by_op_type("MVAU_hls")[0] diff --git a/tests/fpgadataflow/test_convert_to_hw_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hw_layers_cnv.py index 71f383ca23..4b8668c7b3 100644 --- a/tests/fpgadataflow/test_convert_to_hw_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hw_layers_cnv.py @@ -111,7 +111,7 @@ def test_convert_to_hw_layers_cnv_w1a1(fused_activation): if is_fpgadataflow_node(node): inst = getCustomOp(node) inst.set_nodeattr("preferred_impl_style", "hls") - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) for node in model.graph.node: if node.op_type == "MVAU_hls": inst = getCustomOp(node) diff --git a/tests/fpgadataflow/test_convert_to_hw_layers_fc.py b/tests/fpgadataflow/test_convert_to_hw_layers_fc.py index 746ded9074..94fafae6b7 100644 --- a/tests/fpgadataflow/test_convert_to_hw_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hw_layers_fc.py @@ -82,7 +82,7 @@ def test_convert_to_hw_layers_tfc_w1a1(): model = model.transform(absorb.AbsorbMulIntoMultiThreshold()) model = model.transform(RoundAndClipThresholds()) model = model.transform(to_hw.InferBinaryMatrixVectorActivation()) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) fc0 = model.graph.node[2] assert fc0.op_type.startswith("MVAU") assert model.get_tensor_shape(fc0.input[0]) == [1, 784] @@ -154,7 +154,7 @@ def test_convert_to_hw_layers_tfc_w1a2(): model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) model = model.transform(to_hw.InferQuantizedMatrixVectorActivation()) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) fc0 = model.graph.node[2] assert fc0.op_type.startswith("MVAU") diff --git a/tests/fpgadataflow/test_convert_to_hw_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hw_layers_synthetic.py index 6c83f10617..6a22f39cdc 100644 --- a/tests/fpgadataflow/test_convert_to_hw_layers_synthetic.py +++ b/tests/fpgadataflow/test_convert_to_hw_layers_synthetic.py @@ -210,7 +210,7 @@ def test_convert_to_hw_layers_synthetic(ch, ifmdim, idt): output_hw = oxe.execute_onnx(model, input_dict, True) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) # check topology status diff --git a/tests/fpgadataflow/test_convert_to_hw_pool_batch.py b/tests/fpgadataflow/test_convert_to_hw_pool_batch.py index d532cf345e..e155053b8b 100644 --- 
a/tests/fpgadataflow/test_convert_to_hw_pool_batch.py +++ b/tests/fpgadataflow/test_convert_to_hw_pool_batch.py @@ -186,7 +186,7 @@ def test_convert_to_hw_pool(idt, odt, pool_config, ifm_ch, pe, op_type, exec_mod inst.set_nodeattr("preferred_impl_style", "hls") y_produced = oxe.execute_onnx(new_model, input_dict)["outp"] assert (y_produced == y_expected).all() - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) # Folding for n in new_model.graph.node: diff --git a/tests/fpgadataflow/test_convert_to_hw_thresholding.py b/tests/fpgadataflow/test_convert_to_hw_thresholding.py deleted file mode 100755 index 63cb5986e1..0000000000 --- a/tests/fpgadataflow/test_convert_to_hw_thresholding.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2024, Advanced Micro Devices, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
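Note: the test hunks above all make the same mechanical change: SpecializeLayers is now constructed with an explicit FPGA part string instead of being called with no arguments, presumably so that part-specific HLS/RTL specialization decisions can be made. A minimal sketch of the updated call pattern, not part of the patch; it assumes `model` is a QONNX ModelWrapper that already contains HW abstraction nodes:

    from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

    # part string used by most of the tests in this patch
    fpga_part = "xc7z020clg400-1"
    # old form, removed throughout: model = model.transform(SpecializeLayers())
    model = model.transform(SpecializeLayers(fpga_part))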
- -import pytest - -import numpy as np -from onnx import TensorProto, helper -from qonnx.core.datatype import DataType -from qonnx.core.modelwrapper import ModelWrapper -from qonnx.custom_op.general.multithreshold import multithreshold -from qonnx.custom_op.registry import getCustomOp -from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.transformation.infer_datatypes import InferDataTypes -from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor - -import finn.core.onnx_exec as oxe -from finn.transformation.fpgadataflow.convert_to_hw_layers import InferThresholdingLayer -from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers - -test_fpga_part = "xczu3eg-sbva484-1-e" -target_clk_ns = 5 - - -# Helper functions -def sort_thresholds_increasing(thresholds): - return np.sort(thresholds, axis=1) - - -def prepare_inputs(input_tensor): - return {"inp": input_tensor} - - -# n = batch, c = channel, h = height, w = width of feature map -# Standard = NCHW; FINN = NHWC -# Convert from NHWC(FINN) to NCHW(Standard) -def layout_FINN2NCHW(data): - return np.transpose(data, (0, 3, 1, 2)) - - -# Convert from NCHW(Standard) to NHWC(FINN) -def layout_NCHW2FINN(data): - return np.transpose(data, (0, 2, 3, 1)) - - -def generate_random_threshold_values(input_data_type, num_input_channels, num_steps): - return np.random.randint( - input_data_type.min(), - input_data_type.max() + 1, - (num_input_channels, num_steps), - ).astype(np.float32) - - -def generate_pe_value(fold, num_input_channels): - if fold == -1: - fold = num_input_channels - pe = num_input_channels // fold - assert num_input_channels % pe == 0 - return pe - - -def make_single_multithresholding_modelwrapper( - thresholds, - pe, - input_data_type, - output_data_type, - activation_bias, - num_input_vecs, -): - NumChannels = thresholds.shape[0] - - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, num_input_vecs + [NumChannels]) - outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, num_input_vecs + [NumChannels]) - - node_inp_list = ["inp", "thresh"] - - Multithresholding_node = helper.make_node( - "MultiThreshold", - node_inp_list, - ["outp"], - domain="qonnx.custom_op.general", - out_dtype=output_data_type.name, - out_bias=float(activation_bias), - out_scale=1.0, - ) - - graph = helper.make_graph( - nodes=[Multithresholding_node], - name="multithresholding_graph", - inputs=[inp], - outputs=[outp], - ) - - model = helper.make_model(graph, producer_name="multithresholding-model") - model = ModelWrapper(model) - model = model.transform(InferShapes()) - model = model.transform(InferDataTypes()) - model = model.transform(GiveUniqueNodeNames()) - - model.set_tensor_datatype("inp", input_data_type) - model.set_tensor_datatype("outp", output_data_type) - - model.set_tensor_datatype("thresh", input_data_type) - model.set_initializer("thresh", thresholds) - return model - - -# N.B. 
Fold values where C % PE != 0 fail -@pytest.mark.parametrize("activation", [DataType["INT4"], DataType["BIPOLAR"]]) -@pytest.mark.parametrize("input_data_type", [DataType["INT16"], DataType["UINT16"]]) -@pytest.mark.parametrize("fold", [-1, 1, 2, 4, 6]) -@pytest.mark.parametrize("num_input_channels", [16]) -@pytest.mark.parametrize("impl_style", ["hls", "rtl"]) -@pytest.mark.fpgadataflow -@pytest.mark.vivado -def test_convert_multithreshold_to_hardware( - impl_style, - activation, - input_data_type, - fold, - num_input_channels, -): - # Handle inputs to the test - pe = generate_pe_value(fold, num_input_channels) - num_steps = activation.get_num_possible_values() - 1 - - # Other non-input parameters - num_input_vecs = [1, 2, 2] - output_data_type = activation - if output_data_type == DataType["BIPOLAR"]: - activation_bias = 0 - else: - activation_bias = output_data_type.min() - - # Generate random thresholds and sort in ascending order - thresholds = generate_random_threshold_values(input_data_type, num_input_channels, num_steps) - - # provide non-decreasing/ascending thresholds - thresholds = sort_thresholds_increasing(thresholds) - - # Make a Multithreshold graph and convert to thresholding binary search node - model = make_single_multithresholding_modelwrapper( - thresholds, - pe, - input_data_type, - output_data_type, - activation_bias, - num_input_vecs, - ) - - model = model.transform(InferThresholdingLayer()) - - # Perform functional validation of the InferThresholdingLayer transform - x = gen_finn_dt_tensor(input_data_type, tuple(num_input_vecs + [num_input_channels])) - - x_nchw = layout_FINN2NCHW(x) - y_expected = multithreshold(x_nchw, thresholds) - - # convert back to NHWC for comparison to hw outputs - y_expected = layout_NCHW2FINN(y_expected) - if activation == DataType["BIPOLAR"]: - # binary to bipolar - y_expected = 2 * y_expected - 1 - else: - # signed offset - y_expected += activation.min() - - input_dict = prepare_inputs(x) - y_produced = oxe.execute_onnx(model, input_dict)["outp"] - - assert (y_produced == y_expected).all() - - # Transform to the specified implementation style, either the - # RTL or HLS according to test parameters - node = model.get_nodes_by_op_type(model.graph.node[0].op_type)[0] - inst = getCustomOp(node) - inst.set_nodeattr("preferred_impl_style", impl_style) - model = model.transform(SpecializeLayers()) - model = model.transform(InferShapes()) - assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index b8242df933..f684931478 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ b/tests/fpgadataflow/test_depthwise_convolution.py @@ -182,7 +182,7 @@ def test_depthwise_conv_hw_cppsim(act, pe, k, stride, padding): new_model = model.transform(InferConvInpGen()) new_model = new_model.transform(InferVectorVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) # set SIMD in ConvInputGen node and PE in VVAU node for n in new_model.graph.node: @@ -226,7 +226,7 @@ def test_depthwise_conv_hw_rtlsim(act, pe, k, stride, padding): new_model = model.transform(InferConvInpGen()) new_model = new_model.transform(InferVectorVectorActivation()) - new_model = new_model.transform(SpecializeLayers()) + new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1")) # set SIMD in ConvInputGen node and PE in VVAU node for n in 
new_model.graph.node: diff --git a/tests/fpgadataflow/test_fpgadataflow_addstreams.py b/tests/fpgadataflow/test_fpgadataflow_addstreams.py index 530d94e13b..484cbbe04a 100644 --- a/tests/fpgadataflow/test_fpgadataflow_addstreams.py +++ b/tests/fpgadataflow/test_fpgadataflow_addstreams.py @@ -116,7 +116,7 @@ def test_fpgadataflow_addstreams(idt, ch, fold, exec_mode): y_produced = oxe.execute_onnx(model, input_dict)["outp"] assert (y_produced == y_expected).all(), "Execution of hw layer failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py index d5fa7c779f..2ad49ae58b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py +++ b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py @@ -139,7 +139,7 @@ def test_fpgadataflow_channelwise_ops(idt, act, pdt, nf, ich, func, vecs, exec_m assert (y_produced == y_expected).all(), "HW layer execution failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_checksum.py b/tests/fpgadataflow/test_fpgadataflow_checksum.py index 34a48996c9..817d13e13d 100644 --- a/tests/fpgadataflow/test_fpgadataflow_checksum.py +++ b/tests/fpgadataflow/test_fpgadataflow_checksum.py @@ -176,7 +176,7 @@ def test_fpgadataflow_checksum(): # rtlsim model = model.transform(InsertFIFO(True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) diff --git a/tests/fpgadataflow/test_fpgadataflow_concat.py b/tests/fpgadataflow/test_fpgadataflow_concat.py index b52b14fca3..25c738d049 100644 --- a/tests/fpgadataflow/test_fpgadataflow_concat.py +++ b/tests/fpgadataflow/test_fpgadataflow_concat.py @@ -98,7 +98,7 @@ def test_fpgadataflow_concat(exec_mode, idt): assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow" ret = execute_onnx(model, inp_dict) assert (ret[oname] == exp_out).all() - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) assert model.graph.node[0].op_type == "StreamingConcat_hls" assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow.hls" if exec_mode == "cppsim": @@ -141,11 +141,11 @@ def test_fpgadataflow_concat_stitchedip(): model = model.transform(InferConcatLayer()) assert model.graph.node[0].op_type == "StreamingConcat" assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(fpga_part)) assert model.graph.node[0].op_type == "StreamingConcat_hls" assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow.hls" model = model.transform(InsertFIFO(create_shallow_fifos=True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(fpga_part, clk_ns)) model = model.transform(HLSSynthIP()) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py index 
45ca74fbea..dc5dc0c02a 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py @@ -189,7 +189,7 @@ def test_fpgadataflow_slidingwindow( # set impl_style inst = getCustomOp(model.get_nodes_by_op_type("ConvolutionInputGenerator")[0]) inst.set_nodeattr("preferred_impl_style", impl_style) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) # set simd inst = getCustomOp(model.graph.node[0]) inst.set_nodeattr("SIMD", simd) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 6c0712b7b0..9c45b06f4a 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -253,7 +253,7 @@ def test_fpgadataflow_conv_dynamic(cfg): model = model.transform(to_hw.InferQuantizedMatrixVectorActivation()) model = model.transform(to_hw.InferVectorVectorActivation()) model = model.transform(absorb.AbsorbConsecutiveTransposes()) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) parent_model = model.transform(CreateDataflowPartition()) sdp_inst = getCustomOp(parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0]) model = ModelWrapper(sdp_inst.get_nodeattr("model")) @@ -281,7 +281,7 @@ def test_fpgadataflow_conv_dynamic(cfg): getCustomOp(comp_node).set_nodeattr("PE", 4) model = model.transform(InsertDWC()) model = model.transform(InsertFIFO(create_shallow_fifos=True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) model = model.transform(PrepareIP("xc7z020clg400-1", 5)) @@ -523,11 +523,11 @@ def test_fpgadataflow_slidingwindow_rtl_dynamic( dw=dw, ) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) # Simulate using stitched-ip-rtlsim so we can use existing infrastructure # that supports hook functions to re-program configuration before rtlsim model = model.transform(InsertFIFO(True)) # required for proper simulation - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP("xc7z020clg400-1", 5)) model = model.transform(HLSSynthIP()) diff --git a/tests/fpgadataflow/test_fpgadataflow_deconv.py b/tests/fpgadataflow/test_fpgadataflow_deconv.py index f1fc989066..16cf7481f2 100644 --- a/tests/fpgadataflow/test_fpgadataflow_deconv.py +++ b/tests/fpgadataflow/test_fpgadataflow_deconv.py @@ -169,7 +169,7 @@ def test_fpgadataflow_deconv(idim, stride, ifm_ch, ofm_ch, simd, pe, k, padding, y_produced = oxe.execute_onnx(model, input_dict)["outp"] assert (y_produced == y_expected).all() - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(MinimizeAccumulatorWidth()) for n in model.graph.node: diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py index 25717a4152..fb9d52eb51 100644 --- a/tests/fpgadataflow/test_fpgadataflow_downsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -131,7 +131,7 @@ def 
test_fpgadataflow_downsampler(is_1d, flip_1d, exec_mode): assert len(model.get_nodes_by_op_type("DownSampler")) == 1 y_produced = execute_onnx(model, idict)["out0"] assert (y_produced == y_expected).all() - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(SetExecMode("cppsim")) model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py index 62b9265466..7ac9cbe3fb 100644 --- a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py +++ b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py @@ -128,7 +128,7 @@ def test_fpgadataflow_duplicatestreams(idt, ch, fold, imdim, n_dupl, exec_mode, y = output_dict["outp%d" % i] assert (y == expected_y).all(), "HW layer execution failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py index 7152d32a7b..6b79a39ed5 100644 --- a/tests/fpgadataflow/test_fpgadataflow_dwc.py +++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py @@ -47,7 +47,7 @@ from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers -def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype): +def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style): inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape) outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape) @@ -63,6 +63,7 @@ def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype): inWidth=inWidth, outWidth=outWidth, dataType=str(finn_dtype.name), + preferred_impl_style=impl_style, ) graph = helper.make_graph(nodes=[DWC_node], name="dwc_graph", inputs=[inp], outputs=[outp]) @@ -86,17 +87,17 @@ def prepare_inputs(input_tensor, dt): ([1, 24], 6, 4, DataType["INT2"]), ([1, 24], 4, 6, DataType["INT2"]), ([1, 4], 2, 4, DataType["BIPOLAR"]), - ([1, 2, 8], 2, 4, DataType["BIPOLAR"]), ([1, 4], 4, 2, DataType["INT2"]), ([1, 2, 8], 4, 4, DataType["INT2"]), ([1, 2, 8], 8, 16, DataType["INT2"]), ], ) @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +@pytest.mark.parametrize("impl_style", ["hls", "rtl"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_dwc(config, exec_mode): +def test_fpgadataflow_dwc(config, exec_mode, impl_style): shape, inWidth, outWidth, finn_dtype = config test_fpga_part = "xc7z020clg400-1" @@ -104,7 +105,7 @@ def test_fpgadataflow_dwc(config, exec_mode): x = gen_finn_dt_tensor(finn_dtype, shape) input_dict = prepare_inputs(x, finn_dtype) - model = make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype) + model = make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style) # verify abstraction level execution y = oxe.execute_onnx(model, input_dict)["outp"] assert ( @@ -113,7 +114,7 @@ def test_fpgadataflow_dwc(config, exec_mode): input values anymore.""" assert y.shape == tuple(shape), """The output shape is incorrect.""" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) @@ -136,19 +137,17 @@ def test_fpgadataflow_dwc(config, exec_mode): 
@pytest.mark.parametrize( "config", [ - ([1, 24], 6, 4, DataType["INT2"]), - ([1, 24], 4, 6, DataType["INT2"]), ([1, 4], 2, 4, DataType["BIPOLAR"]), - ([1, 2, 8], 2, 4, DataType["BIPOLAR"]), ([1, 4], 4, 2, DataType["INT2"]), ([1, 2, 8], 4, 4, DataType["INT2"]), ([1, 2, 8], 8, 16, DataType["INT2"]), ], ) +@pytest.mark.parametrize("impl_style", ["hls", "rtl"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_dwc_stitched_rtlsim(config): +def test_fpgadataflow_dwc_stitched_rtlsim(config, impl_style): shape, inWidth, outWidth, finn_dtype = config test_fpga_part = "xc7z020clg400-1" @@ -157,10 +156,10 @@ def test_fpgadataflow_dwc_stitched_rtlsim(config): x = gen_finn_dt_tensor(finn_dtype, shape) input_dict = prepare_inputs(x, finn_dtype) - model = make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype) - model = model.transform(SpecializeLayers()) + model = make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(InsertFIFO(create_shallow_fifos=True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) diff --git a/tests/fpgadataflow/test_fpgadataflow_eltwise.py b/tests/fpgadataflow/test_fpgadataflow_eltwise.py index fbfcc8e28b..996477f28f 100644 --- a/tests/fpgadataflow/test_fpgadataflow_eltwise.py +++ b/tests/fpgadataflow/test_fpgadataflow_eltwise.py @@ -114,7 +114,7 @@ def test_fpgadataflow_eltwise(dt0, ch, fold, do_abs, exec_mode): y_produced = execute_onnx(model, idict)["out0"] assert (y_produced == y_expected).all(), exec_mode + " failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) assert len(model.graph.node) == 1 assert model.graph.node[0].op_type == "StreamingEltwise_hls" diff --git a/tests/fpgadataflow/test_fpgadataflow_fifo.py b/tests/fpgadataflow/test_fpgadataflow_fifo.py index 1719da1454..f628a0e7af 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fifo.py +++ b/tests/fpgadataflow/test_fpgadataflow_fifo.py @@ -96,7 +96,7 @@ def test_fpgadataflow_fifo_rtlsim(Shape, folded_shape, depth, finn_dtype): input_dict = prepare_inputs(x, finn_dtype) model = make_single_fifo_modelwrapper(Shape, depth, folded_shape, finn_dtype) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 45cc265ac7..87e3267186 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -135,7 +135,7 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style): assert y_produced.shape == expected_oshape assert (y_produced == y_expected).all(), "HW layer execution failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(InferShapes()) model = model.transform(SetExecMode(mode)) diff --git a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py index 9c2802aade..cca4bb7e8e 100644 --- 
a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py @@ -112,7 +112,7 @@ def test_fpgadataflow_globalaccpool(idt, ch, fold, imdim, exec_mode, impl_style) assert (y == expected_y).all(), "HW layer verification failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_labelselect.py b/tests/fpgadataflow/test_fpgadataflow_labelselect.py index 98ded66ca7..83ab2ddcaf 100644 --- a/tests/fpgadataflow/test_fpgadataflow_labelselect.py +++ b/tests/fpgadataflow/test_fpgadataflow_labelselect.py @@ -118,7 +118,7 @@ def test_fpgadataflow_labelselect(idt, labels, fold, k, exec_mode, impl_style): assert soft_verify_topk(x, y, k), "HW layer execution failed" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) diff --git a/tests/fpgadataflow/test_fpgadataflow_lookup.py b/tests/fpgadataflow/test_fpgadataflow_lookup.py index cb15fa3ae5..d5aadc33d4 100644 --- a/tests/fpgadataflow/test_fpgadataflow_lookup.py +++ b/tests/fpgadataflow/test_fpgadataflow_lookup.py @@ -131,7 +131,7 @@ def test_fpgadataflow_lookup(edt, embedding_cfg, exec_mode): ret_hw = execute_onnx(model, {iname: itensor}) assert (exp_out == ret_hw[oname]).all() # call transformation to convert abstraction layer into HLS layer - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xczu3eg-sbva484-1-e")) assert model.graph.node[0].op_type == "Lookup_hls" if exec_mode == "cppsim": model = model.transform(GiveUniqueNodeNames()) @@ -174,7 +174,7 @@ def test_fpgadataflow_lookup_external(): assert (model.get_initializer(ename) == embeddings).all() model = model.transform(InferLookupLayer()) assert model.graph.node[0].op_type == "Lookup" - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(fpga_part)) assert model.graph.node[0].op_type == "Lookup_hls" assert model.graph.node[0].input[0] == iname assert model.graph.node[0].input[1] == ename diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index 2a22f3fc41..1ec77f4eec 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -312,7 +312,7 @@ def test_fpgadataflow_mvau_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): inst.set_nodeattr("mem_mode", mem_mode) # Note: only HLS-based MVAU layers execute CPPsim inst.set_nodeattr("preferred_impl_style", "hls") - model = model.transform(SpecializeLayers("xc7z020clg400-1")) + model = model.transform(SpecializeLayers("xczu7ev-ffvc1156-2-e")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(SetExecMode("cppsim")) model = model.transform(PrepareCppSim()) @@ -423,10 +423,10 @@ def test_fpgadataflow_mvau_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): y_expected = y.reshape(oshape) # TODO split up into several dependent tests -- need to check how this # works for parametrized tests... 
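Note: several of the updated tests above (sliding window, DWC) parametrize over an impl_style and record that choice on the HW abstraction node before specializing. A condensed sketch of the pattern, not part of the patch; it assumes `model` is a ModelWrapper containing a ConvolutionInputGenerator node and `impl_style` is "hls" or "rtl":

    from qonnx.custom_op.registry import getCustomOp
    from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

    # record the preferred implementation style on the node, then specialize
    inst = getCustomOp(model.get_nodes_by_op_type("ConvolutionInputGenerator")[0])
    inst.set_nodeattr("preferred_impl_style", impl_style)
    model = model.transform(SpecializeLayers("xc7z020clg400-1"))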
- model = model.transform(SpecializeLayers("xc7z020clg400-1")) + model = model.transform(SpecializeLayers("xczu7ev-ffvc1156-2-e")) model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(PrepareIP("xczu7ev-ffvc1156-2-e", 5)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) y_produced = oxe.execute_onnx(model, input_dict)["outp"] @@ -531,12 +531,12 @@ def test_fpgadataflow_mvau_large_depth_decoupled_mode_rtlsim( y_expected = y.reshape(oshape) # TODO split up into several dependent tests -- need to check how this # works for parametrized tests... - model = model.transform(SpecializeLayers("xc7z020clg400-1")) + model = model.transform(SpecializeLayers("xczu7ev-ffvc1156-2-e")) model = model.transform(MinimizeWeightBitWidth()) model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(PrepareIP("xczu7ev-ffvc1156-2-e", 5)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) y_produced = oxe.execute_onnx(model, input_dict)["outp"] @@ -611,12 +611,12 @@ def test_mvau_fifocharacterize_rtlsim( inst.set_nodeattr("preferred_impl_style", preferred_impl_style) total_fold = nf * sf exp_total_cycles = total_fold + 10 - model = model.transform(SpecializeLayers("xc7z020clg400-1")) + model = model.transform(SpecializeLayers("xczu7ev-ffvc1156-2-e")) model = model.transform(MinimizeWeightBitWidth()) model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(PrepareIP("xczu7ev-ffvc1156-2-e", 5)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) model = model.transform(DeriveCharacteristic(exp_total_cycles)) @@ -635,17 +635,19 @@ def test_mvau_fifocharacterize_rtlsim( @pytest.mark.parametrize("mh", [18]) @pytest.mark.parametrize("mw", [128]) -@pytest.mark.parametrize("pe", [1, 6, 9, 18]) -@pytest.mark.parametrize("simd", [1, 4, 16, 64, 128]) +@pytest.mark.parametrize("pe", [1, 9, 18]) +@pytest.mark.parametrize("simd", [1, 64, 128]) @pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]]) @pytest.mark.parametrize("wdt", [DataType["INT4"], DataType["INT8"]]) -@pytest.mark.parametrize("part", ["xcvc1902-vsva2197-2MP-e-S", "xcku3p-ffva676-1-e"]) +@pytest.mark.parametrize( + "part", ["xcvc1902-vsva2197-2MP-e-S", "xcku3p-ffva676-1-e", "xc7z020clg400-1"] +) @pytest.mark.parametrize("clk_ns", [1.66, 4]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_rtl_mvau(mh, mw, pe, simd, idt, wdt, part, clk_ns): - if part == "xcku3p-ffva676-1-e" and clk_ns != 1.66: + if part != "xcvc1902-vsva2197-2MP-e-S" and clk_ns != 1.66: pytest.skip( """Skip test for varying clk for devices other than Versal, since this variable only affects DSP58s""" @@ -657,6 +659,9 @@ def test_fpgadataflow_rtl_mvau(mh, mw, pe, simd, idt, wdt, part, clk_ns): ifm = helper.make_tensor_value_info("ifm", TensorProto.FLOAT, [1, ofm_h, ofm_w, mw]) ofm = helper.make_tensor_value_info("ofm", TensorProto.FLOAT, (1, ofm_h, ofm_w, mh)) W = gen_finn_dt_tensor(wdt, (mw, mh)) + # if 7 series, force weights to narrow range + if part == "xc7z020clg400-1": + W = np.clip(W, 
wdt.min() + 1, wdt.max()) model = make_single_matmul_modelwrapper(ifm, ofm, idt, wdt, W) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) diff --git a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py index 1bc2d9d59e..d81936f7e5 100644 --- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py +++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py @@ -28,6 +28,7 @@ import pytest +from functools import partial from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -40,6 +41,8 @@ ) from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers +test_fpga_part = "xczu3eg-sbva484-1-e" + def check_two_dict_for_equality(dict1, dict2): for key in dict1: @@ -96,9 +99,9 @@ def test_res_estimate(): model.set_tensor_datatype("outp", odt) model.set_tensor_datatype("weights", wdt) - model.transform(SpecializeLayers()) + model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) - prod_resource_estimation = model.analysis(res_estimation) + prod_resource_estimation = model.analysis(partial(res_estimation, fpgapart=test_fpga_part)) expect_resource_estimation = { "MVAU_hls_0": { "BRAM_18K": 0, @@ -115,7 +118,9 @@ def test_res_estimate(): ), """The produced output of the res_estimation analysis pass is not equal to the expected one""" - prod_resource_estimation = model.analysis(res_estimation_complete) + prod_resource_estimation = model.analysis( + partial(res_estimation_complete, fpgapart=test_fpga_part) + ) expect_resource_estimation = { "MVAU_hls_0": [ { diff --git a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py index 0df7181a60..c520fb50fc 100644 --- a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py @@ -146,7 +146,7 @@ def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, pe, ceil y_produced = oxe.execute_onnx(model, input_dict)["outp"] assert (y_produced == y_expected).all() - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xczu3eg-sbva484-1-e")) # Ensure PE value is set streamingmaxpool_node = model.get_nodes_by_op_type("StreamingMaxPool_hls")[0] diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py index a6e7e41596..e4dd49fc7f 100644 --- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py @@ -29,24 +29,21 @@ import pytest import numpy as np -import os from onnx import TensorProto, helper -from pyverilator.util.axi_utils import axilite_read, axilite_write from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper -from qonnx.custom_op.general.multithreshold import multithreshold from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model +from qonnx.transformation.infer_datatypes import InferDataTypes +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import gen_finn_dt_tensor import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.analysis.fpgadataflow.hls_synth_res_estimation import 
hls_synth_res_estimation -from finn.core.rtlsim_exec import rtlsim_exec from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim -from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP +from finn.transformation.fpgadataflow.convert_to_hw_layers import InferThresholdingLayer from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP -from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -57,10 +54,17 @@ target_clk_ns = 5 -def generate_random_threshold_values(input_data_type, num_input_channels, num_steps): +def generate_random_threshold_values( + data_type, num_input_channels, num_steps, narrow=False, per_tensor=False +): + if per_tensor: + num_input_channels = 1 + if narrow: + num_steps -= 1 + return np.random.randint( - input_data_type.min(), - input_data_type.max() + 1, + data_type.min(), + data_type.max() + 1, (num_input_channels, num_steps), ).astype(np.float32) @@ -69,76 +73,93 @@ def sort_thresholds_increasing(thresholds): return np.sort(thresholds, axis=1) -# n = batch, c = channel, h = height, w = width of feature map -# Standard = NCHW; FINN = NHWC -# Convert from NHWC(FINN) to NCHW(Standard) -def layout_FINN2NCHW(data): - return np.transpose(data, (0, 3, 1, 2)) - - -# Convert from NCHW(Standard) to NHWC(FINN) -def layout_NCHW2FINN(data): - return np.transpose(data, (0, 2, 3, 1)) - - -def make_single_thresholding_modelwrapper(impl_style, T, idt, odt, actval, n_inp_vecs): - NumChannels = T.shape[0] - - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]) - outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]) +def make_single_multithresholding_modelwrapper( + thresholds, + input_data_type, + threshold_data_type, + output_data_type, + activation_bias, + num_input_vecs, + num_channels, +): + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, num_input_vecs + [num_channels]) + thresh = helper.make_tensor_value_info("thresh", TensorProto.FLOAT, thresholds.shape) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, num_input_vecs + [num_channels]) node_inp_list = ["inp", "thresh"] - Thresholding_node = helper.make_node( - "Thresholding", + Multithresholding_node = helper.make_node( + "MultiThreshold", node_inp_list, ["outp"], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - NumChannels=NumChannels, - numSteps=T.shape[1], - inputDataType=idt.name, - weightDataType=idt.name, # will be set by MinimizeAccumulatorWidth - outputDataType=odt.name, - ActVal=actval, - numInputVectors=n_inp_vecs, - preferred_impl_style=impl_style, + domain="qonnx.custom_op.general", + out_dtype=output_data_type.name, + out_bias=float(activation_bias), + out_scale=1.0, + data_layout="NHWC", ) + graph = helper.make_graph( - nodes=[Thresholding_node], - name="thresholding_graph", + nodes=[Multithresholding_node], + name="multithresholding_graph", inputs=[inp], outputs=[outp], + value_info=[thresh], ) - model = qonnx_make_model(graph, producer_name="thresholding-model") + model = helper.make_model(graph, producer_name="multithresholding-model") model = ModelWrapper(model) + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + model = model.transform(GiveUniqueNodeNames()) - 
model.set_tensor_datatype("inp", idt) - model.set_tensor_datatype("outp", odt) + model.set_tensor_datatype("inp", input_data_type) + model.set_tensor_datatype("outp", output_data_type) - model.set_tensor_datatype("thresh", idt) - model.set_initializer("thresh", T) + model.set_tensor_datatype("thresh", threshold_data_type) + model.set_initializer("thresh", thresholds) return model -# activation: None or DataType -@pytest.mark.parametrize("act", [DataType["INT4"], DataType["BIPOLAR"]]) -# input datatype -@pytest.mark.parametrize("idt", [DataType["INT16"], DataType["UINT16"]]) -# folding, -1 is maximum possible -@pytest.mark.parametrize("nf", [-1, 2, 1]) -# number of input features -@pytest.mark.parametrize("ich", [16]) -# execution mode +@pytest.mark.parametrize("num_input_channels", [6, 16]) +@pytest.mark.parametrize( + "num_input_vecs", + [ + [1], + [1, 2, 2], + ], +) +@pytest.mark.parametrize("activation", [DataType["INT4"], DataType["BIPOLAR"]]) +@pytest.mark.parametrize( + "idt_tdt_cfg", + [ + (DataType["INT8"], DataType["INT8"]), + (DataType["INT8"], DataType["INT9"]), + (DataType["UINT8"], DataType["UINT8"]), + (DataType["UINT8"], DataType["UINT9"]), + ], +) +@pytest.mark.parametrize("fold", [-1, 1, 2]) +@pytest.mark.parametrize("narrow", [True, False]) +@pytest.mark.parametrize("per_tensor", [True, False]) +@pytest.mark.parametrize("impl_style", ["hls", "rtl"]) @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) -# memory mode @pytest.mark.parametrize("mem_mode", ["internal_embedded", "internal_decoupled"]) -@pytest.mark.parametrize("impl_style", ["rtl", "hls"]) @pytest.mark.fpgadataflow @pytest.mark.vivado @pytest.mark.slow -def test_fpgadataflow_thresholding(impl_style, idt, act, nf, ich, exec_mode, mem_mode): +def test_fpgadataflow_thresholding( + num_input_channels, + num_input_vecs, + activation, + idt_tdt_cfg, + fold, + narrow, + per_tensor, + impl_style, + exec_mode, + mem_mode, +): # the mem_mode parameter can only be used for the hls thresholding # so the test will only be executed once for impl_style=rtl and once skipped # when the mem_mode is varied. Otherwise, the same test configuration would always @@ -147,66 +168,74 @@ def test_fpgadataflow_thresholding(impl_style, idt, act, nf, ich, exec_mode, mem pytest.skip( "Skip, because test is identical to impl_style=rtl and mem_mode=internal_embedded" ) - if nf == -1: - nf = ich - pe = ich // nf - n_inp_vecs = [1, 2, 2] - assert ich % pe == 0 - - # generate input data, data layout is NHWC for FINN - x = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ich])) - - odt = act - n_steps = act.get_num_possible_values() - 1 - - # Generate random, non-decreasing thresholds - thresholds = generate_random_threshold_values(idt, ich, n_steps) - - thresholds = sort_thresholds_increasing(thresholds) - - if odt == DataType["BIPOLAR"]: - actval = 0 + if narrow and activation == DataType["BIPOLAR"]: + pytest.skip("Narrow needs to be false with biploar activation.") + input_data_type, threshold_data_type = idt_tdt_cfg + num_steps = activation.get_num_possible_values() - 1 + + if fold == -1: + fold = num_input_channels + pe = num_input_channels // fold + if num_input_channels % pe != 0: + pytest.skip("Invalid folding configuration. 
Skipping test.") + + output_data_type = activation + if activation == DataType["BIPOLAR"]: + activation_bias = 0 else: - actval = odt.min() + activation_bias = activation.min() + if narrow: + activation_bias += 1 - # Build DUT - model = make_single_thresholding_modelwrapper( - impl_style, thresholds, idt, odt, actval, n_inp_vecs + # Generate random thresholds and sort in ascending order + thresholds = generate_random_threshold_values( + threshold_data_type, num_input_channels, num_steps, narrow, per_tensor ) - # Expected Reference output - # multithreshold util fxn wants NCHW input, not NHWC - x_nchw = layout_FINN2NCHW(x) - y = multithreshold(x_nchw, thresholds) + # provide non-decreasing/ascending thresholds + thresholds = sort_thresholds_increasing(thresholds) - # convert back to NHWC for comparison to hw outputs - y = layout_NCHW2FINN(y) - if act == DataType["BIPOLAR"]: - # binary to bipolar - y = 2 * y - 1 - else: - # signed offset - y += act.min() + # Make a Multithreshold graph and convert to thresholding binary search node + model = make_single_multithresholding_modelwrapper( + thresholds, + input_data_type, + threshold_data_type, + output_data_type, + activation_bias, + num_input_vecs, + num_input_channels, + ) - oshape = model.get_tensor_shape("outp") - y_expected = y.reshape(oshape) + # calculate reference output + x = gen_finn_dt_tensor(input_data_type, tuple(num_input_vecs + [num_input_channels])) - # package input data as dictionary - input_dict = {"inp": x} + input_dict = {model.graph.input[0].name: x} + y_expected = oxe.execute_onnx(model, input_dict)[model.graph.output[0].name] - # execute DUT - y_produced = oxe.execute_onnx(model, input_dict)["outp"] + if output_data_type == DataType["BIPOLAR"]: + # binary to bipolar + y_expected = 2 * y_expected - 1 - y_produced = y_produced.reshape(y_expected.shape) + model = model.transform(InferThresholdingLayer()) + # Perform functional validation of the InferThresholdingLayer transform + y_produced = oxe.execute_onnx(model, input_dict)[model.graph.output[0].name] assert (y_produced == y_expected).all() - model = model.transform(SpecializeLayers()) - # Make sure that SpecializeLayers did not default to HLS implementation unexpectedly + # Transform to the specified implementation style, either the + # RTL or HLS according to test parameters + node = model.get_nodes_by_op_type(model.graph.node[0].op_type)[0] + inst = getCustomOp(node) + inst.set_nodeattr("preferred_impl_style", impl_style) + model = model.transform(SpecializeLayers(test_fpga_part)) + model = model.transform(InferShapes()) assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) - node = model.graph.node[0] + + node = model.get_nodes_by_op_type(model.graph.node[0].op_type)[0] inst = getCustomOp(node) inst.set_nodeattr("PE", pe) + model = model.transform(GiveUniqueNodeNames()) + if impl_style == "hls": inst.set_nodeattr("mem_mode", mem_mode) @@ -215,19 +244,12 @@ def test_fpgadataflow_thresholding(impl_style, idt, act, nf, ich, exec_mode, mem model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) elif exec_mode == "rtlsim": - model = model.transform(SetExecMode("rtlsim")) - model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) + model = model.transform(SetExecMode("rtlsim")) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) - else: - raise Exception("Unknown exec_mode") - - # execute model - y_produced = oxe.execute_onnx(model, 
input_dict)["outp"] - - y_produced = y_produced.reshape(y_expected.shape) + y_produced = oxe.execute_onnx(model, input_dict)[model.graph.output[0].name] assert (y_produced == y_expected).all() if exec_mode == "rtlsim": @@ -241,219 +263,3 @@ def test_fpgadataflow_thresholding(impl_style, idt, act, nf, ich, exec_mode, mem exp_cycles = exp_cycles_dict[node.name] assert np.isclose(exp_cycles, cycles_rtlsim, atol=15) assert exp_cycles != 0 - - -@pytest.mark.parametrize("impl_style", ["rtl", "hls"]) -# configuration (ch, pe) -@pytest.mark.parametrize("cfg", [(1, 1), (6, 2), (6, 3), (8, 4)]) -@pytest.mark.fpgadataflow -@pytest.mark.vivado -def test_runtime_thresholds_read(impl_style, cfg): - """Read back threshold weights during runtime - - 1. Create random initial weights T - 2. Execute model - 3. Read back weights via AXI - 4. Compare with initial weights T - """ - ch = cfg[0] - pe = cfg[1] - n_inp_vecs = [1, 2, 2] - hls_mem_mode = "internal_decoupled" - act = DataType["INT4"] - idt = DataType["INT16"] - odt = act - n_steps = act.get_num_possible_values() - 1 - np.random.seed(2) - T = np.random.randint(idt.min(), idt.max() + 1, (ch, n_steps)).astype(np.float32) - # provide non-decreasing thresholds - T = np.sort(T, axis=1) - - if odt == DataType["BIPOLAR"]: - actval = 0 - else: - actval = odt.min() - - model = make_single_thresholding_modelwrapper(impl_style, T, idt, odt, actval, n_inp_vecs) - model = model.transform(SpecializeLayers()) - - # Make sure that specialize layer did not default to HLS implementation - assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) - - node = model.get_nodes_by_op_type(f"Thresholding_{impl_style}")[0] - op_inst = getCustomOp(node) - op_inst.set_nodeattr("PE", pe) - if impl_style == "hls": - op_inst.set_nodeattr("mem_mode", hls_mem_mode) - op_inst.set_nodeattr("runtime_writeable_weights", 1) - - dat_fname = f"old_weights_{cfg}.dat" - op_inst.make_weight_file(T, "decoupled_runtime", dat_fname) - with open(dat_fname, "r") as f: - old_weight_stream = f.read().strip() - os.remove(dat_fname) - old_weight_stream = map(lambda x: int(x, 16), old_weight_stream.split("\n")) - old_weight_stream = list(old_weight_stream) - # need to create stitched IP for runtime weight testing - model = model.transform(InsertFIFO(True)) - model = model.transform(SpecializeLayers()) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) - model = model.transform(HLSSynthIP()) - model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) - model = model.transform(PrepareRTLSim()) - model.set_metadata_prop("exec_mode", "rtlsim") - # add two copies of the input tensor as the first one is just used to - # "flush out" the pipeline (as mvau already starts receiving old weights while - # we read/write new ones and reads seem to cause a disturbance too) - # generate input data - in_tensor = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ch])) - in_tensor = np.tile(in_tensor, (2, 1, 1, 1)) - - exec_ctx = {"inp": in_tensor} - extracted_weight_stream = [] - - def read_weights(sim): - addr = 0 - for i in range(len(old_weight_stream)): - extracted_weight_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) - addr += 4 - - rtlsim_exec(model, exec_ctx, pre_hook=read_weights) - - # Validate the AXI Read weights - assert extracted_weight_stream == old_weight_stream - - y = exec_ctx["outp"][0] - - # multithreshold util fxn wants NCHW input, not NHWC - expected = multithreshold(np.transpose(in_tensor, (0, 3, 
1, 2)), T) - # convert back to NHWC for comparison to hw outputs - expected = np.transpose(expected, (0, 2, 3, 1))[1] - - if act == DataType["BIPOLAR"]: - # binary to bipolarW - expected = 2 * expected - 1 - else: - # signed offset - expected += act.min() - - # Validate the output is as expected - assert (y == expected).all() - - -@pytest.mark.parametrize("impl_style", ["hls", "rtl"]) -# configuration (ch, pe) -@pytest.mark.parametrize("cfg", [(1, 1), (6, 2), (6, 3), (8, 4)]) -@pytest.mark.fpgadataflow -@pytest.mark.vivado -def test_runtime_thresholds_write(impl_style, cfg): - """Write threshold weights during runtime - - 1. Create random initial weights T_init - 2. Create model with initial weights - 3. Create new set of weights T_write - 4. Write T_write using AXI bus - 5. Read back using AXI bus to T_read - 6. Compare T_write and T_read - 7. Validate outputs with expected vectors - """ - ch = cfg[0] - pe = cfg[1] - - n_inp_vecs = [1, 2, 2] - hls_mem_mode = "internal_decoupled" - act = DataType["INT4"] - idt = DataType["INT16"] - - odt = act - n_steps = act.get_num_possible_values() - 1 - np.random.seed(2) - T_init = np.random.randint(idt.min(), idt.max() + 1, (ch, n_steps)).astype(np.float32) - # provide non-decreasing thresholds - T_init = np.sort(T_init, axis=1) - - if odt == DataType["BIPOLAR"]: - actval = 0 - else: - actval = odt.min() - - model = make_single_thresholding_modelwrapper(impl_style, T_init, idt, odt, actval, n_inp_vecs) - model = model.transform(SpecializeLayers()) - - # Validate that specialize layer did not default to HLS implementation - assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) - - op_inst = getCustomOp(model.graph.node[0]) - op_inst.set_nodeattr("PE", pe) - if impl_style == "hls": - op_inst.set_nodeattr("mem_mode", hls_mem_mode) - op_inst.set_nodeattr("runtime_writeable_weights", 1) - - # Make new weights for runtime write - np.random.seed(4) - T_write = np.random.randint(idt.min(), idt.max() + 1, (ch, n_steps)).astype(np.float32) - # provide non-decreasing thresholds - T_write = np.sort(T_write, axis=1) - - dat_fname = f"T_write_{cfg}.dat" # distinguish fname per paramter for distributed testing - op_inst.make_weight_file(T_write, "decoupled_runtime", dat_fname) - with open(dat_fname, "r") as f: - T_write_stream = f.read().strip() - os.remove(dat_fname) - - T_write_stream = map(lambda x: int(x, 16), T_write_stream.split("\n")) - T_write_stream = list(T_write_stream) - - # need to create stitched IP for runtime weight testing - model = model.transform(InsertFIFO(True)) - model = model.transform(SpecializeLayers()) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) - model = model.transform(HLSSynthIP()) - model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) - model = model.transform(PrepareRTLSim()) - model.set_metadata_prop("exec_mode", "rtlsim") - # add two copies of the input tensor as the first one is just used to - # "flush out" the pipeline (as mvau already starts receiving old weights while - # we read/write new ones and reads seem to cause a disturbance too) - # generate input data - in_tensor = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ch])) - in_tensor = np.tile(in_tensor, (2, 1, 1, 1)) - - exec_ctx_write = {"inp": in_tensor} - - def write_weights(sim): - addr = 0 - for nw in T_write_stream: - axilite_write(sim, addr, nw, basename="s_axilite_0_") - addr += 4 - - T_read_stream = [] - - def read_weights(sim): - addr = 0 - for i in 
range(len(T_write_stream)): - T_read_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) - addr += 4 - - rtlsim_exec(model, exec_ctx_write, pre_hook=write_weights, post_hook=read_weights) - - y = exec_ctx_write["outp"][1] - - assert T_read_stream == T_write_stream - - # multithreshold util fxn wants NCHW input, not NHWC - expected = multithreshold(np.transpose(in_tensor, (0, 3, 1, 2)), T_write) - # convert back to NHWC for comparison to hw outputs - expected = np.transpose(expected, (0, 2, 3, 1))[1] - - if act == DataType["BIPOLAR"]: - # binary to bipolarW - expected = 2 * expected - 1 - else: - # signed offset - expected += act.min() - - # Validate the output is as expected - assert (y == expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding_runtime.py b/tests/fpgadataflow/test_fpgadataflow_thresholding_runtime.py new file mode 100644 index 0000000000..1ad695bb94 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding_runtime.py @@ -0,0 +1,334 @@ +# Copyright (C) 2024, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
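# --- Editor's note (illustrative sketch, not part of the patch): the new test file
# --- below exercises runtime-writeable thresholds through the stitched IP's AXI-lite
# --- interface from rtlsim hooks. The helper name `axilite_weight_hooks` and the
# --- variables in the usage comment (`model`, `in_tensor`, `new_words`) are
# --- hypothetical; the "s_axilite_0_" basename and 4-byte address stride match the
# --- tests below.

from pyverilator.util.axi_utils import axilite_read, axilite_write


def axilite_weight_hooks(weight_words, basename="s_axilite_0_"):
    # build a (pre_hook, post_hook, captured) triple for rtlsim_exec: the pre-hook
    # writes one 32-bit weight word per register (4-byte stride), the post-hook
    # reads the same register range back into `captured` for comparison
    captured = []

    def write_weights(sim):
        for i, word in enumerate(weight_words):
            axilite_write(sim, 4 * i, word, basename=basename)

    def read_weights(sim):
        for i in range(len(weight_words)):
            captured.append(axilite_read(sim, 4 * i, basename=basename))

    return write_weights, read_weights, captured


# usage (hypothetical): pre, post, got = axilite_weight_hooks(new_words)
# rtlsim_exec(model, {"inp": in_tensor}, pre_hook=pre, post_hook=post); assert got == new_words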
+ +import pytest + +import numpy as np +import os +from onnx import TensorProto, helper +from pyverilator.util.axi_utils import axilite_read, axilite_write +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.general.multithreshold import multithreshold +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model + +from finn.core.rtlsim_exec import rtlsim_exec +from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers + +test_fpga_part = "xczu3eg-sbva484-1-e" +target_clk_ns = 5 + + +def generate_random_threshold_values( + data_type, num_input_channels, num_steps, narrow=False, per_tensor=False +): + if per_tensor: + num_input_channels = 1 + if narrow: + num_steps -= 1 + + return np.random.randint( + data_type.min(), + data_type.max() + 1, + (num_input_channels, num_steps), + ).astype(np.float32) + + +def sort_thresholds_increasing(thresholds): + return np.sort(thresholds, axis=1) + + +# n = batch, c = channel, h = height, w = width of feature map +# Standard = NCHW; FINN = NHWC +# Convert from NHWC(FINN) to NCHW(Standard) +def layout_FINN2NCHW(data): + return np.transpose(data, (0, 3, 1, 2)) + + +# Convert from NCHW(Standard) to NHWC(FINN) +def layout_NCHW2FINN(data): + return np.transpose(data, (0, 2, 3, 1)) + + +def make_single_thresholding_modelwrapper(impl_style, T, idt, odt, actval, n_inp_vecs, num_ch): + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, n_inp_vecs + [num_ch]) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, n_inp_vecs + [num_ch]) + + node_inp_list = ["inp", "thresh"] + + Thresholding_node = helper.make_node( + "Thresholding", + node_inp_list, + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + NumChannels=num_ch, + numSteps=T.shape[1], + inputDataType=idt.name, + weightDataType=idt.name, # will be set by MinimizeAccumulatorWidth + outputDataType=odt.name, + ActVal=actval, + numInputVectors=n_inp_vecs, + preferred_impl_style=impl_style, + ) + graph = helper.make_graph( + nodes=[Thresholding_node], + name="thresholding_graph", + inputs=[inp], + outputs=[outp], + ) + + model = qonnx_make_model(graph, producer_name="thresholding-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + model.set_tensor_datatype("thresh", idt) + model.set_initializer("thresh", T) + return model + + +@pytest.mark.parametrize("impl_style", ["rtl", "hls"]) +# configuration (ch, pe) +@pytest.mark.parametrize("cfg", [(1, 1), (6, 2), (6, 3)]) +@pytest.mark.parametrize("narrow", [True, False]) +@pytest.mark.parametrize("per_tensor", [True, False]) +@pytest.mark.fpgadataflow +@pytest.mark.vivado +def test_runtime_thresholds_read(impl_style, cfg, narrow, per_tensor): + """Read back threshold weights during runtime + + 1. Create random initial weights T + 2. Execute model + 3. Read back weights via AXI + 4. 
Compare with initial weights T + """ + ch = cfg[0] + pe = cfg[1] + n_inp_vecs = [1, 2, 2] + hls_mem_mode = "internal_decoupled" + act = DataType["INT4"] + idt = DataType["INT16"] + odt = act + n_steps = act.get_num_possible_values() - 1 + # Generate random thresholds and sort in ascending order + T = generate_random_threshold_values(idt, ch, n_steps, narrow, per_tensor) + + # provide non-decreasing/ascending thresholds + T = sort_thresholds_increasing(T) + + actval = act.min() + if narrow: + actval += 1 + + model = make_single_thresholding_modelwrapper(impl_style, T, idt, odt, actval, n_inp_vecs, ch) + model = model.transform(SpecializeLayers(test_fpga_part)) + + # Make sure that specialize layer did not default to HLS implementation + assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) + + node = model.get_nodes_by_op_type(f"Thresholding_{impl_style}")[0] + op_inst = getCustomOp(node) + op_inst.set_nodeattr("PE", pe) + if impl_style == "hls": + op_inst.set_nodeattr("mem_mode", hls_mem_mode) + op_inst.set_nodeattr("runtime_writeable_weights", 1) + + dat_fname = f"old_weights_{cfg}.dat" + op_inst.make_weight_file(T, "decoupled_runtime", dat_fname) + with open(dat_fname, "r") as f: + old_weight_stream = f.read().strip() + os.remove(dat_fname) + old_weight_stream = map(lambda x: int(x, 16), old_weight_stream.split("\n")) + old_weight_stream = list(old_weight_stream) + # need to create stitched IP for runtime weight testing + model = model.transform(InsertFIFO(True)) + model = model.transform(SpecializeLayers(test_fpga_part)) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) + model = model.transform(HLSSynthIP()) + model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) + model = model.transform(PrepareRTLSim()) + model.set_metadata_prop("exec_mode", "rtlsim") + # add two copies of the input tensor as the first one is just used to + # "flush out" the pipeline (as mvau already starts receiving old weights while + # we read/write new ones and reads seem to cause a disturbance too) + # generate input data + in_tensor = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ch])) + in_tensor = np.tile(in_tensor, (2, 1, 1, 1)) + + exec_ctx = {"inp": in_tensor} + extracted_weight_stream = [] + + def read_weights(sim): + addr = 0 + for i in range(len(old_weight_stream)): + extracted_weight_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) + addr += 4 + + rtlsim_exec(model, exec_ctx, pre_hook=read_weights) + + # Validate the AXI Read weights + assert extracted_weight_stream == old_weight_stream + + y = exec_ctx["outp"][0] + + # multithreshold util fxn wants NCHW input, not NHWC + expected = multithreshold(np.transpose(in_tensor, (0, 3, 1, 2)), T) + # convert back to NHWC for comparison to hw outputs + expected = np.transpose(expected, (0, 2, 3, 1))[1] + + # signed offset + expected += actval + + # Validate the output is as expected + assert (y == expected).all() + + +@pytest.mark.parametrize("impl_style", ["rtl", "hls"]) +# configuration (ch, pe) +@pytest.mark.parametrize("cfg", [(1, 1), (6, 2), (6, 3)]) +@pytest.mark.parametrize("narrow", [True, False]) +@pytest.mark.parametrize("per_tensor", [True, False]) +@pytest.mark.fpgadataflow +@pytest.mark.vivado +def test_runtime_thresholds_write(impl_style, cfg, narrow, per_tensor): + """Write threshold weights during runtime + + 1. Create random initial weights T_init + 2. Create model with initial weights + 3. Create new set of weights T_write + 4. 
Write T_write using AXI bus + 5. Read back using AXI bus to T_read + 6. Compare T_write and T_read + 7. Validate outputs with expected vectors + """ + ch = cfg[0] + pe = cfg[1] + + n_inp_vecs = [1, 2, 2] + hls_mem_mode = "internal_decoupled" + act = DataType["INT4"] + idt = DataType["INT16"] + + odt = act + n_steps = act.get_num_possible_values() - 1 + # Generate random thresholds and sort in ascending order + T_init = generate_random_threshold_values(idt, ch, n_steps, narrow, per_tensor) + + # provide non-decreasing/ascending thresholds + T_init = sort_thresholds_increasing(T_init) + + actval = act.min() + if narrow: + actval += 1 + + model = make_single_thresholding_modelwrapper( + impl_style, T_init, idt, odt, actval, n_inp_vecs, ch + ) + model = model.transform(SpecializeLayers(test_fpga_part)) + + # Validate that specialize layer did not default to HLS implementation + assert model.graph.node[0].op_type == "Thresholding_" + str(impl_style) + + op_inst = getCustomOp(model.graph.node[0]) + op_inst.set_nodeattr("PE", pe) + if impl_style == "hls": + op_inst.set_nodeattr("mem_mode", hls_mem_mode) + op_inst.set_nodeattr("runtime_writeable_weights", 1) + + # Make new weights for runtime write + T_write = generate_random_threshold_values(idt, ch, n_steps, narrow, per_tensor) + # provide non-decreasing/ascending thresholds + T_write = sort_thresholds_increasing(T_write) + + dat_fname = f"T_write_{cfg}.dat" # distinguish fname per paramter for distributed testing + op_inst.make_weight_file(T_write, "decoupled_runtime", dat_fname) + with open(dat_fname, "r") as f: + T_write_stream = f.read().strip() + os.remove(dat_fname) + + T_write_stream = map(lambda x: int(x, 16), T_write_stream.split("\n")) + T_write_stream = list(T_write_stream) + + # need to create stitched IP for runtime weight testing + model = model.transform(InsertFIFO(True)) + model = model.transform(SpecializeLayers(test_fpga_part)) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) + model = model.transform(HLSSynthIP()) + model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) + model = model.transform(PrepareRTLSim()) + model.set_metadata_prop("exec_mode", "rtlsim") + # add two copies of the input tensor as the first one is just used to + # "flush out" the pipeline (as mvau already starts receiving old weights while + # we read/write new ones and reads seem to cause a disturbance too) + # generate input data + in_tensor = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ch])) + in_tensor = np.tile(in_tensor, (2, 1, 1, 1)) + + exec_ctx_write = {"inp": in_tensor} + + def write_weights(sim): + addr = 0 + for nw in T_write_stream: + axilite_write(sim, addr, nw, basename="s_axilite_0_") + addr += 4 + + T_read_stream = [] + + def read_weights(sim): + addr = 0 + for i in range(len(T_write_stream)): + T_read_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) + addr += 4 + + rtlsim_exec(model, exec_ctx_write, pre_hook=write_weights, post_hook=read_weights) + + y = exec_ctx_write["outp"][1] + + assert T_read_stream == T_write_stream + + # multithreshold util fxn wants NCHW input, not NHWC + expected = multithreshold(np.transpose(in_tensor, (0, 3, 1, 2)), T_write) + # convert back to NHWC for comparison to hw outputs + expected = np.transpose(expected, (0, 2, 3, 1))[1] + + # signed off-set + expected += actval + + # Validate the output is as expected + assert (y == expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_upsampler.py 
b/tests/fpgadataflow/test_fpgadataflow_upsampler.py index b0da767eaa..4539917878 100644 --- a/tests/fpgadataflow/test_fpgadataflow_upsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_upsampler.py @@ -174,7 +174,7 @@ def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode, is_1d test_result = output_dict[model.graph.output[0].name] output_matches = np.isclose(golden_result, test_result, atol=atol).all() - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers("xc7z020clg400-1")) # Prep sim if exec_mode == "cppsim": diff --git a/tests/fpgadataflow/test_runtime_weights.py b/tests/fpgadataflow/test_runtime_weights.py index 3e7822a077..4ca61578c3 100644 --- a/tests/fpgadataflow/test_runtime_weights.py +++ b/tests/fpgadataflow/test_runtime_weights.py @@ -70,7 +70,7 @@ def test_runtime_weights_single_layer(): } layer_spec_list = [layer_spec] model = hls_random_mlp_maker(layer_spec_list) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) fcl = model.get_nodes_by_op_type("MVAU_hls")[0] op_inst = getCustomOp(fcl) op_inst.set_nodeattr("mem_mode", "internal_decoupled") @@ -83,7 +83,7 @@ def test_runtime_weights_single_layer(): old_weight_stream = map(lambda x: int(x, 16), old_weight_stream.split("\n")) old_weight_stream = list(old_weight_stream) model = model.transform(InsertFIFO(True)) - model = model.transform(SpecializeLayers()) + model = model.transform(SpecializeLayers(test_fpga_part)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index c8f80a8e1b..75ed8335c0 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -64,6 +64,7 @@ def test_end2end_build_dataflow_directory(): assert os.path.isfile(output_dir + "/bitfile/finn-accel.hwh") assert os.path.isfile(output_dir + "/report/post_synth_resources.xml") assert os.path.isfile(output_dir + "/report/post_route_timing.rpt") + assert os.path.isfile(output_dir + "/report/post_synth_resources.json") # verification outputs verif_batchsize = np.load(target_dir + "/input.npy").shape[0] for i in range(verif_batchsize): diff --git a/tests/util/test_data_packing_hls.py b/tests/util/test_data_packing.py similarity index 79% rename from tests/util/test_data_packing_hls.py rename to tests/util/test_data_packing.py index b95bcd5d42..a718f171e2 100644 --- a/tests/util/test_data_packing_hls.py +++ b/tests/util/test_data_packing.py @@ -36,7 +36,7 @@ from qonnx.util.basic import gen_finn_dt_tensor from finn.util.basic import make_build_dir -from finn.util.data_packing import numpy_to_hls_code +from finn.util.data_packing import npy_to_rtlsim_input, numpy_to_hls_code @pytest.mark.util @@ -141,3 +141,42 @@ def remove_all_whitespace(s): eB = """{{ap_uint<4>("0xf", 16), ap_uint<4>("0xf", 16)}, {ap_uint<4>("0x7", 16), ap_uint<4>("0xd", 16)}};""" assert remove_all_whitespace(ret) == remove_all_whitespace(eB) + + +@pytest.mark.util +@pytest.mark.parametrize( + "dtype", + [ + DataType["BINARY"], + DataType["BIPOLAR"], + DataType["TERNARY"], + DataType["INT2"], + DataType["INT7"], + DataType["INT8"], + DataType["INT22"], + DataType["INT32"], + DataType["UINT7"], + DataType["UINT8"], + DataType["UINT15"], + DataType["FIXED<9,6>"], + DataType["FLOAT32"], + ], +) +def test_npy_to_rtlsim_input(dtype): + # check if slow and fast data packing produce the 
same non-sign-extended input for rtlsim + # fast mode is triggered for certain data types if last (SIMD) dim = 1 + inp_fast = gen_finn_dt_tensor(dtype, (1, 8, 8, 8 // 1, 1)) # N H W FOLD SIMD + inp_slow = inp_fast.reshape((1, 8, 8, 8 // 2, 2)) # N H W FOLD SIMD + + output_fast = npy_to_rtlsim_input(inp_fast, dtype, 1 * dtype.bitwidth()) + output_slow = npy_to_rtlsim_input(inp_slow, dtype, 2 * dtype.bitwidth()) + + output_slow_split = [] + for x in output_slow: + # least significant bits = first element: + output_slow_split.append(x & ((1 << dtype.bitwidth()) - 1)) + # remaining bits = second element: + output_slow_split.append(x >> dtype.bitwidth()) + + assert all([(x >> dtype.bitwidth()) == 0 for x in output_fast]), "extraneous bits detected" + assert np.all(output_fast == output_slow_split), "different behavior of packing modes detected" diff --git a/tests/util/test_hls_vector.py b/tests/util/test_hls_vector.py new file mode 100644 index 0000000000..35d9b1b2fc --- /dev/null +++ b/tests/util/test_hls_vector.py @@ -0,0 +1,117 @@ +# Copyright (C) 2024, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
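# --- Editor's note (illustrative sketch, not part of the patch): test_npy_to_rtlsim_input
# --- above checks that fast and slow packing agree under the convention that the first
# --- element of a SIMD group occupies the least significant bits of the packed word.
# --- A tiny self-contained example of the mask/shift split used there, for two
# --- hypothetical UINT4 elements:

elem_bits = 4
first, second = 3, 10  # one SIMD=2 group of 4-bit elements
packed = first | (second << elem_bits)  # first element in the LSBs -> 0xA3

# mirrors the assertions in the test above
assert packed & ((1 << elem_bits) - 1) == first  # least significant bits = first element
assert packed >> elem_bits == second  # remaining bits = second element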
+import pytest
+
+import numpy as np
+import os
+import shutil
+import subprocess
+from qonnx.core.datatype import DataType
+from qonnx.util.basic import gen_finn_dt_tensor
+
+from finn.util.basic import make_build_dir
+
+
+@pytest.mark.util
+@pytest.mark.parametrize(
+    "dtype",
+    [
+        DataType["BINARY"],
+        DataType["UINT8"],
+        DataType["INT32"],
+        DataType["FIXED<9,6>"],
+        DataType["FLOAT32"],
+    ],
+)
+@pytest.mark.parametrize("test_shape", [(1, 2, 4), (1, 1, 64), (2, 64)])
+@pytest.mark.vivado
+def test_npy2vectorstream(test_shape, dtype):
+    ndarray = gen_finn_dt_tensor(dtype, test_shape)
+    test_dir = make_build_dir(prefix="test_npy2vectorstream_")
+    shape = ndarray.shape
+    elem_hls_type = dtype.get_hls_datatype_str()
+    vLen = shape[-1]
+    npy_in = test_dir + "/in.npy"
+    npy_out = test_dir + "/out.npy"
+    # restrict the np datatypes we can handle
+    npyt_to_ct = {
+        "float32": "float",
+        "float64": "double",
+        "int8": "int8_t",
+        "int32": "int32_t",
+        "int64": "int64_t",
+        "uint8": "uint8_t",
+        "uint32": "uint32_t",
+        "uint64": "uint64_t",
+    }
+    npy_type = npyt_to_ct[str(ndarray.dtype)]
+    shape_cpp_str = str(shape).replace("(", "{").replace(")", "}")
+    test_app_string = []
+    test_app_string += ["#include <cstddef>"]
+    test_app_string += ["#define AP_INT_MAX_W 8191"]
+    test_app_string += ['#include "ap_int.h"']
+    test_app_string += ['#include "stdint.h"']
+    test_app_string += ['#include "hls_stream.h"']
+    test_app_string += ['#include "hls_vector.h"']
+    test_app_string += ['#include "cnpy.h"']
+    test_app_string += ['#include "npy2vectorstream.hpp"']
+    test_app_string += ["int main(int argc, char *argv[]) {"]
+    test_app_string += ["hls::stream<hls::vector<%s, %d>> teststream;" % (elem_hls_type, vLen)]
+    test_app_string += [
+        'npy2vectorstream<%s, %s, %d>("%s", teststream);' % (elem_hls_type, npy_type, vLen, npy_in)
+    ]
+    test_app_string += [
+        'vectorstream2npy<%s, %s, %d>(teststream, %s, "%s");'
+        % (elem_hls_type, npy_type, vLen, shape_cpp_str, npy_out)
+    ]
+    test_app_string += ["return 0;"]
+    test_app_string += ["}"]
+    with open(test_dir + "/test.cpp", "w") as f:
+        f.write("\n".join(test_app_string))
+    cmd_compile = """
+g++ -o test_npy2vectorstream test.cpp $FINN_ROOT/deps/cnpy/cnpy.cpp \
+-I$FINN_ROOT/deps/cnpy/ -I{}/include -I$FINN_ROOT/src/finn/qnn-data/cpp \
+--std=c++14 -lz """.format(
+        os.environ["HLS_PATH"]
+    )
+    with open(test_dir + "/compile.sh", "w") as f:
+        f.write(cmd_compile)
+    compile = subprocess.Popen(["sh", "compile.sh"], stdout=subprocess.PIPE, cwd=test_dir)
+    (stdout, stderr) = compile.communicate()
+    # make copy before saving the array
+    ndarray = ndarray.copy()
+    np.save(npy_in, ndarray)
+    execute = subprocess.Popen("./test_npy2vectorstream", stdout=subprocess.PIPE, cwd=test_dir)
+    (stdout, stderr) = execute.communicate()
+    produced = np.load(npy_out)
+    success = (produced == ndarray).all()
+    # only delete generated code if test has passed
+    # useful for debug otherwise
+    if success:
+        shutil.rmtree(test_dir)
+    assert success
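# --- Editor's note (illustrative sketch, not part of the patch): for one concrete
# --- parametrization of test_npy2vectorstream above (dtype=UINT8, test_shape=(1, 2, 4)),
# --- the generated C++ driver would contain lines like the ones printed below.
# --- Assumptions: "ap_uint<8>" is what DataType["UINT8"].get_hls_datatype_str() returns,
# --- the numpy container from gen_finn_dt_tensor is float32 (hence C type "float"), and
# --- "in.npy"/"out.npy" stand in for the per-test build directory paths.

elem_hls_type = "ap_uint<8>"  # HLS element type for UINT8
npy_type = "float"  # C type matching the float32 numpy container
vLen = 4  # innermost dim of test_shape becomes the hls::vector length
shape_cpp_str = str((1, 2, 4)).replace("(", "{").replace(")", "}")  # -> "{1, 2, 4}"

print("hls::stream<hls::vector<%s, %d>> teststream;" % (elem_hls_type, vLen))
# -> hls::stream<hls::vector<ap_uint<8>, 4>> teststream;
print('npy2vectorstream<%s, %s, %d>("%s", teststream);' % (elem_hls_type, npy_type, vLen, "in.npy"))
# -> npy2vectorstream<ap_uint<8>, float, 4>("in.npy", teststream);
print(
    'vectorstream2npy<%s, %s, %d>(teststream, %s, "%s");'
    % (elem_hls_type, npy_type, vLen, shape_cpp_str, "out.npy")
)
# -> vectorstream2npy<ap_uint<8>, float, 4>(teststream, {1, 2, 4}, "out.npy");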