From 813aaf39f94609a46f38f1e3a15a763a2cc0d2cf Mon Sep 17 00:00:00 2001
From: Aart Bik <39774503+aartbik@users.noreply.github.com>
Date: Fri, 17 Nov 2023 15:47:38 -0800
Subject: [PATCH] [mlir][sparse] stress test BSR (#72712)

I always enjoy a good stress test.

This end-to-end integration test ensures that the major ordering of both
the blocks and the elements within each block is handled correctly
(giving row-row, row-col, col-row, and col-col as options).
---
 .../SparseTensor/CPU/block_majors.mlir | 178 ++++++++++++++++++
 1 file changed, 178 insertions(+)
 create mode 100755 mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
new file mode 100755
index 00000000000000..ca7a3b302fdb63
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
@@ -0,0 +1,178 @@
//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s

#BSR_row_rowmajor = #sparse_tensor.encoding<{
  map = (i, j) ->
    ( i floordiv 3 : dense
    , j floordiv 4 : compressed
    , i mod 3 : dense
    , j mod 4 : dense
    )
}>

#BSR_row_colmajor = #sparse_tensor.encoding<{
  map = (i, j) ->
    ( i floordiv 3 : dense
    , j floordiv 4 : compressed
    , j mod 4 : dense
    , i mod 3 : dense
    )
}>

#BSR_col_rowmajor = #sparse_tensor.encoding<{
  map = (i, j) ->
    ( j floordiv 4 : dense
    , i floordiv 3 : compressed
    , i mod 3 : dense
    , j mod 4 : dense
    )
}>

#BSR_col_colmajor = #sparse_tensor.encoding<{
  map = (i, j) ->
    ( j floordiv 4 : dense
    , i floordiv 3 : compressed
    , j mod 4 : dense
    , i mod 3 : dense
    )
}>

//
// Example 3x4 block storage of a 6x16 matrix:
//
// +---------+---------+---------+---------+
// | 1 2 . . | . . . . | . . . . | . . . . |
// | . . . . | . . . . | . . . . | . . . . |
// | . . . 3 | . . . . | . . . . | . . . . |
// +---------+---------+---------+---------+
// | . . . . | . . . . | 4 5 . . | . . . . |
// | . . . . | . . . . | . . . . | . . . . |
// | . . . . | . . . . | . . 6 7 | . . . . |
// +---------+---------+---------+---------+
//
// Storage for the CSR-of-blocks variants. Note that this essentially
// provides CSR storage of the 2x4 grid of blocks, with either row-major
// or column-major ordering of the elements within each 3x4 block.
//
// positions[1]   : 0 1 2
// coordinates[1] : 0 2
// values         : 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
//                  4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7  [row-major]
//
//                  1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3,
//                  4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7  [col-major]
//
// Storage for the CSC-of-blocks variants. Note that this essentially
// provides CSC storage of the 4x2 grid of blocks, with either row-major
// or column-major ordering of the elements within each 3x4 block.
//
// positions[1]   : 0 1 1 2 2
// coordinates[1] : 0 1
// values         : 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
//                  4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7  [row-major]
//
//                  1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3,
//                  4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7  [col-major]
//
module {

  func.func @main() {
    %c0 = arith.constant 0 : index
    %f0 = arith.constant 0.0 : f64

    %m = arith.constant sparse<
       [ [0, 0], [0, 1], [2, 3], [3, 8], [3, 9], [5, 10], [5, 11] ],
       [ 1., 2., 3., 4., 5., 6., 7.]
    > : tensor<6x16xf64>
    %s1 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_rowmajor>
    %s2 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_colmajor>
    %s3 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_rowmajor>
    %s4 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_colmajor>

    // CHECK: ( 0, 1, 2 )
    // CHECK-NEXT: ( 0, 2 )
    // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
    %pos1 = sparse_tensor.positions %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
    %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<3xindex>
    vector.print %vecp1 : vector<3xindex>
    %crd1 = sparse_tensor.coordinates %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
    %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<2xindex>
    vector.print %vecc1 : vector<2xindex>
    %val1 = sparse_tensor.values %s1 : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xf64>
    %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<24xf64>
    vector.print %vecv1 : vector<24xf64>

    // CHECK-NEXT: ( 0, 1, 2 )
    // CHECK-NEXT: ( 0, 2 )
    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
    %pos2 = sparse_tensor.positions %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
    %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<3xindex>
    vector.print %vecp2 : vector<3xindex>
    %crd2 = sparse_tensor.coordinates %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
    %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<2xindex>
    vector.print %vecc2 : vector<2xindex>
    %val2 = sparse_tensor.values %s2 : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xf64>
    %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<24xf64>
    vector.print %vecv2 : vector<24xf64>

    // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
    // CHECK-NEXT: ( 0, 1 )
    // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
    %pos3 = sparse_tensor.positions %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
    %vecp3 = vector.transfer_read %pos3[%c0], %c0 : memref<?xindex>, vector<5xindex>
    vector.print %vecp3 : vector<5xindex>
    %crd3 = sparse_tensor.coordinates %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
    %vecc3 = vector.transfer_read %crd3[%c0], %c0 : memref<?xindex>, vector<2xindex>
    vector.print %vecc3 : vector<2xindex>
    %val3 = sparse_tensor.values %s3 : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xf64>
    %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<24xf64>
    vector.print %vecv3 : vector<24xf64>

    // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
    // CHECK-NEXT: ( 0, 1 )
    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
    %pos4 = sparse_tensor.positions %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
    %vecp4 = vector.transfer_read %pos4[%c0], %c0 : memref<?xindex>, vector<5xindex>
    vector.print %vecp4 : vector<5xindex>
    %crd4 = sparse_tensor.coordinates %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
    %vecc4 = vector.transfer_read %crd4[%c0], %c0 : memref<?xindex>, vector<2xindex>
    vector.print %vecc4 : vector<2xindex>
    %val4 = sparse_tensor.values %s4 : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xf64>
    %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<24xf64>
    vector.print %vecv4 : vector<24xf64>

    // Release the resources.
    bufferization.dealloc_tensor %s1: tensor<?x?xf64, #BSR_row_rowmajor>
    bufferization.dealloc_tensor %s2: tensor<?x?xf64, #BSR_row_colmajor>
    bufferization.dealloc_tensor %s3: tensor<?x?xf64, #BSR_col_rowmajor>
    bufferization.dealloc_tensor %s4: tensor<?x?xf64, #BSR_col_colmajor>

    return
  }
}
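For readers who want to sanity-check the FileCheck expectations above, the following small
Python sketch (not part of the patch) recomputes the level-1 positions, level-1 coordinates,
and values arrays for CSR-of-blocks and CSC-of-blocks storage with row- or column-major
ordering inside each 3x4 block. It assumes NumPy is available; the helper name block_storage
and the variable names are made up purely for illustration.

# Illustrative cross-check only: derive the arrays the CHECK lines expect.
import numpy as np

BM, BN = 3, 4  # block sizes used by all four encodings

# The 6x16 test matrix in COO form (same entries as the arith.constant).
coords = [(0, 0), (0, 1), (2, 3), (3, 8), (3, 9), (5, 10), (5, 11)]
vals = [1., 2., 3., 4., 5., 6., 7.]
A = np.zeros((6, 16))
for (i, j), v in zip(coords, vals):
    A[i, j] = v

def block_storage(A, outer="row", inner="row"):
    """CSR-of-blocks (outer="row") or CSC-of-blocks (outer="col") storage,
    with row-major (inner="row") or column-major (inner="col") ordering of
    the elements inside each BM x BN block."""
    nbr, nbc = A.shape[0] // BM, A.shape[1] // BN
    # Blocks that contain at least one nonzero element.
    nonempty = {(bi, bj) for bi in range(nbr) for bj in range(nbc)
                if A[bi*BM:(bi+1)*BM, bj*BN:(bj+1)*BN].any()}
    n_outer, n_inner = (nbr, nbc) if outer == "row" else (nbc, nbr)
    positions, coordinates, values = [0], [], []
    for o in range(n_outer):
        for i in range(n_inner):
            bi, bj = (o, i) if outer == "row" else (i, o)
            if (bi, bj) in nonempty:
                coordinates.append(i)
                blk = A[bi*BM:(bi+1)*BM, bj*BN:(bj+1)*BN]
                values += [float(v) for v in blk.flatten("C" if inner == "row" else "F")]
        positions.append(len(coordinates))
    return positions, coordinates, values

# Reproduces the four CHECK groups in the test above.
print(block_storage(A, "row", "row"))  # #BSR_row_rowmajor
print(block_storage(A, "row", "col"))  # #BSR_row_colmajor
print(block_storage(A, "col", "row"))  # #BSR_col_rowmajor
print(block_storage(A, "col", "col"))  # #BSR_col_colmajor

Swapping the two outer levels changes only the positions/coordinates arrays (CSR versus CSC
over the block grid), while swapping the two inner levels changes only the element order
inside each stored block; the printed values in the test reflect exactly that.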