Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump IREE to 3cf5b65f736ce50c9890190b80e6343c0b929d56 #863

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/ci-linux.yml
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ jobs:
- name: Python deps
run: |
pip install -r third_party/iree/runtime/bindings/python/iree/runtime/build_requirements.txt
pip install pyyaml
pip install pyyaml pybind11==2.13.6 nanobind==2.2.0

- name: Enable cache
uses: actions/cache/restore@v3
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ci-macos.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ jobs:
- name: Python deps
run: |
pip install -r third_party/iree/runtime/bindings/python/iree/runtime/build_requirements.txt
pip install pytest
pip install pytest pybind11==2.13.6 nanobind==2.2.0
- name: Enable cache
uses: actions/cache/restore@v3
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ci-windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ jobs:
- name: Python deps
run: |
pip install -r third_party\iree\runtime\bindings\python\iree\runtime\build_requirements.txt
pip install pyyaml
pip install pyyaml pybind11==2.13.6 nanobind==2.2.0
- name: Enable cache
uses: actions/cache/restore@v3
Expand Down
4 changes: 2 additions & 2 deletions build_tools/ci/cpu_comparison/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -537,8 +537,8 @@ def aie_vs_llvm_cpu(
config,
test_file,
use_ukernel=False,
tile_pipeline="pad-pack",
lower_to_aie_pipeline="air",
tile_pipeline="pack-peel",
lower_to_aie_pipeline="objectFifo",
function_name=None,
seed=1,
rtol=1e-6,
Expand Down
21 changes: 11 additions & 10 deletions build_tools/ci/run_matmul_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -555,16 +555,17 @@ run_matmul_test \
# MLIR-AIR Matmul tests
###################################################################

if [ -d "$VITIS" ]; then
run_matmul_test \
--name_prefix "ukern" \
--lower_to_aie_pipeline "air" \
--tile_pipeline "pad-pack" \
--lhs_rhs_type "bf16" \
--acc_type "f32" \
--m "256" --k "256" --n "256" \
--use_ukernel "1"
fi
# TODO: re-enable after fixing in AIR
# if [ -d "$VITIS" ]; then
# run_matmul_test \
# --name_prefix "ukern" \
# --lower_to_aie_pipeline "air" \
# --tile_pipeline "pad-pack" \
# --lhs_rhs_type "bf16" \
# --acc_type "f32" \
# --m "256" --k "256" --n "256" \
# --use_ukernel "1"
# fi

# Example of a run with a group of 2+ matmuls. Currently this test is passed
# the flag '--num_repeat_runs 0" as there is currently an issue with the runtime if
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ iree_lit_test_suite(
lit
SRCS
"conv_fill_spec_pad.mlir"
"matmul_fill_spec_pack_funcIR.mlir"
"matmul_fill_spec_pack_peel.mlir"
"matmul_fill_spec_pad.mlir"
"matmul_fill_spec_pad_pack.mlir"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ module attributes { transform.with_named_sequence } {
%padded_1, %pad_1, %___ = transform.structured.pad %tiled_conv_1 {
padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
padding_dimensions=[0, 1, 2],
pack_paddings=[0, 0, 1],
nofold_flags=[0, 0, 1],
copy_back_op="linalg.copy"
} : (!any) -> (!any, !any, !any)

Expand Down Expand Up @@ -163,7 +163,7 @@ module attributes { transform.with_named_sequence } {
%padded_2, %pad_2, %____ = transform.structured.pad %inner_conv {
padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
padding_dimensions=[0, 1, 2],
pack_paddings=[1, 1, 0],
nofold_flags=[1, 1, 0],
copy_back_op="linalg.copy"
} : (!any) -> (!any, !any, !any)

Expand Down
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This test is no longer useful, delete it to reduce the workload of maintenance.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -145,9 +145,9 @@ module attributes { transform.with_named_sequence } {
// CHECK: scf.forall
// CHECK: {
// CHECK: memref.alloc() : memref<8x16xi32, 1>
// CHECK: linalg.copy ins(%{{.*}} : memref<8x16xi32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%{{.*}} : memref<8x16xi32, 1>)
// CHECK: linalg.copy ins(%{{.*}} : memref<8x16xi32, #hal.descriptor_type<storage_buffer>>) outs(%{{.*}} : memref<8x16xi32, 1>)
// CHECK: memref.alloc() : memref<16x8xi32, 1>
// CHECK: linalg.copy ins(%{{.*}} : memref<16x8xi32, strided<[8, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%{{.*}} : memref<16x8xi32, 1>)
// CHECK: linalg.copy ins(%{{.*}} : memref<16x8xi32, #hal.descriptor_type<storage_buffer>>) outs(%{{.*}} : memref<16x8xi32, 1>)
// CHECK: memref.alloc() : memref<8x8xi32, 1>
// CHECK: scf.forall
// CHECK: {
Expand All @@ -166,7 +166,7 @@ module attributes { transform.with_named_sequence } {
// CHECK: linalg.copy ins(%{{.*}} : memref<4x4xi32, 2>) outs(%{{.*}} : memref<4x4xi32, strided<[8, 1], offset: ?>, 1>)
// CHECK: memref.dealloc %{{.*}} : memref<4x4xi32, 2>
// CHECK: }
// CHECK: linalg.copy ins(%{{.*}} : memref<8x8xi32, 1>) outs(%{{.*}} : memref<8x8xi32, strided<[8, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
// CHECK: linalg.copy ins(%{{.*}} : memref<8x8xi32, 1>) outs(%{{.*}} : memref<8x8xi32, #hal.descriptor_type<storage_buffer>>)
// CHECK: memref.dealloc %{{.*}} : memref<8x16xi32, 1>
// CHECK: memref.dealloc %{{.*}} : memref<16x8xi32, 1>
// CHECK: memref.dealloc %{{.*}} : memref<8x8xi32, 1>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -160,21 +160,23 @@ LogicalResult setDmaInputs(Operation *&operandOp,
SmallVector<OpFoldResult> &sizes,
SmallVector<OpFoldResult> &strides) {
MLIRContext *ctx = operandOp->getContext();
if (auto allocOp = dyn_cast<memref::AllocOp>(operandOp)) {
auto [stridesI64, baseOffset] = getStridesAndOffset(allocOp.getType());
if (isa<memref::AllocOp>(operandOp) ||
isa<IREE::HAL::InterfaceBindingSubspanOp>(operandOp)) {
MemRefType memRefType = cast<MemRefType>(operandOp->getResult(0).getType());
auto [stridesI64, baseOffset] = getStridesAndOffset(memRefType);
if (baseOffset != 0) {
auto message = llvm::formatv(
"with non-zero base offset {0} is not supported by the "
"current pass, requires testing and possible code changes.",
baseOffset);
return allocOp->emitOpError(message);
return operandOp->emitOpError(message);
}
strides = getAsIndexOpFoldResult(ctx, stridesI64);
auto sizesI64 = allocOp.getType().getShape();
auto sizesI64 = memRefType.getShape();
if (llvm::any_of(sizesI64, [](int64_t size) {
return ShapedType::isDynamic(size);
})) {
return allocOp->emitOpError(
return operandOp->emitOpError(
"with dynamic shape is not supported by dma op.");
}
sizes = getAsIndexOpFoldResult(ctx, sizesI64);
Expand Down Expand Up @@ -235,8 +237,9 @@ LogicalResult setDmaInputs(Operation *&operandOp,
return success();
}
return operandOp->emitOpError(
"is an unsupported operation. This pass currently only supports AllocOp "
"and SubViewOp as inputs.");
"is an unsupported operation. This pass currently only supports "
"hal.interface.binding.subspan, memref.alloc and memref.subview as "
"inputs.");
}

/// Rewrite the pack/unpack op 'op' as a DMA operation. The function arguments
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ struct iree_hal_xrt_lite_semaphore {
iree_allocator_t host_allocator)
: value(initial_value), host_allocator(host_allocator) {
iree_hal_semaphore_initialize(&iree_hal_xrt_lite_semaphore_vtable, &base);
iree_atomic_store_int64(&value, initial_value, iree_memory_order_release);
iree_atomic_store(&value, initial_value, iree_memory_order_release);
}
};

Expand Down
Loading