From c4906588ce47de33d59bcd95f3e82ce2c3e61c23 Mon Sep 17 00:00:00 2001
From: Florian Hahn
Date: Thu, 29 Aug 2024 21:19:59 +0100
Subject: [PATCH] [VPlan] Use skipCostComputation when pre-computing induction
 costs.

This ensures we skip any instructions identified to be ignored by the
legacy cost model as well. Fixes a divergence between legacy and
VPlan-based cost model.

Fixes https://github.com/llvm/llvm-project/issues/106417.
---
 .../Transforms/Vectorize/LoopVectorize.cpp    |   3 +-
 .../LoopVectorize/RISCV/induction-costs.ll    | 192 ++++++++++++++++++
 2 files changed, 194 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 4cc75e2e754603..6babfd1eee9108 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7113,7 +7113,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
       IVInsts.push_back(CI);
     }
     for (Instruction *IVInst : IVInsts) {
-      if (!CostCtx.SkipCostComputation.insert(IVInst).second)
+      if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
         continue;
       InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
       LLVM_DEBUG({
@@ -7121,6 +7121,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
                << ": induction instruction " << *IVInst << "\n";
       });
       Cost += InductionCost;
+      CostCtx.SkipCostComputation.insert(IVInst);
     }
   }
 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
new file mode 100644
index 00000000000000..bee7bb7bd61622
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -0,0 +1,192 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+; Test case for https://github.com/llvm/llvm-project/issues/106417.
+define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
+; CHECK-LABEL: define void @skip_free_iv_truncate(
+; CHECK-SAME: i16 [[X:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[X_I32:%.*]] = sext i16 [[X]] to i32
+; CHECK-NEXT: [[X_I64:%.*]] = sext i16 [[X]] to i64
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 -8
+; CHECK-NEXT: [[SMAX20:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[SMAX20]], [[X_I64]]
+; CHECK-NEXT: [[UMIN21:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP0]], i64 1)
+; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[SMAX20]], [[UMIN21]]
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[X_I64]]
+; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 288, i64 [[TMP7]])
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[SMAX]], [[X_I64]]
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP9]], i64 1)
+; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[SMAX]], [[UMIN]]
+; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], [[X_I64]]
+; CHECK-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP11]], 3
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[UMIN]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl nsw i64 [[X_I64]], 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP14]]
+; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 6, i64 [[TMP13]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ult ptr [[TMP16]], [[SCEVGEP]]
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i64 [[X_I64]], 3
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP19]]
+; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
+; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[MUL_RESULT3]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ult ptr [[TMP21]], [[SCEVGEP1]]
+; CHECK-NEXT: [[TMP23:%.*]] = or i1 [[TMP22]], [[MUL_OVERFLOW4]]
+; CHECK-NEXT: [[TMP24:%.*]] = add nsw i64 [[TMP19]], -8
+; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP24]]
+; CHECK-NEXT: [[MUL6:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
+; CHECK-NEXT: [[MUL_RESULT7:%.*]] = extractvalue { i64, i1 } [[MUL6]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW8:%.*]] = extractvalue { i64, i1 } [[MUL6]], 1
+; CHECK-NEXT: [[TMP25:%.*]] = sub i64 0, [[MUL_RESULT7]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SCEVGEP5]], i64 [[MUL_RESULT7]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ult ptr [[TMP26]], [[SCEVGEP5]]
+; CHECK-NEXT: [[TMP28:%.*]] = or i1 [[TMP27]], [[MUL_OVERFLOW8]]
+; CHECK-NEXT: [[TMP29:%.*]] = or i1 [[TMP18]], [[TMP23]]
+; CHECK-NEXT: [[TMP30:%.*]] = or i1 [[TMP29]], [[TMP28]]
+; CHECK-NEXT: br i1 [[TMP30]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK: [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
+; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
+; CHECK-NEXT: [[SMAX10:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
+; CHECK-NEXT: [[TMP32:%.*]] = sub i64 [[SMAX10]], [[X_I64]]
+; CHECK-NEXT: [[UMIN11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP32]], i64 1)
+; CHECK-NEXT: [[TMP33:%.*]] = sub i64 [[SMAX10]], [[UMIN11]]
+; CHECK-NEXT: [[TMP34:%.*]] = sub i64 [[TMP33]], [[X_I64]]
+; CHECK-NEXT: [[TMP35:%.*]] = udiv i64 [[TMP34]], 3
+; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[UMIN11]], [[TMP35]]
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 6
+; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[TMP37]], [[TMP31]]
+; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 2
+; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP39]]
+; CHECK-NEXT: [[TMP40:%.*]] = shl nsw i64 [[X_I64]], 3
+; CHECK-NEXT: [[SCEVGEP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP40]]
+; CHECK-NEXT: [[TMP41:%.*]] = mul i64 [[TMP36]], 24
+; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[TMP40]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[TMP42]], 8
+; CHECK-NEXT: [[SCEVGEP14:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP43]]
+; CHECK-NEXT: [[TMP44:%.*]] = add nsw i64 [[TMP40]], -8
+; CHECK-NEXT: [[SCEVGEP15:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP44]]
+; CHECK-NEXT: [[SCEVGEP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP42]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP14]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP13]], [[SCEVGEP12]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: [[BOUND017:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP16]]
+; CHECK-NEXT: [[BOUND118:%.*]] = icmp ult ptr [[SCEVGEP15]], [[SCEVGEP12]]
+; CHECK-NEXT: [[FOUND_CONFLICT19:%.*]] = and i1 [[BOUND017]], [[BOUND118]]
+; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT19]]
+; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP46:%.*]] = mul i64 [[TMP45]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]]
+; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]]
+; CHECK-NEXT: [[TMP49:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT: [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3
+; CHECK-NEXT: [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]]
+; CHECK-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP52:%.*]] = mul i64 [[TMP51]], 8
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP54:%.*]] = add <vscale x 8 x i64> [[TMP53]], zeroinitializer
+; CHECK-NEXT: [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP54]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]]
+; CHECK-NEXT: [[TMP56:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP57:%.*]] = mul i64 [[TMP56]], 8
+; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP57]]
+; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
+; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]]
+; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL23:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL23]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_I64:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP61:%.*]] = load i64, ptr [[GEP_I64]], align 8
+; CHECK-NEXT: [[TMP62:%.*]] = sext i32 [[IV_CONV]] to i64
+; CHECK-NEXT: [[GEP_CONV:%.*]] = getelementptr i64, ptr [[INVARIANT_GEP]], i64 [[TMP62]]
+; CHECK-NEXT: [[TMP63:%.*]] = load i64, ptr [[GEP_CONV]], align 8
+; CHECK-NEXT: [[GEP_I16:%.*]] = getelementptr i16, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: store i16 0, ptr [[GEP_I16]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
+; CHECK-NEXT: [[TMP64]] = trunc i64 [[IV_NEXT]] to i32
+; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[IV]], 99
+; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+  %x.i32 = sext i16 %x to i32
+  %x.i64 = sext i16 %x to i64
+  %invariant.gep = getelementptr i8, ptr %A, i64 -8
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %x.i64, %entry ], [ %iv.next, %loop ]
+  %iv.conv = phi i32 [ %x.i32, %entry ], [ %5, %loop ]
+  %gep.i64 = getelementptr i64, ptr %A, i64 %iv
+  %2 = load i64, ptr %gep.i64, align 8
+  %3 = sext i32 %iv.conv to i64
+  %gep.conv = getelementptr i64, ptr %invariant.gep, i64 %3
+  %4 = load i64, ptr %gep.conv, align 8
+  %gep.i16 = getelementptr i16, ptr %A, i64 %iv
+  store i16 0, ptr %gep.i16, align 2
+  %iv.next = add i64 %iv, 3
+  %5 = trunc i64 %iv.next to i32
+  %c = icmp slt i64 %iv, 99
+  br i1 %c, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
+;.
+; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
+; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
+; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
+; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]}
+; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
+; CHECK: [[META5]] = distinct !{[[META5]], [[META2]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
+; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]}
+;.
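
For readers following the cost-model change without the surrounding LoopVectorize.cpp context, below is a minimal, self-contained C++ sketch of the pattern the first hunk switches to: query the skip predicate before costing, so anything the legacy model already ignores (such as the free IV truncate exercised by the test above) contributes no cost, and record the instruction in SkipCostComputation only after its cost has been accumulated. CostModelContext, IgnoredByLegacyModel, and the toy cost values are illustrative stand-ins, not LLVM APIs; only the control flow mirrors the patched loop.

#include <iostream>
#include <set>
#include <vector>

// Toy stand-ins for the LLVM types involved; only the control flow mirrors the hunk.
using Instruction = int;          // pretend each IR instruction is just an id
using InstructionCost = unsigned;

struct CostModelContext {
  std::set<Instruction> SkipCostComputation;   // instructions already accounted for
  std::set<Instruction> IgnoredByLegacyModel;  // e.g. a truncate that folds away for free

  // Stand-in for CostCtx.skipCostComputation(I, VF.isVector()): true if the legacy
  // model ignores I or I has already been costed.
  bool skipCostComputation(Instruction I, bool /*IsVector*/) const {
    return IgnoredByLegacyModel.count(I) || SkipCostComputation.count(I);
  }

  // Fake per-instruction cost; the real code queries the legacy cost model.
  InstructionCost getLegacyCost(Instruction I) const { return I % 3 + 1; }
};

int main() {
  CostModelContext CostCtx;
  CostCtx.IgnoredByLegacyModel = {2}; // pretend instruction 2 is free for the legacy model

  std::vector<Instruction> IVInsts = {1, 2, 3};
  InstructionCost Cost = 0;
  for (Instruction IVInst : IVInsts) {
    // Behaviour after the patch: consult the skip predicate first, so ignored
    // instructions add no cost. Before the patch the loop only de-duplicated via
    // !SkipCostComputation.insert(IVInst).second and would still have charged 2.
    if (CostCtx.skipCostComputation(IVInst, /*IsVector=*/true))
      continue;
    Cost += CostCtx.getLegacyCost(IVInst);
    CostCtx.SkipCostComputation.insert(IVInst); // record only after costing
  }
  std::cout << "precomputed induction cost: " << Cost << "\n"; // 1 and 3 costed, 2 skipped
}

Recording the instruction after costing (rather than relying on the insert's return value) keeps the de-duplication while letting the shared skip predicate filter out instructions the legacy model ignores, which is the divergence the commit message describes.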