[X86][AMX] Prevent transforming load pointer from <256 x i32>* to x86_amx*.

The load/store instruction will be transformed to AMX intrinsics in the AMX type lowering pass. Prohibiting the pointer cast keeps that pass happy.

Differential Revision: https://reviews.llvm.org/D98247
commit 66fbf5fafb
parent c9fce5f0c3
@@ -95,8 +95,15 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
   // else is not lossless. Conservatively assume we can't losslessly convert
   // between pointers with different address spaces.
   if (auto *PTy = dyn_cast<PointerType>(this)) {
-    if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
+    if (auto *OtherPTy = dyn_cast<PointerType>(Ty)) {
+      // Don't bitcast "load <256 x i32>, <256 x i32>*" to
+      // "load x86_amx, x86_amx*", because we don't have a corresponding
+      // instruction to load x86_amx. Doing the transform causes trouble
+      // to lower "load x86_amx" instruction in backend.
+      if (OtherPTy->getElementType()->isX86_AMXTy())
+        return false;
       return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
+    }
     return false;
   }
   return false;  // Other types have no identity values
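With this hunk, Type::canLosslesslyBitCastTo (llvm/lib/IR/Type.cpp) reports the pointer cast <256 x i32>* -> x86_amx* as lossy, while the value-level cast between the 8192-bit vector and x86_amx remains lossless. The following is a minimal sketch, not part of the patch, that exercises the API to show the expected answers; it assumes an LLVM build from this era (typed pointers) and only links against the IR libraries.

// Illustrative only: query canLosslesslyBitCastTo for the types involved.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 256); // <256 x i32>
  Type *AMXTy = Type::getX86_AMXTy(Ctx);                          // x86_amx
  PointerType *VecPtrTy = PointerType::getUnqual(VecTy);          // <256 x i32>*
  PointerType *AMXPtrTy = PointerType::getUnqual(AMXTy);          // x86_amx*

  // With this patch the pointer cast is rejected, so InstCombine will not
  // rewrite "load <256 x i32>, <256 x i32>*" into "load x86_amx, x86_amx*".
  outs() << "<256 x i32>* -> x86_amx*: "
         << (VecPtrTy->canLosslesslyBitCastTo(AMXPtrTy) ? "lossless" : "lossy")
         << "\n"; // expected: lossy
  // The value-level cast between <256 x i32> and x86_amx is unchanged.
  outs() << "<256 x i32>  -> x86_amx : "
         << (VecTy->canLosslesslyBitCastTo(AMXTy) ? "lossless" : "lossy")
         << "\n"; // expected: lossless
  return 0;
}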
@@ -2403,6 +2403,11 @@ Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
       Value *Addr = LI->getOperand(0);
       if (Addr == &CI || isa<LoadInst>(Addr))
         return nullptr;
+      // If there is any loss for the pointer bitcast, abandon.
+      auto *DestPtrTy = DestTy->getPointerTo(LI->getPointerAddressSpace());
+      auto *SrcPtrTy = Addr->getType();
+      if (!SrcPtrTy->canLosslesslyBitCastTo(DestPtrTy))
+        return nullptr;
       if (LI->hasOneUse() && LI->isSimple())
         continue;
       // If a LoadInst has more than one use, changing the type of loaded
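In optimizeBitCastFromPhi (llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp), the added lines abandon the transform whenever the pointer bitcast it would introduce is itself lossy. A minimal standalone sketch of that guard follows; the helper name isSafeToRetypeLoad is invented here for illustration and is not a function in the patch.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Would retyping this load to DestTy require a lossy pointer bitcast?
// Mirrors the check added above: retyping a load of <256 x i32> to x86_amx
// would need a <256 x i32>* -> x86_amx* cast, which is now lossy, so the
// transform is abandoned.
static bool isSafeToRetypeLoad(LoadInst *LI, Type *DestTy) {
  auto *DestPtrTy = DestTy->getPointerTo(LI->getPointerAddressSpace());
  auto *SrcPtrTy = LI->getPointerOperand()->getType();
  return SrcPtrTy->canLosslesslyBitCastTo(DestPtrTy);
}

In the patch itself the check is inlined in the loop over the PHI's incoming loads and simply returns nullptr when it fails, as the hunk above shows. The regression test below records the resulting IR: the load, phi, and store stay in <256 x i32> form, and bitcasts to x86_amx are emitted only where the AMX intrinsic needs them.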
@@ -9,22 +9,22 @@ define linkonce_odr dso_local void @foo(<256 x i32>* %arrayidx16, <256 x i32>* %
 ; CHECK:       for.cond9:
 ; CHECK-NEXT:    br i1 undef, label [[FOR_BODY14:%.*]], label [[EXIT:%.*]]
 ; CHECK:       for.body14:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <256 x i32>* [[ARRAYIDX16:%.*]] to x86_amx*
-; CHECK-NEXT:    [[T51:%.*]] = load x86_amx, x86_amx* [[TMP0]], align 64
+; CHECK-NEXT:    [[T5:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX16:%.*]], align 64
 ; CHECK-NEXT:    br label [[FOR_COND18:%.*]]
 ; CHECK:       for.cond18:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi x86_amx [ [[T51]], [[FOR_BODY14]] ], [ [[T11:%.*]], [[FOR_BODY24:%.*]] ]
+; CHECK-NEXT:    [[SUB_C_SROA_0_0:%.*]] = phi <256 x i32> [ [[T5]], [[FOR_BODY14]] ], [ [[T12:%.*]], [[FOR_BODY24:%.*]] ]
 ; CHECK-NEXT:    br i1 undef, label [[FOR_BODY24]], label [[FOR_COND_CLEANUP23:%.*]]
 ; CHECK:       for.cond.cleanup23:
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast x86_amx [[TMP1]] to <256 x i32>
-; CHECK-NEXT:    store <256 x i32> [[TMP2]], <256 x i32>* [[ARRAYIDX16]], align 64
+; CHECK-NEXT:    store <256 x i32> [[SUB_C_SROA_0_0]], <256 x i32>* [[ARRAYIDX16]], align 64
 ; CHECK-NEXT:    br label [[FOR_COND9]]
 ; CHECK:       for.body24:
 ; CHECK-NEXT:    [[T6:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX29:%.*]], align 64
 ; CHECK-NEXT:    [[T7:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX35:%.*]], align 64
+; CHECK-NEXT:    [[T8:%.*]] = bitcast <256 x i32> [[SUB_C_SROA_0_0]] to x86_amx
 ; CHECK-NEXT:    [[T9:%.*]] = bitcast <256 x i32> [[T6]] to x86_amx
 ; CHECK-NEXT:    [[T10:%.*]] = bitcast <256 x i32> [[T7]] to x86_amx
-; CHECK-NEXT:    [[T11]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 1, i16 4, i16 4, x86_amx [[TMP1]], x86_amx [[T9]], x86_amx [[T10]])
+; CHECK-NEXT:    [[T11:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 1, i16 4, i16 4, x86_amx [[T8]], x86_amx [[T9]], x86_amx [[T10]])
+; CHECK-NEXT:    [[T12]] = bitcast x86_amx [[T11]] to <256 x i32>
 ; CHECK-NEXT:    br label [[FOR_COND18]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void