[Alignment][NFC] Use Align with CreateMaskedLoad

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73087
This commit is contained in:
Guillaume Chatelet 2020-01-21 11:21:31 +01:00
parent 3f9b6b270f
commit bc8a1ab26f
8 changed files with 38 additions and 31 deletions

View File

@@ -9727,8 +9727,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops, unsigned Align) {
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9736,7 +9736,7 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
@@ -10731,11 +10731,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
return EmitX86MaskedLoad(*this, Ops, Align::None());
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
return EmitX86MaskedLoad(*this, Ops, Align::None());
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10748,11 +10748,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
case X86::BI__builtin_ia32_movdqa64load512_mask: {
unsigned Align =
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedLoad(*this, Ops, Align);
}
case X86::BI__builtin_ia32_movdqa64load512_mask:
return EmitX86MaskedLoad(
*this, Ops,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:

View File

@@ -509,6 +509,7 @@ public:
bool isReverse() const { return Reverse; }
uint32_t getFactor() const { return Factor; }
uint32_t getAlignment() const { return Alignment.value(); }
Align getAlign() const { return Alignment; }
uint32_t getNumMembers() const { return Members.size(); }
/// Try to insert a new member \p Instr with index \p Index and

View File

@@ -727,7 +727,14 @@ public:
CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
/// Create a call to Masked Load intrinsic
CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
LLVM_ATTRIBUTE_DEPRECATED(
CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
Value *PassThru = nullptr,
const Twine &Name = ""),
"Use the version that takes Align instead") {
return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
}
CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");
/// Create a call to Masked Store intrinsic

View File

@@ -1257,18 +1257,19 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
Type *ValTy = Passthru->getType();
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
unsigned Align =
Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
const Align Alignment =
Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
: Align::None();
// If the mask is all ones just emit a regular store.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
// Convert the mask from an integer type to a vector of i1.
unsigned NumElts = Passthru->getType()->getVectorNumElements();
Mask = getX86MaskVec(Builder, Mask, NumElts);
return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
}
static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {

View File

@@ -466,14 +466,14 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
}
/// Create a call to a Masked Load intrinsic.
/// \p Ptr - base pointer for the load
/// \p Align - alignment of the source location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
/// \p Ptr - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
Value *Mask, Value *PassThru,
const Twine &Name) {
auto *PtrTy = cast<PointerType>(Ptr->getType());
@@ -483,7 +483,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
if (!PassThru)
PassThru = UndefValue::get(DataTy);
Type *OverloadedTypes[] = { DataTy, PtrTy };
Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
OverloadedTypes, Name);
}

View File

@@ -1331,7 +1331,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// The pass-through vector for an x86 masked load is a zero vector.
CallInst *NewMaskedLoad =
IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
IC.Builder.CreateMaskedLoad(PtrCast, Align::None(), BoolMask, ZeroVec);
return IC.replaceInstUsesWith(II, NewMaskedLoad);
}

View File

@@ -2945,9 +2945,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateMaskedLoad(
ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
getShadow(PassThru), "_msmaskedld"));
setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
getShadow(PassThru), "_msmaskedld"));
} else {
setShadow(&I, getCleanShadow(&I));
}

View File

@@ -2263,7 +2263,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
: ShuffledMask;
}
NewLoad =
Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlignment(),
Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
GroupMask, UndefVec, "wide.masked.vec");
}
else
@@ -2475,8 +2475,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
if (isMaskRequired)
NewLI = Builder.CreateMaskedLoad(
VecPtr, Alignment.value(), BlockInMaskParts[Part],
UndefValue::get(DataTy), "wide.masked.load");
VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
"wide.masked.load");
else
NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
"wide.load");