[Alignment][NFC] Use Align with CreateMaskedLoad
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73087
commit bc8a1ab26f
parent 3f9b6b270f
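For downstream users of the IRBuilder API, the practical effect of this change is that masked-load creation takes an llvm::Align instead of a raw unsigned. Below is a minimal sketch of a caller migrating to the new overload; the helper name, the vector type parameter, the 16-byte constant alignment, the undef pass-through, and the result name are illustrative and not part of the patch.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Illustrative helper, not from the patch: emit a masked load of VecTy from
// Ptr under Mask, assuming the source location is known to be 16-byte aligned.
static Value *emitMaskedLoadSketch(IRBuilder<> &Builder, Value *Ptr,
                                   Value *Mask, VectorType *VecTy) {
  // Before this series:  Builder.CreateMaskedLoad(Ptr, /*Align=*/16, Mask);
  // The unsigned overload now forwards to the Align-based one and is deprecated.
  return Builder.CreateMaskedLoad(Ptr, Align(16), Mask,
                                  UndefValue::get(VecTy), "masked.vec");
}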
@@ -9727,8 +9727,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
 }
 
-static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
-                                ArrayRef<Value *> Ops, unsigned Align) {
+static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+                                Align Alignment) {
   // Cast the pointer to right type.
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                           llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9736,7 +9736,7 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
   Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                    Ops[1]->getType()->getVectorNumElements());
 
-  return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+  return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
 
 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
@@ -10731,11 +10731,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_loaddqudi128_mask:
   case X86::BI__builtin_ia32_loaddqudi256_mask:
   case X86::BI__builtin_ia32_loaddqudi512_mask:
-    return EmitX86MaskedLoad(*this, Ops, 1);
+    return EmitX86MaskedLoad(*this, Ops, Align::None());
 
   case X86::BI__builtin_ia32_loadss128_mask:
   case X86::BI__builtin_ia32_loadsd128_mask:
-    return EmitX86MaskedLoad(*this, Ops, 1);
+    return EmitX86MaskedLoad(*this, Ops, Align::None());
 
   case X86::BI__builtin_ia32_loadaps128_mask:
   case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10748,11 +10748,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_movdqa32load512_mask:
   case X86::BI__builtin_ia32_movdqa64load128_mask:
   case X86::BI__builtin_ia32_movdqa64load256_mask:
-  case X86::BI__builtin_ia32_movdqa64load512_mask: {
-    unsigned Align =
-      getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
-    return EmitX86MaskedLoad(*this, Ops, Align);
-  }
+  case X86::BI__builtin_ia32_movdqa64load512_mask:
+    return EmitX86MaskedLoad(
+        *this, Ops,
+        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
 
   case X86::BI__builtin_ia32_expandloaddf128_mask:
   case X86::BI__builtin_ia32_expandloaddf256_mask:
@@ -509,6 +509,7 @@ public:
   bool isReverse() const { return Reverse; }
   uint32_t getFactor() const { return Factor; }
   uint32_t getAlignment() const { return Alignment.value(); }
+  Align getAlign() const { return Alignment; }
   uint32_t getNumMembers() const { return Members.size(); }
 
   /// Try to insert a new member \p Instr with index \p Index and
@@ -727,7 +727,14 @@ public:
   CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
 
   /// Create a call to Masked Load intrinsic
-  CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
+                                 Value *PassThru = nullptr,
+                                 const Twine &Name = ""),
+      "Use the version that takes Align instead") {
+    return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+  }
+  CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
                              Value *PassThru = nullptr, const Twine &Name = "");
 
   /// Create a call to Masked Store intrinsic
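The IRBuilder.h hunk above keeps the old unsigned overload as an inline forwarder wrapped in LLVM_ATTRIBUTE_DEPRECATED, so existing callers keep compiling but get a warning until they migrate. A minimal self-contained sketch of the same pattern follows; the Align stub, the Builder type, and createLoad are stand-in names for illustration, not code from the patch.

#include <cstdint>
#include <cstdio>

// Stand-in for llvm::Align, only for this sketch.
struct Align {
  explicit Align(uint64_t V) : Value(V) {}
  uint64_t Value;
};

struct Builder {
  // New, preferred overload taking the strong alignment type.
  void createLoad(const void *Ptr, Align A) {
    std::printf("load %p aligned to %llu\n", Ptr,
                static_cast<unsigned long long>(A.Value));
  }
  // Old overload kept temporarily; it forwards to the new one and warns,
  // mirroring the LLVM_ATTRIBUTE_DEPRECATED wrapper in the hunk above.
  [[deprecated("Use the version that takes Align instead")]]
  void createLoad(const void *Ptr, unsigned A) { createLoad(Ptr, Align(A)); }
};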
@@ -1257,18 +1257,19 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
   Type *ValTy = Passthru->getType();
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
-  unsigned Align =
-    Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
+  const Align Alignment =
+      Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
+              : Align::None();
 
   // If the mask is all ones just emit a regular store.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
-      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
+      return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
 
   // Convert the mask from an integer type to a vector of i1.
   unsigned NumElts = Passthru->getType()->getVectorNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
-  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
+  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
 }
 
 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
@@ -467,13 +467,13 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
 
 /// Create a call to a Masked Load intrinsic.
 /// \p Ptr - base pointer for the load
-/// \p Align - alignment of the source location
+/// \p Alignment - alignment of the source location
 /// \p Mask - vector of booleans which indicates what vector lanes should
 ///           be accessed in memory
 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
 ///               of the result
 /// \p Name - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
+CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
                                           Value *Mask, Value *PassThru,
                                           const Twine &Name) {
   auto *PtrTy = cast<PointerType>(Ptr->getType());
@@ -483,7 +483,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
   if (!PassThru)
     PassThru = UndefValue::get(DataTy);
   Type *OverloadedTypes[] = { DataTy, PtrTy };
-  Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
+  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                                OverloadedTypes, Name);
 }
@@ -1331,7 +1331,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
 
   // The pass-through vector for an x86 masked load is a zero vector.
   CallInst *NewMaskedLoad =
-      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
+      IC.Builder.CreateMaskedLoad(PtrCast, Align::None(), BoolMask, ZeroVec);
   return IC.replaceInstUsesWith(II, NewMaskedLoad);
 }
 
@@ -2945,8 +2945,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(
-          ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
                                          getShadow(PassThru), "_msmaskedld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
@@ -2263,7 +2263,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
                       : ShuffledMask;
         }
         NewLoad =
-            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlignment(),
+            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                      GroupMask, UndefVec, "wide.masked.vec");
       }
       else
@@ -2475,8 +2475,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
       if (isMaskRequired)
         NewLI = Builder.CreateMaskedLoad(
-            VecPtr, Alignment.value(), BlockInMaskParts[Part],
-            UndefValue::get(DataTy), "wide.masked.load");
+            VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
+            "wide.masked.load");
       else
         NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
                                           "wide.load");