[globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.
This patch splits atomics out of the generic G_LOAD/G_STORE and into their own G_ATOMIC_LOAD/G_ATOMIC_STORE. This is a pragmatic decision rather than a necessary one: atomic load/store has little implementation in common with non-atomic load/store, and the two tend to be handled very differently throughout the backend. The split also has the nice side effect of slightly improving common-case performance at ISel, since an atomicity check is no longer needed in the matcher table.

All targets have been updated to remove the atomic load/store check from the G_LOAD/G_STORE path. AArch64 has also been updated to mark G_ATOMIC_LOAD/G_ATOMIC_STORE legal.

There is one issue with this patch, which also affects extending loads and truncating stores: the rules only match when an appropriate G_ANYEXT is present in the MIR. For example,
  (G_ATOMIC_STORE (G_TRUNC:s16 (G_ANYEXT:s32 (G_ATOMIC_LOAD:s16 X))))
will match, but
  (G_ATOMIC_STORE (G_ATOMIC_LOAD:s16 X))
will not. This shouldn't be a problem at the moment, but as we get better at eliminating extends/truncates we will likely start failing to match in some cases. The current plan is to fix this in a patch that changes the representation of extending-load/truncating-store to allow the MMO to describe a different type than the operation.

llvm-svn: 319691
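For illustration, the updated IRTranslator test later in this diff expects atomic IR loads and stores to become the new opcodes rather than G_LOAD/G_STORE. The two lines below are adapted from those CHECK lines; the register names are illustrative only:

    %v0:_(s8) = G_ATOMIC_LOAD %addr(p0) :: (load unordered 1 from %ir.addr)
    G_ATOMIC_STORE %v0(s8), %addr(p0) :: (store monotonic 1 into %ir.addr)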
parent ef573fa073 · commit 04e4f47e93
@@ -277,7 +277,7 @@ bool InstructionSelector::executeMatchTable(
            return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (!isStrongerThan(Ordering, MMO->getOrdering()))
        if (isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
@@ -571,6 +571,30 @@ public:
  MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
                                 MachineMemOperand &MMO);

  /// Build and insert `Res<def> = G_ATOMIC_LOAD Addr, MMO`.
  ///
  /// Loads the value stored at \p Addr. Puts the result in \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicLoad(unsigned Res, unsigned Addr,
                                      MachineMemOperand &MMO);

  /// Build and insert `G_ATOMIC_STORE Val, Addr, MMO`.
  ///
  /// Stores the value \p Val to \p Addr.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Val must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicStore(unsigned Val, unsigned Addr,
                                       MachineMemOperand &MMO);

  /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0`.
  ///
  /// \pre setBasicBlock or setMI must have been called.
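A minimal usage sketch of the new builders (not part of the patch; it mirrors the IRTranslator change further down and assumes MIRBuilder, Res, Addr and an existing MachineMemOperand *MMO are in scope):

    // Non-atomic MMOs must keep going through buildLoad(); buildAtomicLoad()
    // asserts that the MMO carries an atomic ordering.
    if (MMO->getOrdering() == AtomicOrdering::NotAtomic)
      MIRBuilder.buildLoad(Res, Addr, *MMO);        // ordinary G_LOAD
    else
      MIRBuilder.buildAtomicLoad(Res, Addr, *MMO);  // new G_ATOMIC_LOAD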
@@ -265,6 +265,12 @@ HANDLE_TARGET_OPCODE(G_LOAD)
/// Generic store.
HANDLE_TARGET_OPCODE(G_STORE)

/// Generic atomic load
HANDLE_TARGET_OPCODE(G_ATOMIC_LOAD)

/// Generic atomic store
HANDLE_TARGET_OPCODE(G_ATOMIC_STORE)

/// Generic atomic cmpxchg with internal success check.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
@@ -482,6 +482,28 @@ def G_STORE : Instruction {
  let mayStore = 1;
}

// Generic atomic load. Expects a MachineMemOperand in addition to explicit
// operands. Technically, we could have handled this as a G_LOAD, however we
// decided to keep it separate on the basis that atomic loads tend to have
// very different handling to non-atomic loads.
def G_ATOMIC_LOAD : Instruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins ptype1:$addr);
  let hasSideEffects = 0;
  let mayLoad = 1;
}

// Generic atomic store. Expects a MachineMemOperand in addition to explicit
// operands. Technically, we could have handled this as a G_STORE, however we
// decided to keep it separate on the basis that atomic stores tend to have
// very different handling to non-atomic stores.
def G_ATOMIC_STORE : Instruction {
  let OutOperandList = (outs);
  let InOperandList = (ins type0:$src, ptype1:$addr);
  let hasSideEffects = 0;
  let mayStore = 1;
}

// Generic atomic cmpxchg with internal success check. Expects a
// MachineMemOperand in addition to explicit operands.
def G_ATOMIC_CMPXCHG_WITH_SUCCESS : Instruction {
@@ -23,11 +23,6 @@
class GINodeEquiv<Instruction i, SDNode node> {
  Instruction I = i;
  SDNode Node = node;

  // SelectionDAG has separate nodes for atomic and non-atomic memory operations
  // (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel
  // stores this information in the MachineMemoryOperand.
  bit CheckMMOIsNonAtomic = 0;
}

// These are defined in the same order as the G_* instructions.
@@ -80,19 +75,16 @@ def : GINodeEquiv<G_BSWAP, bswap>;
// Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
// complications that tablegen must take care of. For example, Predicates such
// as isSignExtLoad require that this is not a perfect 1:1 mapping since a
// sign-extending load is (G_SEXT (G_LOAD x)) in GlobalISel. Additionally,
// G_LOAD handles both atomic and non-atomic loads where as SelectionDAG had
// separate nodes for them. This GINodeEquiv maps the non-atomic loads to
// G_LOAD with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_LOAD, ld> { let CheckMMOIsNonAtomic = 1; }
// sign-extending load is (G_SEXT (G_LOAD x)) in GlobalISel.
def : GINodeEquiv<G_LOAD, ld>;
// Broadly speaking G_STORE is equivalent to ISD::STORE but there are some
// complications that tablegen must take care of. For example, predicates such
// as isTruncStore require that this is not a perfect 1:1 mapping since a
// truncating store is (G_STORE (G_TRUNCATE x)) in GlobalISel. Additionally,
// G_STORE handles both atomic and non-atomic stores where as SelectionDAG had
// separate nodes for them. This GINodeEquiv maps the non-atomic stores to
// G_STORE with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
// truncating store is (G_STORE (G_TRUNCATE x)) in GlobalISel.
def : GINodeEquiv<G_STORE, st>;

def : GINodeEquiv<G_ATOMIC_LOAD, atomic_load>;
def : GINodeEquiv<G_ATOMIC_STORE, atomic_store>;

def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
@@ -345,6 +345,16 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());

  if (LI.getOrdering() != AtomicOrdering::NotAtomic) {
    MIRBuilder.buildAtomicLoad(
        Res, Addr,
        *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                  Flags, DL->getTypeStoreSize(LI.getType()),
                                  getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                  LI.getSyncScopeID(), LI.getOrdering()));
    return true;
  }

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -366,6 +376,17 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  if (SI.getOrdering() != AtomicOrdering::NotAtomic) {
    MIRBuilder.buildAtomicStore(
        Val, Addr,
        *MF->getMachineMemOperand(
            MachinePointerInfo(SI.getPointerOperand()), Flags,
            DL->getTypeStoreSize(SI.getValueOperand()->getType()),
            getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
            SI.getOrdering()));
    return true;
  }

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
@@ -295,6 +295,8 @@ MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
                                                MachineMemOperand &MMO) {
  assert(MRI->getType(Res).isValid() && "invalid operand type");
  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
         "invalid atomic ordering");

  return buildInstr(TargetOpcode::G_LOAD)
      .addDef(Res)
@@ -306,6 +308,8 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
                                                 MachineMemOperand &MMO) {
  assert(MRI->getType(Val).isValid() && "invalid operand type");
  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
  assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
         "invalid atomic ordering");

  return buildInstr(TargetOpcode::G_STORE)
      .addUse(Val)
@@ -313,6 +317,34 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicLoad(unsigned Res,
                                                      unsigned Addr,
                                                      MachineMemOperand &MMO) {
  assert(MRI->getType(Res).isValid() && "invalid operand type");
  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
         "invalid atomic ordering");

  return buildInstr(TargetOpcode::G_ATOMIC_LOAD)
      .addDef(Res)
      .addUse(Addr)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicStore(unsigned Val,
                                                       unsigned Addr,
                                                       MachineMemOperand &MMO) {
  assert(MRI->getType(Val).isValid() && "invalid operand type");
  assert(MRI->getType(Addr).isPointer() && "invalid operand type");
  assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
         "invalid atomic ordering");

  return buildInstr(TargetOpcode::G_ATOMIC_STORE)
      .addUse(Val)
      .addUse(Addr)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
                                                 unsigned CarryOut,
                                                 unsigned Op0, unsigned Op1,
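For reference, a sketch (hypothetical values, not from the patch) of driving these helpers end to end; the getMachineMemOperand call mirrors the IRTranslator change above, and MF, MIRBuilder, Res and Addr are assumed to already exist:

    // Seq_cst 8-byte atomic load, then a release store of the loaded value.
    // Both orderings are atomic, so the asserts in the builders are satisfied.
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, /*Size=*/8,
        /*Alignment=*/8, AAMDNodes(), /*Ranges=*/nullptr, SyncScope::System,
        AtomicOrdering::SequentiallyConsistent);
    MIRBuilder.buildAtomicLoad(Res, Addr, *LoadMMO);

    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, /*Size=*/8,
        /*Alignment=*/8, AAMDNodes(), /*Ranges=*/nullptr, SyncScope::System,
        AtomicOrdering::Release);
    MIRBuilder.buildAtomicStore(Res, Addr, *StoreMMO);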
@@ -889,12 +889,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
@@ -231,6 +231,14 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
    setAction({MemOp, 1, p0}, Legal);
  }

  for (unsigned MemOp : {G_ATOMIC_LOAD, G_ATOMIC_STORE}) {
    for (auto Ty : {s8, s16, s32, s64, p0})
      setAction({MemOp, Ty}, Legal);

    // And everything's fine in addrspace 0.
    setAction({MemOp, 1, p0}, Legal);
  }

  // Constants
  for (auto Ty : {s32, s64}) {
    setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -801,12 +801,6 @@ bool ARMInstructionSelector::select(MachineInstr &I,
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    unsigned Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();
@@ -484,11 +484,6 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;
@@ -40,10 +40,10 @@ define [1 x double] @constant() {
  ret [1 x double] [double 1.0]
}

; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate constant: [1 x double] (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -54,7 +54,7 @@ end:
  ret i32 %res

true:
  store atomic i32 42, i32* %addr seq_cst, align 4
  %t = extractvalue [1 x double] [double 1.0], 0
  br label %end

false:
@@ -90,16 +90,6 @@ define i128 @sequence_sizes([8 x i8] %in) {
  ret i128 undef
}

; Just to make sure we don't accidentally emit a normal load/store.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
  store atomic i64 0, i64* %addr unordered, align 8
  %res = load atomic i64, i64* %addr seq_cst, align 8
  ret i64 %res
}

; Make sure we don't mess up metadata arguments.
declare void @llvm.write_register.i64(metadata, i64)
@@ -1332,12 +1332,12 @@ define void @test_lifetime_intrin() {
define void @test_load_store_atomics(i8* %addr) {
; CHECK-LABEL: name: test_load_store_atomics
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
  %v0 = load atomic i8, i8* %addr unordered, align 1
  store atomic i8 %v0, i8* %addr monotonic, align 1
@@ -832,7 +832,6 @@ def MOVfpimmz : I<(outs FPR32:$dst), (ins f32imm:$imm), [(set FPR32:$dst, fpimmz
// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 22*/ [[LABEL:[0-9]+]],
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
@@ -861,7 +860,6 @@ def LOAD : I<(outs GPR32:$dst), (ins GPR32:$src1),
// CHECK-NEXT: // MIs[0] Operand 1
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s16,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_LOAD,
// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/1, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[1] Operand 0
// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s16,
// CHECK-NEXT: // MIs[1] src1
@@ -2378,8 +2378,9 @@ private:
  CodeGenRegBank CGRegs;

  /// Keep track of the equivalence between SDNodes and Instruction by mapping
  /// SDNodes to the GINodeEquiv mapping. We need to map to the GINodeEquiv to
  /// check for attributes on the relation such as CheckMMOIsNonAtomic.
  /// SDNodes to the GINodeEquiv mapping. We map to the GINodeEquiv in case we
  /// need to check for attributes on the relation such as (the now removed)
  /// CheckMMOIsNonAtomic.
  /// This is defined using 'GINodeEquiv' in the target description.
  DenseMap<Record *, Record *> NodeEquivs;
@@ -2398,6 +2399,8 @@ private:
  Record *findNodeEquiv(Record *N) const;

  Error importRulePredicates(RuleMatcher &M, ArrayRef<Predicate> Predicates);
  Error importInstructionPredicates(InstructionMatcher &InsnMatcher,
                                    const TreePatternNode *Src) const;
  Expected<InstructionMatcher &> createAndImportSelDAGMatcher(
      RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
      const TreePatternNode *Src, unsigned &TempOpIdx) const;
@@ -2483,45 +2486,8 @@ GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
  return Error::success();
}

Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
    RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
    const TreePatternNode *Src, unsigned &TempOpIdx) const {
  Record *SrcGIEquivOrNull = nullptr;
  const CodeGenInstruction *SrcGIOrNull = nullptr;

  // Start with the defined operands (i.e., the results of the root operator).
  if (Src->getExtTypes().size() > 1)
    return failedImport("Src pattern has multiple results");

  if (Src->isLeaf()) {
    Init *SrcInit = Src->getLeafValue();
    if (isa<IntInit>(SrcInit)) {
      InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
          &Target.getInstruction(RK.getDef("G_CONSTANT")));
    } else
      return failedImport(
          "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
  } else {
    SrcGIEquivOrNull = findNodeEquiv(Src->getOperator());
    if (!SrcGIEquivOrNull)
      return failedImport("Pattern operator lacks an equivalent Instruction" +
                          explainOperator(Src->getOperator()));
    SrcGIOrNull = &Target.getInstruction(SrcGIEquivOrNull->getValueAsDef("I"));

    // The operators look good: match the opcode
    InsnMatcher.addPredicate<InstructionOpcodeMatcher>(SrcGIOrNull);
  }

  unsigned OpIdx = 0;
  for (const TypeSetByHwMode &VTy : Src->getExtTypes()) {
    // Results don't have a name unless they are the root node. The caller will
    // set the name if appropriate.
    OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
    if (auto Error = OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
      return failedImport(toString(std::move(Error)) +
                          " for result of Src pattern operator");
  }

Error GlobalISelEmitter::importInstructionPredicates(
    InstructionMatcher &InsnMatcher, const TreePatternNode *Src) const {
  for (const auto &Predicate : Src->getPredicateFns()) {
    if (Predicate.isAlwaysTrue())
      continue;
@@ -2610,9 +2576,50 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
    return failedImport("Src pattern child has predicate (" +
                        explainPredicates(Src) + ")");
  }
  if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");

  return Error::success();
}

Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
    RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
    const TreePatternNode *Src, unsigned &TempOpIdx) const {
  Record *SrcGIEquivOrNull = nullptr;
  const CodeGenInstruction *SrcGIOrNull = nullptr;

  // Start with the defined operands (i.e., the results of the root operator).
  if (Src->getExtTypes().size() > 1)
    return failedImport("Src pattern has multiple results");

  if (Src->isLeaf()) {
    Init *SrcInit = Src->getLeafValue();
    if (isa<IntInit>(SrcInit)) {
      InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
          &Target.getInstruction(RK.getDef("G_CONSTANT")));
    } else
      return failedImport(
          "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
  } else {
    SrcGIEquivOrNull = findNodeEquiv(Src->getOperator());
    if (!SrcGIEquivOrNull)
      return failedImport("Pattern operator lacks an equivalent Instruction" +
                          explainOperator(Src->getOperator()));
    SrcGIOrNull = &Target.getInstruction(SrcGIEquivOrNull->getValueAsDef("I"));

    // The operators look good: match the opcode
    InsnMatcher.addPredicate<InstructionOpcodeMatcher>(SrcGIOrNull);
  }

  unsigned OpIdx = 0;
  for (const TypeSetByHwMode &VTy : Src->getExtTypes()) {
    // Results don't have a name unless they are the root node. The caller will
    // set the name if appropriate.
    OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
    if (auto Error = OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
      return failedImport(toString(std::move(Error)) +
                          " for result of Src pattern operator");
  }

  if (Src->isLeaf()) {
    Init *SrcInit = Src->getLeafValue();
    if (IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
@@ -2631,6 +2638,8 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
      // here since we don't support ImmLeaf predicates yet. However, we still
      // need to note the hidden operand to get GIM_CheckNumOperands correct.
      InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
      if (auto Error = importInstructionPredicates(InsnMatcher, Src))
        return std::move(Error);
      return InsnMatcher;
    }
@@ -2665,6 +2674,8 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
    }
  }

  if (auto Error = importInstructionPredicates(InsnMatcher, Src))
    return std::move(Error);
  return InsnMatcher;
}
@@ -3698,6 +3709,40 @@ TreePatternNode *GlobalISelEmitter::fixupPatternNode(TreePatternNode *N) {
      return Ext;
    }
  }

  if (N->getOperator()->getName() == "atomic_load") {
    // If it's an atomic-load we need to adapt the pattern slightly. We need
    // to split the node into (anyext (atomic_load ...)), and then apply the
    // <<atomic_load_TY>> predicate by updating the result type of the load.
    //
    // For example:
    //   (atomic_load:[i32] [iPTR])<<atomic_load_i16>>
    // must be transformed into:
    //   (anyext:[i32] (atomic_load:[i16] [iPTR]))

    std::vector<TreePredicateFn> Predicates;
    Record *MemVT = nullptr;
    for (const auto &P : N->getPredicateFns()) {
      if (P.isAtomic() && P.getMemoryVT()) {
        MemVT = P.getMemoryVT();
        continue;
      }
      Predicates.push_back(P);
    }

    if (MemVT) {
      TypeSetByHwMode ValueTy = getValueType(MemVT);
      if (ValueTy != N->getType(0)) {
        TreePatternNode *Ext =
            new TreePatternNode(RK.getDef("anyext"), {N}, 1);
        Ext->setType(0, N->getType(0));
        N->clearPredicateFns();
        N->setPredicateFns(Predicates);
        N->setType(0, ValueTy);
        return Ext;
      }
    }
  }

  return N;