//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  virtual void PreprocessISelDAG();

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

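  // Convert the condition-code operand of an ARM conditional move into the
  // (predicate, CPSR flag register) operand pair expected by predicated
  // instructions.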
  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                       SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);

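  // Immediate predicates used by the patterns included from
  // ARMGenDAGISel.inc below.  is_so_imm checks for an ARM modified immediate
  // (an 8-bit value rotated right by an even amount); is_t2_so_imm checks the
  // Thumb-2 modified-immediate encoding.  The *_not variants test the bitwise
  // complement so the complemented forms (e.g. MVN / BIC) can be selected.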
  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.  (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
  /// 3 or 4.  These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  // Select special operations if node forms integer ABS pattern
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectInlineAsm(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so Imm will receive the 32 bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}

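/// PreprocessISelDAG - DAG normalization folds the left shift of a
/// bit-extraction pattern into its mask (e.g. ((x & 0xff00) >> 8) << 2
/// becomes (x >> 6) & 0x3fc); the trailing zeros in the new mask prevent
/// UBFX selection.  Undo this here by folding the left shift back into the
/// 'base + offset' address computation; see the comments in the loop below.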
void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  bool isThumb2 = Subtarget->isThumb();
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (N->getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as a shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).

    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, a left shifter operand of 1 / 2 is free but others are not.
      // e.g.
      //  ubfx   r3, r1, #16, #8
      //  ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //  mov.w  r9, #1020
      //  and.w  r2, r9, r1, lsr #14
      //  ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl, CurDAG->getConstant(And_imm, MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, MVT::i32));
    CurDAG->UpdateNodeOperands(N, N0, N1);
  }
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() &&
      !Subtarget->isSwift())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII =
      static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

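// isShifterOpProfitable - Return true if it is worthwhile to fold the shift
// as a shifter operand.  On Cortex-A9-like cores and Swift a shifted register
// operand is not always free, so only fold when the shift has a single use or
// is a cheap LSL (#2, or #1 on Swift).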
bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}


bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base   = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}



bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}


//-----

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}


bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

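// Match any address as a bare base register with no offset operand.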
bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
|
|
|
|
Base = N;
|
|
|
|
return true;
|
|
|
|
}
|
2006-08-01 20:58:43 +08:00
|
|
|
|
2010-09-22 04:31:19 +08:00
|
|
|
bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue &Base, SDValue &Offset,
|
|
|
|
SDValue &Opc) {
|
2007-01-19 15:51:42 +08:00
|
|
|
if (N.getOpcode() == ISD::SUB) {
|
|
|
|
// X - C is canonicalize to X + -C, no need to handle it here.
|
|
|
|
Base = N.getOperand(0);
|
|
|
|
Offset = N.getOperand(1);
|
2009-08-12 04:47:22 +08:00
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2011-02-14 06:25:43 +08:00
|
|
|
if (!CurDAG->isBaseWithConstantOffset(N)) {
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = N;
|
|
|
|
if (N.getOpcode() == ISD::FrameIndex) {
|
|
|
|
int FI = cast<FrameIndexSDNode>(N)->getIndex();
|
2013-06-20 05:36:55 +08:00
|
|
|
Base = CurDAG->getTargetFrameIndex(FI,
|
|
|
|
getTargetLowering()->getPointerTy());
|
2007-01-13 04:35:49 +08:00
|
|
|
}
|
2009-08-12 04:47:22 +08:00
|
|
|
Offset = CurDAG->getRegister(0, MVT::i32);
|
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
// If the RHS is +/- imm8, fold into addr mode.
|
2011-01-19 23:12:16 +08:00
|
|
|
int RHSC;
|
|
|
|
if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
|
|
|
|
-256 + 1, 256, RHSC)) { // 8 bits.
|
|
|
|
Base = N.getOperand(0);
|
|
|
|
if (Base.getOpcode() == ISD::FrameIndex) {
|
|
|
|
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
|
2013-06-20 05:36:55 +08:00
|
|
|
Base = CurDAG->getTargetFrameIndex(FI,
|
|
|
|
getTargetLowering()->getPointerTy());
|
2011-01-19 23:12:16 +08:00
|
|
|
}
|
|
|
|
Offset = CurDAG->getRegister(0, MVT::i32);
|
2007-01-24 10:45:25 +08:00
|
|
|
|
2011-01-19 23:12:16 +08:00
|
|
|
ARM_AM::AddrOpc AddSub = ARM_AM::add;
|
|
|
|
if (RHSC < 0) {
|
|
|
|
AddSub = ARM_AM::sub;
|
2011-02-14 06:25:43 +08:00
|
|
|
RHSC = -RHSC;
|
2007-01-13 04:35:49 +08:00
|
|
|
}
|
2011-01-19 23:12:16 +08:00
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
|
|
|
|
return true;
|
2007-01-13 04:35:49 +08:00
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = N.getOperand(0);
|
|
|
|
Offset = N.getOperand(1);
|
2009-08-12 04:47:22 +08:00
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
2007-01-13 04:35:49 +08:00
|
|
|
}
|
|
|
|
|
2010-01-05 09:24:18 +08:00
|
|
|
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue &Offset, SDValue &Opc) {
|
2010-01-05 09:24:18 +08:00
|
|
|
unsigned Opcode = Op->getOpcode();
|
2007-01-19 15:51:42 +08:00
|
|
|
ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
|
|
|
|
? cast<LoadSDNode>(Op)->getAddressingMode()
|
|
|
|
: cast<StoreSDNode>(Op)->getAddressingMode();
|
|
|
|
ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
|
|
|
|
? ARM_AM::add : ARM_AM::sub;
|
2011-01-19 23:12:16 +08:00
|
|
|
int Val;
|
|
|
|
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 12 bits.
|
|
|
|
Offset = CurDAG->getRegister(0, MVT::i32);
|
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
|
|
|
|
return true;
|
2007-01-13 04:35:49 +08:00
|
|
|
}
|
2006-08-01 20:58:43 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
Offset = N;
|
2009-08-12 04:47:22 +08:00
|
|
|
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
2006-08-26 01:55:16 +08:00
|
|
|
}
|
|
|
|
|
2010-10-22 03:38:40 +08:00
|
|
|
bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue &Base, SDValue &Offset) {
|
2011-02-14 06:25:43 +08:00
|
|
|
if (!CurDAG->isBaseWithConstantOffset(N)) {
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = N;
|
|
|
|
if (N.getOpcode() == ISD::FrameIndex) {
|
|
|
|
int FI = cast<FrameIndexSDNode>(N)->getIndex();
|
2013-06-20 05:36:55 +08:00
|
|
|
Base = CurDAG->getTargetFrameIndex(FI,
|
|
|
|
getTargetLowering()->getPointerTy());
|
2009-11-24 08:44:37 +08:00
|
|
|
} else if (N.getOpcode() == ARMISD::Wrapper &&
|
2013-12-02 18:35:41 +08:00
|
|
|
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = N.getOperand(0);
|
2006-10-06 00:48:49 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
|
2009-08-12 04:47:22 +08:00
|
|
|
MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
2006-05-23 10:48:20 +08:00
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
// If the RHS is +/- imm8, fold into addr mode.
|
2011-01-19 23:12:16 +08:00
|
|
|
int RHSC;
|
|
|
|
if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
|
|
|
|
-256 + 1, 256, RHSC)) {
|
|
|
|
Base = N.getOperand(0);
|
|
|
|
if (Base.getOpcode() == ISD::FrameIndex) {
|
|
|
|
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
|
2013-06-20 05:36:55 +08:00
|
|
|
Base = CurDAG->getTargetFrameIndex(FI,
|
|
|
|
getTargetLowering()->getPointerTy());
|
2011-01-19 23:12:16 +08:00
|
|
|
}
|
2007-01-24 10:45:25 +08:00
|
|
|
|
2011-01-19 23:12:16 +08:00
|
|
|
ARM_AM::AddrOpc AddSub = ARM_AM::add;
|
|
|
|
if (RHSC < 0) {
|
|
|
|
AddSub = ARM_AM::sub;
|
2011-02-14 06:25:43 +08:00
|
|
|
RHSC = -RHSC;
|
2006-08-26 01:55:16 +08:00
|
|
|
}
|
2011-01-19 23:12:16 +08:00
|
|
|
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
|
|
|
|
MVT::i32);
|
|
|
|
return true;
|
2006-08-26 01:55:16 +08:00
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = N;
|
|
|
|
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
|
2009-08-12 04:47:22 +08:00
|
|
|
MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
return true;
|
2006-12-15 02:58:37 +08:00
|
|
|
}
|
|
|
|
|
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}

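/// SelectAddrMode6Offset - Match a post-increment offset for an addrmode6
/// access.  A constant increment equal to the access size is selected as the
/// fixed-stride form, encoded as register 0.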
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

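/// SelectAddrModePC - Match an ARMISD::PIC_ADD node with a single use,
/// returning its offset operand and label constant.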
bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

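/// SelectThumbAddrModeRR - Match a Thumb1 [reg, reg] addressing mode.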
bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

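/// SelectThumbAddrModeRI - Match a Thumb1 base-plus-register address for a
/// load/store of the given scale.  Addresses that would be better served by
/// the SP-relative, constant-pool, or scaled-immediate forms are rejected so
/// those patterns can match instead.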
bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}

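/// SelectThumbAddrModeImm5S - Match a Thumb1 base + (imm5 * Scale) addressing
/// mode, falling back to a base-only match when the offset does not fit.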
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

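/// SelectThumbAddrModeSP - Match a Thumb1 SP-relative (or frame-index)
/// address with an optional + (imm8 * 4) offset.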
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI,
                                       getTargetLowering()->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

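/// SelectT2ShifterOperandReg - Match a Thumb2 shifter operand: a register
/// shifted by an immediate amount.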
bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

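/// SelectT2AddrModeImm12 - Match a Thumb2 base + imm12 address, leaving
/// negative offsets to the imm8 form.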
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

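/// SelectT2AddrModeImm8 - Match a Thumb2 base - imm8 address (negative
/// offsets only).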
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

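/// SelectT2AddrModeImm8Offset - Match a Thumb2 pre/post-indexed imm8 offset.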
bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}

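/// SelectT2AddrModeSoReg - Match a Thumb2 [reg, reg << {0-3}] address.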
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
                                                SDValue &OffImm) {
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
    return true;

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS)
    return true;

  uint32_t RHSC = (int)RHS->getZExtValue();
  if (RHSC > 1020 || RHSC % 4 != 0)
    return true;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy());
  }

  OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
  return true;
}

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

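/// SelectARMIndexedLoad - Select a pre- or post-indexed ARM-mode load,
/// choosing the LDR/LDRB/LDRH/LDRSB/LDRSH variant that matches the memory
/// type and addressing mode.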
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
             SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
             SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    }
  }

  return NULL;
}

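/// SelectT2IndexedLoad - Select a pre- or post-indexed Thumb2 load.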
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                  MVT::Other, Ops);
  }

  return NULL;
}

/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}

// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  switch (Opc) {
  default: break;
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}

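/// SelectVLD - Select a NEON VLD1/2/3/4 operation (optionally with address
/// writeback), forming the required register sequence and extracting the
/// individual vector results.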
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.  This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}

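/// SelectVST - Select a NEON VST1/2/3/4 operation (optionally with address
/// writeback), forming the source register sequence to be stored.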
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }

  std::vector<EVT> ResTys;
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;

  // Double registers and VST1/VST2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    SDValue SrcReg;
    if (NumVecs == 1) {
      SrcReg = N->getOperand(Vec0Idx);
    } else if (is64BitVector) {
      // Form a REG_SEQUENCE to force register allocation.
      SDValue V0 = N->getOperand(Vec0Idx + 0);
      SDValue V1 = N->getOperand(Vec0Idx + 1);
      if (NumVecs == 2)
        SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
      else {
        SDValue V2 = N->getOperand(Vec0Idx + 2);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(Vec0Idx + 3);
        SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
      }
    } else {
      // Form a QQ register.
      SDValue Q0 = N->getOperand(Vec0Idx);
      SDValue Q1 = N->getOperand(Vec0Idx + 1);
      SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
    }

    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(SrcReg);
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

    // Transfer memoperands.
    cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);

    return VSt;
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.

  // Form the QQQQ REG_SEQUENCE.
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  SDValue V2 = N->getOperand(Vec0Idx + 2);
  SDValue V3 = (NumVecs == 3)
    ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
    : N->getOperand(Vec0Idx + 3);
  SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);

  // Store the even D registers.  This is always an updating store, so that it
  // provides the address to the second store for the odd subregs.
  const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
  SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                        MemAddr.getValueType(),
                                        MVT::Other, OpsA);
  cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
  Chain = SDValue(VStA, 1);

  // Store the odd D registers.
  Ops.push_back(SDValue(VStA, 0));
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    assert(isa<ConstantSDNode>(Inc.getNode()) &&
           "only constant post-increment update allowed for VST3/4");
    (void)Inc;
    Ops.push_back(Reg0);
  }
  Ops.push_back(RegSeq);
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);
  SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                        Ops);
  cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
  return VStB;
}

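/// SelectVLDSTLane - Select a NEON single-lane VLD or VST of 2-4 vectors
/// (optionally with address writeback).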
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                         bool isUpdating, unsigned NumVecs,
                                         const uint16_t *DOpcodes,
                                         const uint16_t *QOpcodes) {
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
    // Quad-register operations:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  std::vector<EVT> ResTys;
  if (IsLoad) {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
                                      MVT::i64, ResTyElts));
  }
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
  }

  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(Vec0Idx + 2);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(Vec0Idx + 3);
    if (is64BitVector)
      SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
  }
  Ops.push_back(SuperReg);
  Ops.push_back(getI32Imm(Lane));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                  QOpcodes[OpcodeIndex]);
  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
  if (!IsLoad)
    return VLdLn;

  // Extract the subregisters.
  SuperReg = SDValue(VLdLn, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  return NULL;
}

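/// SelectVLDDup - Select a NEON VLD2/3/4-dup operation, which loads one
/// element and duplicates it into all lanes of each destination vector.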
2011-02-08 01:43:21 +08:00
|
|
|
SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
                                      unsigned NumVecs,
                                      const uint16_t *Opcodes) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
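    // For example (illustrative): an Alignment of 12 becomes 12 & -12 == 4,
    // i.e. the largest power of two dividing the original value.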
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld-dup type");
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDValue SuperReg;
  unsigned Opc = Opcodes[OpcodeIndex];
  SmallVector<SDValue, 6> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    // fixed-stride update instructions don't have an explicit writeback
    // operand. It's implicit in the opcode itself.
    SDValue Inc = N->getOperand(2);
    if (!isa<ConstantSDNode>(Inc.getNode()))
      Ops.push_back(Inc);
    // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
    else if (NumVecs > 2)
      Ops.push_back(Reg0);
  }
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  std::vector<EVT> ResTys;
  ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);
  SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
  SuperReg = SDValue(VLdDup, 0);

  // Extract the subregisters.
  assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
  unsigned SubIdx = ARM::dsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
                                    unsigned Opc) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  unsigned FirstTblReg = IsExt ? 2 : 1;

  // Form a REG_SEQUENCE to force register allocation.
  SDValue RegSeq;
  SDValue V0 = N->getOperand(FirstTblReg + 0);
  SDValue V1 = N->getOperand(FirstTblReg + 1);
  if (NumVecs == 2)
    RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
  else {
    SDValue V2 = N->getOperand(FirstTblReg + 2);
    // If it's a vtbl3, form a quad D-register and leave the last part as
    // an undef.
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(FirstTblReg + 3);
    RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
  }

  SmallVector<SDValue, 6> Ops;
  if (IsExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
  Ops.push_back(getAL(CurDAG));                    // predicate
  Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;

  unsigned Opc = isSigned
    ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
    : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);

  // For unsigned extracts, check for a shift right and mask
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
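      // For example (illustrative): 0xff passes (0xff & 0x100 == 0), while a
      // mask with trailing zeros such as 0x3fc fails (0x3fc & 0x3fd != 0).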
      if (And_imm & (And_imm + 1))
        return NULL;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        // Note: The width operand is encoded as width-1.
        unsigned Width = CountTrailingOnes_32(And_imm) - 1;
        unsigned LSB = Srl_imm;

        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

        if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
          // It's cheaper to use a right shift to extract the top bits.
          if (Subtarget->isThumb()) {
            Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
            SDValue Ops[] = { N->getOperand(0).getOperand(0),
                              CurDAG->getTargetConstant(LSB, MVT::i32),
                              getAL(CurDAG), Reg0, Reg0 };
            return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
          }

          // ARM models shift instructions as MOVsi with shifter operand.
          ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
          SDValue ShOpc =
            CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
                                      MVT::i32);
          SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
                            getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops, 5);
        }

        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, MVT::i32),
                          CurDAG->getTargetConstant(Width, MVT::i32),
                          getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift
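  // For example (illustrative), for an unsigned extract:
  //   (srl (shl x, 8), 16)  ==>  UBFX x, #8, #16
  // i.e. lsb = 16 - 8 and width = 32 - 16 (encoded below as width-1).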
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      // Note: The width operand is encoded as width-1.
      unsigned Width = 32 - Srl_imm - 1;
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, MVT::i32),
                        CurDAG->getTargetConstant(Width, MVT::i32),
                        getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}

/// Target-specific DAG combining for ISD::XOR.
/// Target-independent combining lowers SELECT_CC nodes of the form
/// select_cc setg[ge] X,  0, X, -X
/// select_cc setgt    X, -1, X, -X
/// select_cc setl[te] X,  0, -X, X
/// select_cc setlt    X,  1, -X, X
/// which represent Integer ABS into:
/// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
/// ARM instruction selection detects the latter and matches it to
/// ARM::ABS or ARM::t2ABS machine node.
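/// For example (illustrative), for an i32 value X the matched DAG is:
///   Y   = sra X, 31            ; Y is 0 for X >= 0 and -1 for X < 0
///   abs = xor (add X, Y), Y    ; flips the sign exactly when X is negative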
SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
  SDValue XORSrc0 = N->getOperand(0);
  SDValue XORSrc1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  if (Subtarget->isThumb1Only())
    return NULL;

  if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
    return NULL;

  SDValue ADDSrc0 = XORSrc0.getOperand(0);
  SDValue ADDSrc1 = XORSrc0.getOperand(1);
  SDValue SRASrc0 = XORSrc1.getOperand(0);
  SDValue SRASrc1 = XORSrc1.getOperand(1);
  ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
  EVT XType = SRASrc0.getValueType();
  unsigned Size = XType.getSizeInBits() - 1;

  if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
      XType.isInteger() && SRAConstant != NULL &&
      Size == SRAConstant->getZExtValue()) {
    unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
    return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
  }

  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  EVT VT = N->getValueType(0);
  if (!VT.is128BitVector() || N->getNumOperands() != 2)
    llvm_unreachable("unexpected CONCAT_VECTORS");
  return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
}

SDNode *ARMDAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                      unsigned Op16, unsigned Op32,
                                      unsigned Op64) {
  // Mostly direct translation to the given operations, except that we preserve
  // the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  unsigned Op;
  SDVTList VTs = CurDAG->getVTList(AN->getValueType(0), MVT::Other);
  if (VT == MVT::i8)
    Op = Op8;
  else if (VT == MVT::i16)
    Op = Op16;
  else if (VT == MVT::i32)
    Op = Op32;
  else if (VT == MVT::i64) {
    Op = Op64;
    VTs = CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other);
  } else
    llvm_unreachable("Unexpected atomic operation");

  SmallVector<SDValue, 6> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end

  return CurDAG->SelectNodeTo(Node, Op, VTs, &Ops[0], Ops.size());
}

SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
  SDLoc dl(N);

  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return NULL;   // Already selected.
  }

  switch (N->getOpcode()) {
  default: break;
  case ISD::INLINEASM: {
    SDNode *ResNode = SelectInlineAsm(N);
    if (ResNode)
      return ResNode;
    break;
  }
  case ISD::XOR: {
    // Select special operations if XOR node forms integer ABS pattern
    SDNode *ResNode = SelectABSOp(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ISD::Constant: {
    unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
    bool UseCP = true;
    if (Subtarget->hasThumb2())
      // Thumb2-aware targets have the MOVT instruction, so all immediates can
      // be done with MOV + MOVT, at worst.
      UseCP = 0;
    else {
      if (Subtarget->isThumb()) {
        UseCP = (Val > 255 &&                                  // MOV
                 ~Val > 255 &&                                 // MOV + MVN
                 !ARM_AM::isThumbImmShiftedVal(Val));          // MOV + LSL
      } else
        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&             // MOV
                 ARM_AM::getSOImmVal(~Val) == -1 &&            // MVN
                 !ARM_AM::isSOImmTwoPartVal(Val));             // two instrs.
    }
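    // Illustrative example: on a pre-v6T2, non-Thumb target, 0xff000000 is a
    // valid rotated immediate and stays as a single MOV, whereas a constant
    // like 0x12345678 fails all three checks and is loaded from the pool.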
    if (UseCP) {
      SDValue CPIdx =
        CurDAG->getTargetConstantPool(ConstantInt::get(
                                  Type::getInt32Ty(*CurDAG->getContext()), Val),
                                      getTargetLowering()->getPointerTy());

      SDNode *ResNode;
      if (Subtarget->isThumb1Only()) {
        SDValue Pred = getAL(CurDAG);
        SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
                                         Ops);
      } else {
        SDValue Ops[] = {
          CPIdx,
          CurDAG->getTargetConstant(0, MVT::i32),
          getAL(CurDAG),
          CurDAG->getRegister(0, MVT::i32),
          CurDAG->getEntryNode()
        };
        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
                                         Ops);
      }
      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
      return NULL;
    }

    // Other cases are autogenerated.
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI,
                                              getTargetLowering()->getPointerTy());
    if (Subtarget->isThumb1Only()) {
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
    } else {
      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                      ARM::t2ADDri : ARM::ADDri);
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      unsigned RHSV = C->getZExtValue();
      if (!RHSV) break;
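      // Illustrative examples of the two patterns handled below:
      //   x * 9 == x + (x << 3)  -> ADD with an LSL #3 shifter operand
      //   x * 7 == (x << 3) - x  -> RSB with an LSL #3 shifter operand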
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;

    // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
    // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
    // are entirely contributed by c2 and lower 16-bits are entirely contributed
    // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
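    // Worked example (illustrative): with c1 = 0x1234ffff and c2 = 0x12340000,
    //   (x | 0x12340000) & 0x1234ffff == (x & 0xffff) | 0x12340000,
    // which is exactly "movt x, #0x1234".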
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
    if (!Opc)
      break;
    SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    if (!N1C)
      break;
    if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
      SDValue N2 = N0.getOperand(1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
      if (!N2C)
        break;
      unsigned N1CVal = N1C->getZExtValue();
      unsigned N2CVal = N2C->getZExtValue();
      if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
          (N1CVal & 0xffffU) == 0xffffU &&
          (N2CVal & 0xffffU) == 0x0U) {
        SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
                                                  MVT::i32);
        SDValue Ops[] = { N0.getOperand(0), Imm16,
                          getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
        return CurDAG->getMachineNode(Opc, dl, VT, Ops);
      }
    }
    break;
  }
  case ARMISD::VMOVRRD:
    return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
                                  N->getOperand(0), getAL(CurDAG),
                                  CurDAG->getRegister(0, MVT::i32));
  case ISD::UMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::UMULL : ARM::UMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops);
    }
  }
  case ISD::SMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::SMULL : ARM::SMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops);
    }
  }
  case ARMISD::UMLAL: {
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::UMLAL : ARM::UMLALv5,
                                    dl, MVT::i32, MVT::i32, Ops);
    }
  }
  case ARMISD::SMLAL: {
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::SMLAL : ARM::SMLALv5,
                                    dl, MVT::i32, MVT::i32, Ops);
    }
  }
  case ISD::LOAD: {
    SDNode *ResNode = 0;
    if (Subtarget->isThumb() && Subtarget->hasThumb2())
      ResNode = SelectT2IndexedLoad(N);
    else
      ResNode = SelectARMIndexedLoad(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ARMISD::BRCOND: {
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    unsigned Opc = Subtarget->isThumb() ?
      ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
    SDValue Chain = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    SDValue InFlag = N->getOperand(4);
    assert(N1.getOpcode() == ISD::BasicBlock);
    assert(N2.getOpcode() == ISD::Constant);
    assert(N3.getOpcode() == ISD::Register);

    SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                               cast<ConstantSDNode>(N2)->getZExtValue()),
                               MVT::i32);
    SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
    SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                             MVT::Glue, Ops);
    Chain = SDValue(ResNode, 0);
    if (N->getNumValues() == 2) {
      InFlag = SDValue(ResNode, 1);
      ReplaceUses(SDValue(N, 1), InFlag);
    }
    ReplaceUses(SDValue(N, 0),
                SDValue(Chain.getNode(), Chain.getResNo()));
    return NULL;
  }
  case ARMISD::VZIP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VZIPd8; break;
    case MVT::v4i16: Opc = ARM::VZIPd16; break;
    case MVT::v2f32:
    // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VZIPq8; break;
    case MVT::v8i16: Opc = ARM::VZIPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VZIPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
  }
  case ARMISD::VUZP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VUZPd8; break;
    case MVT::v4i16: Opc = ARM::VUZPd16; break;
    case MVT::v2f32:
    // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VUZPq8; break;
    case MVT::v8i16: Opc = ARM::VUZPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VUZPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
  }
  case ARMISD::VTRN: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VTRNd8; break;
    case MVT::v4i16: Opc = ARM::VTRNd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VTRNq8; break;
    case MVT::v8i16: Opc = ARM::VTRNq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VTRNq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
  }
  case ARMISD::BUILD_VECTOR: {
    EVT VecVT = N->getValueType(0);
    EVT EltVT = VecVT.getVectorElementType();
    unsigned NumElts = VecVT.getVectorNumElements();
    if (EltVT == MVT::f64) {
      assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
      return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
    }
    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
    if (NumElts == 2)
      return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
    assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
    return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
                               N->getOperand(2), N->getOperand(3));
  }

case ARMISD::VLD2DUP: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
|
|
|
|
ARM::VLD2DUPd32 };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, false, 2, Opcodes);
|
2010-11-28 14:51:26 +08:00
|
|
|
}
|
|
|
|
|
2010-11-30 03:35:29 +08:00
|
|
|
case ARMISD::VLD3DUP: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
|
|
|
|
ARM::VLD3DUPd16Pseudo,
|
|
|
|
ARM::VLD3DUPd32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, false, 3, Opcodes);
|
2010-11-30 03:35:29 +08:00
|
|
|
}
|
|
|
|
|
2010-11-30 08:00:35 +08:00
|
|
|
case ARMISD::VLD4DUP: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
|
|
|
|
ARM::VLD4DUPd16Pseudo,
|
|
|
|
ARM::VLD4DUPd32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, false, 4, Opcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD2DUP_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
|
|
|
|
ARM::VLD2DUPd16wb_fixed,
|
|
|
|
ARM::VLD2DUPd32wb_fixed };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, true, 2, Opcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD3DUP_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
|
|
|
|
ARM::VLD3DUPd16Pseudo_UPD,
|
|
|
|
ARM::VLD3DUPd32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, true, 3, Opcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD4DUP_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
|
|
|
|
ARM::VLD4DUPd16Pseudo_UPD,
|
|
|
|
ARM::VLD4DUPd32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDDup(N, true, 4, Opcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD1_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
|
|
|
|
ARM::VLD1d16wb_fixed,
|
|
|
|
ARM::VLD1d32wb_fixed,
|
|
|
|
ARM::VLD1d64wb_fixed };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
|
|
|
|
ARM::VLD1q16wb_fixed,
|
|
|
|
ARM::VLD1q32wb_fixed,
|
|
|
|
ARM::VLD1q64wb_fixed };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD2_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
|
|
|
|
ARM::VLD2d16wb_fixed,
|
|
|
|
ARM::VLD2d32wb_fixed,
|
|
|
|
ARM::VLD1q64wb_fixed};
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
|
|
|
|
ARM::VLD2q16PseudoWB_fixed,
|
|
|
|
ARM::VLD2q32PseudoWB_fixed };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD3_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
|
|
|
|
ARM::VLD3d16Pseudo_UPD,
|
|
|
|
ARM::VLD3d32Pseudo_UPD,
|
|
|
|
ARM::VLD1q64wb_fixed};
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
|
|
|
|
ARM::VLD3q16Pseudo_UPD,
|
|
|
|
ARM::VLD3q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
|
|
|
|
ARM::VLD3q16oddPseudo_UPD,
|
|
|
|
ARM::VLD3q32oddPseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD4_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
|
|
|
|
ARM::VLD4d16Pseudo_UPD,
|
|
|
|
ARM::VLD4d32Pseudo_UPD,
|
|
|
|
ARM::VLD1q64wb_fixed};
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
|
|
|
|
ARM::VLD4q16Pseudo_UPD,
|
|
|
|
ARM::VLD4q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
|
|
|
|
ARM::VLD4q16oddPseudo_UPD,
|
|
|
|
ARM::VLD4q32oddPseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD2LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
|
|
|
|
ARM::VLD2LNd16Pseudo_UPD,
|
|
|
|
ARM::VLD2LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
|
|
|
|
ARM::VLD2LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD3LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
|
|
|
|
ARM::VLD3LNd16Pseudo_UPD,
|
|
|
|
ARM::VLD3LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
|
|
|
|
ARM::VLD3LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VLD4LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
|
|
|
|
ARM::VLD4LNd16Pseudo_UPD,
|
|
|
|
ARM::VLD4LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
|
|
|
|
ARM::VLD4LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST1_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
|
|
|
|
ARM::VST1d16wb_fixed,
|
|
|
|
ARM::VST1d32wb_fixed,
|
|
|
|
ARM::VST1d64wb_fixed };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
|
|
|
|
ARM::VST1q16wb_fixed,
|
|
|
|
ARM::VST1q32wb_fixed,
|
|
|
|
ARM::VST1q64wb_fixed };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST2_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
|
|
|
|
ARM::VST2d16wb_fixed,
|
|
|
|
ARM::VST2d32wb_fixed,
|
|
|
|
ARM::VST1q64wb_fixed};
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
|
|
|
|
ARM::VST2q16PseudoWB_fixed,
|
|
|
|
ARM::VST2q32PseudoWB_fixed };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST3_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
|
|
|
|
ARM::VST3d16Pseudo_UPD,
|
|
|
|
ARM::VST3d32Pseudo_UPD,
|
|
|
|
ARM::VST1d64TPseudoWB_fixed};
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
|
|
|
|
ARM::VST3q16Pseudo_UPD,
|
|
|
|
ARM::VST3q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
|
|
|
|
ARM::VST3q16oddPseudo_UPD,
|
|
|
|
ARM::VST3q32oddPseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST4_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
|
|
|
|
ARM::VST4d16Pseudo_UPD,
|
|
|
|
ARM::VST4d32Pseudo_UPD,
|
|
|
|
ARM::VST1d64QPseudoWB_fixed};
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
|
|
|
|
ARM::VST4q16Pseudo_UPD,
|
|
|
|
ARM::VST4q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
|
|
|
|
ARM::VST4q16oddPseudo_UPD,
|
|
|
|
ARM::VST4q32oddPseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST2LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
|
|
|
|
ARM::VST2LNd16Pseudo_UPD,
|
|
|
|
ARM::VST2LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
|
|
|
|
ARM::VST2LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST3LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
|
|
|
|
ARM::VST3LNd16Pseudo_UPD,
|
|
|
|
ARM::VST3LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
|
|
|
|
ARM::VST3LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARMISD::VST4LN_UPD: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
|
|
|
|
ARM::VST4LNd16Pseudo_UPD,
|
|
|
|
ARM::VST4LNd32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
|
|
|
|
ARM::VST4LNq32Pseudo_UPD };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
|
2010-11-30 08:00:35 +08:00
|
|
|
}
|
|
|
|
|
2009-08-27 01:39:53 +08:00
|
|
|
case ISD::INTRINSIC_VOID:
|
|
|
|
case ISD::INTRINSIC_W_CHAIN: {
|
|
|
|
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
|
|
|
|
switch (IntNo) {
|
|
|
|
default:
|
2010-05-07 00:05:26 +08:00
|
|
|
break;
|
2009-08-27 01:39:53 +08:00
|
|
|
|
2011-05-28 12:07:29 +08:00
|
|
|
case Intrinsic::arm_ldrexd: {
|
|
|
|
SDValue MemAddr = N->getOperand(2);
|
2013-05-25 10:42:55 +08:00
|
|
|
SDLoc dl(N);
|
2011-05-28 12:07:29 +08:00
|
|
|
SDValue Chain = N->getOperand(0);
|
|
|
|
|
2012-11-17 05:55:34 +08:00
|
|
|
bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
|
|
|
|
unsigned NewOpc = isThumb ? ARM::t2LDREXD :ARM::LDREXD;
|
2011-05-28 12:07:29 +08:00
|
|
|
|
|
|
|
// arm_ldrexd returns a i64 value in {i32, i32}
|
|
|
|
std::vector<EVT> ResTys;
|
2012-11-17 05:55:34 +08:00
|
|
|
if (isThumb) {
|
|
|
|
ResTys.push_back(MVT::i32);
|
|
|
|
ResTys.push_back(MVT::i32);
|
|
|
|
} else
|
|
|
|
ResTys.push_back(MVT::Untyped);
|
2011-05-28 12:07:29 +08:00
|
|
|
ResTys.push_back(MVT::Other);
|
|
|
|
|
2012-11-17 05:55:34 +08:00
|
|
|
// Place arguments in the right order.
|
2011-05-28 12:07:29 +08:00
|
|
|
SmallVector<SDValue, 7> Ops;
|
|
|
|
Ops.push_back(MemAddr);
|
|
|
|
Ops.push_back(getAL(CurDAG));
|
|
|
|
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
|
|
|
|
Ops.push_back(Chain);
|
2013-04-20 06:22:57 +08:00
|
|
|
SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
|
2011-05-28 12:07:29 +08:00
|
|
|
// Transfer memoperands.
|
|
|
|
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
|
|
|
|
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
|
|
|
|
cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
|
|
|
|
|
|
|
|
// Remap uses.
|
2013-03-10 06:56:09 +08:00
|
|
|
SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
|
2011-05-28 12:07:29 +08:00
|
|
|
if (!SDValue(N, 0).use_empty()) {
|
2012-11-17 05:55:34 +08:00
|
|
|
SDValue Result;
|
|
|
|
if (isThumb)
|
|
|
|
Result = SDValue(Ld, 0);
|
|
|
|
else {
|
|
|
|
SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
|
|
|
|
SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
|
2013-03-10 06:56:09 +08:00
|
|
|
dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
|
2012-11-17 05:55:34 +08:00
|
|
|
Result = SDValue(ResNode,0);
|
|
|
|
}
|
2011-05-28 12:07:29 +08:00
|
|
|
ReplaceUses(SDValue(N, 0), Result);
|
|
|
|
}
|
|
|
|
if (!SDValue(N, 1).use_empty()) {
|
2012-11-17 05:55:34 +08:00
|
|
|
SDValue Result;
|
|
|
|
if (isThumb)
|
|
|
|
Result = SDValue(Ld, 1);
|
|
|
|
else {
|
|
|
|
SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
|
|
|
|
SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
|
2013-03-10 06:56:09 +08:00
|
|
|
dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
|
2012-11-17 05:55:34 +08:00
|
|
|
Result = SDValue(ResNode,0);
|
|
|
|
}
|
2011-05-28 12:07:29 +08:00
|
|
|
ReplaceUses(SDValue(N, 1), Result);
|
|
|
|
}
|
2013-03-10 06:56:09 +08:00
|
|
|
ReplaceUses(SDValue(N, 2), OutChain);
|
2011-05-28 12:07:29 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_strexd: {
|
2013-05-25 10:42:55 +08:00
|
|
|
SDLoc dl(N);
|
2011-05-28 12:07:29 +08:00
|
|
|
SDValue Chain = N->getOperand(0);
|
|
|
|
SDValue Val0 = N->getOperand(2);
|
|
|
|
SDValue Val1 = N->getOperand(3);
|
|
|
|
SDValue MemAddr = N->getOperand(4);
|
|
|
|
|
|
|
|
// Store exclusive double return a i32 value which is the return status
|
|
|
|
// of the issued store.
|
2013-03-08 04:33:29 +08:00
|
|
|
EVT ResTys[] = { MVT::i32, MVT::Other };
|
2011-05-28 12:07:29 +08:00
|
|
|
|
2012-11-17 05:55:34 +08:00
|
|
|
bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
|
|
|
|
// Place arguments in the right order.
|
2011-05-28 12:07:29 +08:00
|
|
|
SmallVector<SDValue, 7> Ops;
|
2012-11-17 05:55:34 +08:00
|
|
|
if (isThumb) {
|
|
|
|
Ops.push_back(Val0);
|
|
|
|
Ops.push_back(Val1);
|
|
|
|
} else
|
|
|
|
// arm_strexd uses GPRPair.
|
|
|
|
Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
|
2011-05-28 12:07:29 +08:00
|
|
|
Ops.push_back(MemAddr);
|
|
|
|
Ops.push_back(getAL(CurDAG));
|
|
|
|
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
|
|
|
|
Ops.push_back(Chain);
|
|
|
|
|
2012-11-17 05:55:34 +08:00
|
|
|
unsigned NewOpc = isThumb ? ARM::t2STREXD : ARM::STREXD;
|
2011-05-28 12:07:29 +08:00
|
|
|
|
2013-04-20 06:22:57 +08:00
|
|
|
SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
|
2011-05-28 12:07:29 +08:00
|
|
|
// Transfer memoperands.
|
|
|
|
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
|
|
|
|
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
|
|
|
|
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
|
|
|
|
|
|
|
|
return St;
|
|
|
|
}
|
|
|
|
|
2010-03-23 13:25:43 +08:00
|
|
|
case Intrinsic::arm_neon_vld1: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
|
|
|
|
ARM::VLD1d32, ARM::VLD1d64 };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
|
|
|
|
ARM::VLD1q32, ARM::VLD1q64};
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
|
2010-03-23 13:25:43 +08:00
|
|
|
}
|
|
|
|
|
2009-08-27 01:39:53 +08:00
|
|
|
case Intrinsic::arm_neon_vld2: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
|
|
|
|
ARM::VLD2d32, ARM::VLD1q64 };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
|
|
|
|
ARM::VLD2q32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
|
2009-08-27 01:39:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vld3: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
|
|
|
|
ARM::VLD3d16Pseudo,
|
|
|
|
ARM::VLD3d32Pseudo,
|
|
|
|
ARM::VLD1d64TPseudo };
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
|
|
|
|
ARM::VLD3q16Pseudo_UPD,
|
|
|
|
ARM::VLD3q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
|
|
|
|
ARM::VLD3q16oddPseudo,
|
|
|
|
ARM::VLD3q32oddPseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
|
2009-08-27 01:39:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vld4: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
|
|
|
|
ARM::VLD4d16Pseudo,
|
|
|
|
ARM::VLD4d32Pseudo,
|
|
|
|
ARM::VLD1d64QPseudo };
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
|
|
|
|
ARM::VLD4q16Pseudo_UPD,
|
|
|
|
ARM::VLD4q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
|
|
|
|
ARM::VLD4q16oddPseudo,
|
|
|
|
ARM::VLD4q32oddPseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
|
2009-08-27 01:39:53 +08:00
|
|
|
}
|
|
|
|
|
2009-09-01 12:26:28 +08:00
|
|
|
case Intrinsic::arm_neon_vld2lane: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
|
|
|
|
ARM::VLD2LNd16Pseudo,
|
|
|
|
ARM::VLD2LNd32Pseudo };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
|
|
|
|
ARM::VLD2LNq32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
|
2009-09-01 12:26:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vld3lane: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
|
|
|
|
ARM::VLD3LNd16Pseudo,
|
|
|
|
ARM::VLD3LNd32Pseudo };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
|
|
|
|
ARM::VLD3LNq32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
|
2009-09-01 12:26:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vld4lane: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
|
|
|
|
ARM::VLD4LNd16Pseudo,
|
|
|
|
ARM::VLD4LNd32Pseudo };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
|
|
|
|
ARM::VLD4LNq32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
|
2009-09-01 12:26:28 +08:00
|
|
|
}
|
|
|
|
|
2010-03-23 14:20:33 +08:00
|
|
|
case Intrinsic::arm_neon_vst1: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
|
|
|
|
ARM::VST1d32, ARM::VST1d64 };
|
|
|
|
static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
|
|
|
|
ARM::VST1q32, ARM::VST1q64 };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
|
2010-03-23 14:20:33 +08:00
|
|
|
}
|
|
|
|
|
2009-08-27 01:39:53 +08:00
|
|
|
case Intrinsic::arm_neon_vst2: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
|
|
|
|
ARM::VST2d32, ARM::VST1q64 };
|
|
|
|
static uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
|
|
|
|
ARM::VST2q32Pseudo };
|
2011-02-08 01:43:21 +08:00
|
|
|
return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
|
2009-08-27 01:39:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::arm_neon_vst3: {
|
2012-05-24 13:17:00 +08:00
|
|
|
static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
|
|
|
|
ARM::VST3d16Pseudo,
|
|
|
|
ARM::VST3d32Pseudo,
|
|
|
|
ARM::VST1d64TPseudo };
|
|
|
|
static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
|
|
|
|
ARM::VST3q16Pseudo_UPD,
|
|
|
|
ARM::VST3q32Pseudo_UPD };
|
|
|
|
static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
|
|
|
|
ARM::VST3q16oddPseudo,
|
|
|
|
ARM::VST3q32oddPseudo };
      return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst4: {
      static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
                                           ARM::VST4d16Pseudo,
                                           ARM::VST4d32Pseudo,
                                           ARM::VST1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
                                            ARM::VST4q16Pseudo_UPD,
                                            ARM::VST4q32Pseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
                                            ARM::VST4q16oddPseudo,
                                            ARM::VST4q32oddPseudo };
      return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst2lane: {
      static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
                                           ARM::VST2LNd16Pseudo,
                                           ARM::VST2LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
                                           ARM::VST2LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst3lane: {
      static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
                                           ARM::VST3LNd16Pseudo,
                                           ARM::VST3LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
                                           ARM::VST3LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst4lane: {
      static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
                                           ARM::VST4LNd16Pseudo,
                                           ARM::VST4LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
                                           ARM::VST4LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
    }
    }
    break;
  }

  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;

    case Intrinsic::arm_neon_vtbl2:
      return SelectVTBL(N, false, 2, ARM::VTBL2);
    case Intrinsic::arm_neon_vtbl3:
      return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
    case Intrinsic::arm_neon_vtbl4:
      return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
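
    // The wider table lookups (vtbl3/vtbl4 above and vtbx3/vtbx4 below) are
    // selected as pseudo instructions; they are expanded after register
    // allocation, once the table lives in consecutive D registers.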
    case Intrinsic::arm_neon_vtbx2:
      return SelectVTBL(N, true, 2, ARM::VTBX2);
    case Intrinsic::arm_neon_vtbx3:
      return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
    case Intrinsic::arm_neon_vtbx4:
      return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
    }
    break;
  }

  case ARMISD::VTBL1: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    SmallVector<SDValue, 6> Ops;
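
    // Operand 0 holds the table (a single D register) and operand 1 the byte
    // indices; the AL condition code and a null register below are the usual
    // ARM predicate operands.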
    Ops.push_back(N->getOperand(0));
    Ops.push_back(N->getOperand(1));
    Ops.push_back(getAL(CurDAG));                    // Predicate
    Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
    return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
  }
  case ARMISD::VTBL2: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);

    // Form a REG_SEQUENCE to force register allocation.
    SDValue V0 = N->getOperand(0);
    SDValue V1 = N->getOperand(1);
    SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
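    // Pairing V0 and V1 guarantees they are allocated to consecutive D
    // registers, which is what the two-register VTBL requires.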

    SmallVector<SDValue, 6> Ops;
    Ops.push_back(RegSeq);
    Ops.push_back(N->getOperand(2));
    Ops.push_back(getAL(CurDAG));                    // Predicate
    Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
    return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
  }

  case ISD::CONCAT_VECTORS:
    return SelectConcatVector(N);

  case ISD::ATOMIC_LOAD:
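    // Only 64-bit atomic loads and stores need target pseudos here; narrower
    // widths break out and are handled by the normal table-driven selection.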
    if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
      return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_LOAD_I64);
    else
      break;

  case ISD::ATOMIC_STORE:
    if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
      return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_STORE_I64);
    else
      break;

  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_ADD_I8,
                        ARM::ATOMIC_LOAD_ADD_I16,
                        ARM::ATOMIC_LOAD_ADD_I32,
                        ARM::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_SUB_I8,
                        ARM::ATOMIC_LOAD_SUB_I16,
                        ARM::ATOMIC_LOAD_SUB_I32,
                        ARM::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_AND_I8,
                        ARM::ATOMIC_LOAD_AND_I16,
                        ARM::ATOMIC_LOAD_AND_I32,
                        ARM::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_OR_I8,
                        ARM::ATOMIC_LOAD_OR_I16,
                        ARM::ATOMIC_LOAD_OR_I32,
                        ARM::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_XOR_I8,
                        ARM::ATOMIC_LOAD_XOR_I16,
                        ARM::ATOMIC_LOAD_XOR_I32,
                        ARM::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_NAND_I8,
                        ARM::ATOMIC_LOAD_NAND_I16,
                        ARM::ATOMIC_LOAD_NAND_I32,
                        ARM::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_MIN_I8,
                        ARM::ATOMIC_LOAD_MIN_I16,
                        ARM::ATOMIC_LOAD_MIN_I32,
                        ARM::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_MAX_I8,
                        ARM::ATOMIC_LOAD_MAX_I16,
                        ARM::ATOMIC_LOAD_MAX_I32,
                        ARM::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_UMIN_I8,
                        ARM::ATOMIC_LOAD_UMIN_I16,
                        ARM::ATOMIC_LOAD_UMIN_I32,
                        ARM::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(N,
                        ARM::ATOMIC_LOAD_UMAX_I8,
                        ARM::ATOMIC_LOAD_UMAX_I16,
                        ARM::ATOMIC_LOAD_UMAX_I32,
                        ARM::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(N,
                        ARM::ATOMIC_SWAP_I8,
                        ARM::ATOMIC_SWAP_I16,
                        ARM::ATOMIC_SWAP_I32,
                        ARM::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(N,
                        ARM::ATOMIC_CMP_SWAP_I8,
                        ARM::ATOMIC_CMP_SWAP_I16,
                        ARM::ATOMIC_CMP_SWAP_I32,
                        ARM::ATOMIC_CMP_SWAP_I64);
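
  // Each ATOMIC_* case above selects a target pseudo instruction; after
  // selection these pseudos are expanded into ldrex/strex based loops
  // (ldrexd/strexd for the 64-bit variants).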
  }

  return SelectCode(N);
}

SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N) {
  std::vector<SDValue> AsmNodeOperands;
  unsigned Flag, Kind;
  bool Changed = false;
  unsigned NumOps = N->getNumOperands();

  // Normally, i64 data is bound to two arbitrary GPRs for the "%r"
  // constraint. However, some instructions (e.g. ldrexd/strexd in ARM mode)
  // require an (even, even+1) GPR pair and use %n and %Hn to refer to the
  // individual regs respectively. Since there is no constraint to explicitly
  // specify a reg pair, we use the GPRPair reg class for "%r" for 64-bit
  // data. For Thumb, the 64-bit data may be referred to by the H, Q, and R
  // modifiers, so we still pack them into a GPRPair.
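  //
  // For illustration only (not a construct taken from this file): inline asm
  // such as
  //   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(Val) : "r"(Addr));
  // with a 64-bit Val is the kind of input this rewrite targets; the "=&r"
  // output becomes a single GPRPair operand, so ldrexd is guaranteed an
  // even/odd register pair.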

  SDLoc dl(N);
  SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0);

  SmallVector<bool, 8> OpChanged;
  // Glue node will be appended late.
  for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
    SDValue op = N->getOperand(i);
    AsmNodeOperands.push_back(op);

    if (i < InlineAsm::Op_FirstOperand)
      continue;

    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
      Flag = C->getZExtValue();
      Kind = InlineAsm::getKind(Flag);
    }
    else
      continue;

    // Immediate operands to inline asm in the SelectionDAG are modeled with
    // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
    // the second is a constant with the value of the immediate. If we get here
    // and we have a Kind_Imm, skip the next operand, and continue.
    if (Kind == InlineAsm::Kind_Imm) {
      SDValue op = N->getOperand(++i);
      AsmNodeOperands.push_back(op);
      continue;
    }

    unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
    if (NumRegs)
      OpChanged.push_back(false);

    unsigned DefIdx = 0;
    bool IsTiedToChangedOp = false;
    // If it's a use that is tied with a previous def, it has no
    // reg class constraint.
    if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
      IsTiedToChangedOp = OpChanged[DefIdx];

    if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
        && Kind != InlineAsm::Kind_RegDefEarlyClobber)
      continue;

    unsigned RC;
    bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
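    // Only rewrite operands that occupy exactly two GPRs under a plain GPR
    // class constraint (or that are tied to an operand already rewritten);
    // everything else is left untouched.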
    if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
        || NumRegs != 2)
      continue;

    assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
    SDValue V0 = N->getOperand(i+1);
    SDValue V1 = N->getOperand(i+2);
    unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
    unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
    SDValue PairedReg;
    MachineRegisterInfo &MRI = MF->getRegInfo();

    if (Kind == InlineAsm::Kind_RegDef ||
        Kind == InlineAsm::Kind_RegDefEarlyClobber) {
      // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
      // the original GPRs.

      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      SDValue Chain = SDValue(N,0);

      SDNode *GU = N->getGluedUser();
      SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
                                               Chain.getValue(1));

      // Extract values from a GPRPair reg and copy to the original GPR reg.
      SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                    RegCopy);
      SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                    RegCopy);
      SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
                                        RegCopy.getValue(1));
      SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));

      // Update the original glue user.
      std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
      Ops.push_back(T1.getValue(1));
      CurDAG->UpdateNodeOperands(GU, &Ops[0], Ops.size());
      GU = T1.getNode();
    }
    else {
      // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
      // GPRPair and then pass the GPRPair to the inline asm.
      SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];

      // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
      SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
                                          Chain.getValue(1));
      SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
                                          T0.getValue(1));
      SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);

      // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
      // i32 VRs of inline asm with it.
      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));

      AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
      Glue = Chain.getValue(1);
    }

    Changed = true;

    if (PairedReg.getNode()) {
      OpChanged[OpChanged.size() - 1] = true;
      Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum */);
      if (IsTiedToChangedOp)
        Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
      else
        Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
      // Replace the current flag.
      AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
          Flag, MVT::i32);
      // Add the new register node and skip the original two GPRs.
      AsmNodeOperands.push_back(PairedReg);
      // Skip the next two GPRs.
      i += 2;
    }
  }

  if (Glue.getNode())
    AsmNodeOperands.push_back(Glue);
  if (!Changed)
    return NULL;

  SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
                                CurDAG->getVTList(MVT::Other, MVT::Glue),
                                &AsmNodeOperands[0], AsmNodeOperands.size());
  New->setNodeId(-1);
  return New.getNode();
}

bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// createARMISelDag - This pass converts a legalized DAG into a
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}