//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch, not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.
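      // Note: combines in this backend can fold a CMOV whose false value is
      // zero into a single predicated ALU op, so that e.g.
      //   (or x, (cmov 0, y, cond))  ->  or.cond x, y  ("orreq lr, lr, #1")
      // is emitted rather than a cmp + mov + orr sequence.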

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
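      // Hedged sketch of how these cooperate: an i64 shift right by one is
      // typically lowered as SRL_FLAG/SRA_FLAG on the high word (saving the
      // bit shifted out in the carry flag) followed by RRX on the low word,
      // where RRX(X) = (carry << 31) | (X >> 1).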

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry
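      // Hedged example: an i64 add is typically split into an adds/adc-style
      // pair chained through the carry flag,
      //   (lo, carry) = ADDC(a.lo, b.lo)
      //   hi          = ADDE(a.hi, b.hi, carry)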

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask
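      // Note (from the change introducing these): a VTBL is emitted when it
      // beats expanding a shuffle into a long per-lane vst1.8/vld1 sequence;
      // VTBL is not especially fast, but it is far better than that expansion.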

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit Unsigned Accumulate Multiply
      SMLAL,        // 64-bit Signed Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
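
    // Purely illustrative sketch (not part of this interface): the nodes
    // above are built like any other SDNode in the lowering code, e.g. an
    // ARMISD::CMOV fed by a compare from the getARMCmp() helper declared
    // further down (the exact operand order shown here is an assumption):
    //
    //   SDValue ARMcc;
    //   SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    //   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    //   return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
    //                      CCR, Cmp);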
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
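    // Hedged example: 0xffff00ff is an "inverted" bit-field mask (its
    // complement, 0x0000ff00, is a single contiguous run of ones), the shape
    // the BFI/BFC-forming combines look for; 0xff00ff00 is not.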
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface
  //
  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual bool isSelectSupported(SelectSupportKind Kind) const {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
    AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;

    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;

    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is legal
    /// as an icmp immediate; that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
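    // Hedged example: "cmp r0, #255" is directly encodable on ARM (an 8-bit
    // value rotated right by an even amount), whereas an immediate such as
    // 257 would first have to be materialized into a register.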

    /// isLegalAddImmediate - Return true if the specified immediate is legal
    /// as an add immediate; that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;
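    // Hedged example: a pre-indexed access writes the new address back before
    // the access, as in "ldr r0, [r1, #4]!" (r1 becomes r1 + 4, then loads).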

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
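    // Hedged example: the post-indexed counterpart updates the base after
    // the access, as in "ldr r0, [r1], #4" (loads from r1, then r1 += 4).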

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true, it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
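    // Hedged example: with VFPv3's 8-bit VMOV floating-point immediate
    // encoding (values of the form +/-(m/16) * 2^e, 16 <= m <= 31,
    // -3 <= e <= 4), constants such as 1.0f and 2.0f are legal, while a
    // value like 1.3f must come from a constant pool.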

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
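    // Note: LowerSELECT exists because expanding SELECT by default produced
    // a cascade of SELECT_CC nodes and very poor code; the custom lowering
    // emits a single compare plus predicated move instead (summarizing the
    // change that introduced it).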
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned ArgSize,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;
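    // AAPCS detail this hook accounts for (from the fix that reworked it):
    // a byval aggregate requiring 8-byte alignment rounds the next core
    // register number up to an even register, so e.g. only r2/r3 may be
    // used for it even when r1 is still free.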

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<SDValue> &OutVals,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;

    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
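
    // Hedged sketch: these expansions emit explicit control flow; an atomic
    // binary op typically becomes an ldrex/strex retry loop of the shape
    //   loop: ldrex  r0, [addr]
    //         <op>   r1, r0, incoming
    //         strex  r2, r1, [addr]
    //         cmp    r2, #0
    //         bne    loop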
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;
    MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
                                        MachineBasicBlock *BB) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };
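
  // Hedged example: a per-lane splat of 0x000000ff fits a "vmov.i32"
  // encoding (VMOVModImm), while its complement 0xffffff00 is reached via
  // "vmvn.i32" (VMVNModImm) instead.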

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif // ARMISELLOWERING_H