//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

namespace llvm {

class ARMSubtarget;
class InstrItineraryData;

namespace ARMISD {

// ARM Specific DAG Nodes
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  Wrapper,     // Wrapper - A wrapper node for TargetConstantPool,
               // TargetExternalSymbol, and TargetGlobalAddress.
  WrapperPIC,  // WrapperPIC - A wrapper node for TargetGlobalAddress in
               // PIC mode.
  WrapperJT,   // WrapperJT - A wrapper node for TargetJumpTable

  // Add pseudo op to model memcpy for struct byval.
  COPY_STRUCT_BYVAL,

  CALL,        // Function call.
  CALL_PRED,   // Function call that's predicable.
  CALL_NOLINK, // Function call with branch not branch-and-link.
  BRCOND,      // Conditional branch.
  BR_JT,       // Jumptable branch.
  BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
  RET_FLAG,    // Return with a flag operand.
  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

  PIC_ADD,     // Add with a PC operand and a PIC label.

  CMP,         // ARM compare instructions.
  CMN,         // ARM CMN instructions.
  CMPZ,        // ARM compare that sets only Z flag.
  CMPFP,       // ARM VFP compare instruction, sets FPSCR.
  CMPFPw0,     // ARM VFP compare against zero instruction, sets FPSCR.
  FMSTAT,      // ARM fmstat instruction.

  CMOV,        // ARM conditional move instructions.

  SSAT,        // Signed saturation

  BCC_i64,

  SRL_FLAG,    // V,Flag = srl_flag X -> srl X, 1 + save carry out.
  SRA_FLAG,    // V,Flag = sra_flag X -> sra X, 1 + save carry out.
  RRX,         // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

  ADDC,        // Add with carry
  ADDE,        // Add using carry
  SUBC,        // Sub with carry
  SUBE,        // Sub using carry

  VMOVRRD,     // double to two gprs.
  VMOVDRR,     // Two gprs to double.

  EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
  EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

  TC_RETURN,   // Tail call return pseudo.

  THREAD_POINTER,

  DYN_ALLOC,   // Dynamic allocation on the stack.

  MEMBARRIER_MCR, // Memory barrier (MCR)

  PRELOAD,     // Preload

  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
  WIN__DBZCHK, // Windows' divide by zero check

  VCEQ,        // Vector compare equal.
  VCEQZ,       // Vector compare equal to zero.
  VCGE,        // Vector compare greater than or equal.
  VCGEZ,       // Vector compare greater than or equal to zero.
  VCLEZ,       // Vector compare less than or equal to zero.
  VCGEU,       // Vector compare unsigned greater than or equal.
  VCGT,        // Vector compare greater than.
  VCGTZ,       // Vector compare greater than zero.
  VCLTZ,       // Vector compare less than zero.
  VCGTU,       // Vector compare unsigned greater than.
  VTST,        // Vector test bits.

  // Vector shift by immediate:
  VSHL,        // ...left
  VSHRs,       // ...right (signed)
  VSHRu,       // ...right (unsigned)

  // Vector rounding shift by immediate:
  VRSHRs,      // ...right (signed)
  VRSHRu,      // ...right (unsigned)
  VRSHRN,      // ...right narrow

  // Vector saturating shift by immediate:
  VQSHLs,      // ...left (signed)
  VQSHLu,      // ...left (unsigned)
  VQSHLsu,     // ...left (signed to unsigned)
  VQSHRNs,     // ...right narrow (signed)
  VQSHRNu,     // ...right narrow (unsigned)
  VQSHRNsu,    // ...right narrow (signed to unsigned)

  // Vector saturating rounding shift by immediate:
  VQRSHRNs,    // ...right narrow (signed)
  VQRSHRNu,    // ...right narrow (unsigned)
  VQRSHRNsu,   // ...right narrow (signed to unsigned)

  // Vector shift and insert:
  VSLI,        // ...left
  VSRI,        // ...right

  // Vector get lane (VMOV scalar to ARM core register)
  // (These are used for 8- and 16-bit element types only.)
  VGETLANEu,   // zero-extend vector extract element
  VGETLANEs,   // sign-extend vector extract element

  // Vector move immediate and move negated immediate:
  VMOVIMM,
  VMVNIMM,

  // Vector move f32 immediate:
  VMOVFPIMM,

  // Vector duplicate:
  VDUP,
  VDUPLANE,

  // Vector shuffles:
  VEXT,        // extract
  VREV64,      // reverse elements within 64-bit doublewords
  VREV32,      // reverse elements within 32-bit words
  VREV16,      // reverse elements within 16-bit halfwords
  VZIP,        // zip (interleave)
  VUZP,        // unzip (deinterleave)
  VTRN,        // transpose
  VTBL1,       // 1-register shuffle with mask
  VTBL2,       // 2-register shuffle with mask

  // Vector multiply long:
  VMULLs,      // ...signed
  VMULLu,      // ...unsigned

  SMULWB,      // Signed multiply word by half word, bottom
  SMULWT,      // Signed multiply word by half word, top
  UMLAL,       // 64-bit Unsigned Accumulate Multiply
  SMLAL,       // 64-bit Signed Accumulate Multiply
  UMAAL,       // 64-bit Unsigned Accumulate Accumulate Multiply
  SMLALBB,     // 64-bit signed accumulate multiply bottom, bottom 16
  SMLALBT,     // 64-bit signed accumulate multiply bottom, top 16
  SMLALTB,     // 64-bit signed accumulate multiply top, bottom 16
  SMLALTT,     // 64-bit signed accumulate multiply top, top 16
  SMLALD,      // Signed multiply accumulate long dual
  SMLALDX,     // Signed multiply accumulate long dual exchange
  SMLSLD,      // Signed multiply subtract long dual
  SMLSLDX,     // Signed multiply subtract long dual exchange

  // Operands of the standard BUILD_VECTOR node are not legalized, which
  // is fine if BUILD_VECTORs are always lowered to shuffles or other
  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
  // operands need to be legalized. Define an ARM-specific version of
  // BUILD_VECTOR for this purpose.
  BUILD_VECTOR,

  // Bit-field insert
  BFI,

  // Vector OR with immediate
  VORRIMM,
  // Vector AND with NOT of immediate
  VBICIMM,

  // Vector bitwise select
  VBSL,

  // Pseudo-instruction representing a memory copy using ldm/stm
  // instructions.
  MEMCPY,

  // Vector load N-element structure to all lanes:
  VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VLD2DUP,
  VLD3DUP,
  VLD4DUP,

  // NEON loads with post-increment base updates:
  VLD1_UPD,
  VLD2_UPD,
  VLD3_UPD,
  VLD4_UPD,
  VLD2LN_UPD,
  VLD3LN_UPD,
  VLD4LN_UPD,
  VLD1DUP_UPD,
  VLD2DUP_UPD,
  VLD3DUP_UPD,
  VLD4DUP_UPD,

  // NEON stores with post-increment base updates:
  VST1_UPD,
  VST2_UPD,
  VST3_UPD,
  VST4_UPD,
  VST2LN_UPD,
  VST3LN_UPD,
  VST4LN_UPD
};

} // end namespace ARMISD

/// Define some predicates that are used for node matching.
namespace ARM {

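// Illustrative note, not a normative spec (an assumption based on how this
// predicate is used when forming BFI/BFC patterns): a value such as
// 0xFFFF00FF, whose complement 0x0000FF00 is one contiguous run of set bits,
// is the shape of mask expected here, whereas 0xF0F0F0F0 is not.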
bool isBitFieldInvertedMask(unsigned v);

} // end namespace ARM

//===--------------------------------------------------------------------===//
//  ARMTargetLowering - ARM Implementation of the TargetLowering interface

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of a node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the second argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      unsigned Align,
                                      bool *Fast) const override;

  EVT getOptimalMemOpType(uint64_t Size,
                          unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset,
                          bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  using TargetLowering::isZExtFree;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// getScalingFactorCost - Return the cost of the scaling used in
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value must be negative.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

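  // For illustration (an assumption about the form being checked, not an
  // exhaustive rule): a Thumb-2 scaled address is something like
  // "ldr r0, [r1, r2, lsl #2]", i.e. base plus a register offset shifted left
  // by a small constant.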
  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// \brief Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is, the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
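  /// For example (illustrative only; the exact rule is subtarget-dependent
  /// and also considers the negated form used by CMN): an immediate that fits
  /// the rotated 8-bit modified-immediate encoding, such as 0xFF00, is
  /// typically legal, while an arbitrary constant such as 0x12345678 is not.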
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is legal
  /// add immediate, that is, the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

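  // For illustration: a pre-indexed access corresponds to an instruction such
  // as "ldr r0, [r1, #4]!" (base updated before the access), while a
  // post-indexed access corresponds to "ldr r0, [r1], #4" (base updated
  // afterwards).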
  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraints of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    else if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  const ARMSubtarget *getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *getRegClassFor(MVT VT) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              unsigned &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          unsigned Intrinsic) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  /// \brief Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;
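
  // The maximum interleave factor of 4 lines up with NEON's structured memory
  // instructions (vld2/vld3/vld4 and vst2/vst3/vst4).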
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

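  /// Schematic example of the IR these hooks handle (element types and factors
  /// vary; the shuffles are shown in shorthand):
  ///   %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
  ///   %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>
  ///   %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>
  /// Such an interleaved load can be rewritten into a single
  /// llvm.arm.neon.vld2 call whose two results replace the shuffles.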
  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  void finalizeLowering(MachineFunction &MF) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // check.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                            SDValue &ARMcc) const;

  typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        ISD::ArgFlagsTy Flags) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;

  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                           const SDLoc &dl, SelectionDAG &DAG,
                           const CCValAssign &VA,
                           ISD::ArgFlagsTy Flags) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  ///
  /// ARM supports both fused and unfused multiply-add operations; we already
  /// lower a pair of fmul and fadd to the latter so it's not clear that there
  /// would be a gain or that the gain would be worthwhile enough to risk
  /// correctness bugs.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, unsigned) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
                                         CallingConv::ID CalleeCC,
                                         bool isVarArg,
                                         bool isCalleeStructRet,
                                         bool isCallerStructRet,
                                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                                         const SmallVectorImpl<SDValue> &OutVals,
                                         const SmallVectorImpl<ISD::InputArg> &Ins,
                                         SelectionDAG &DAG) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool InvalidOnQNaN) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
};

enum NEONModImmType {
  VMOVModImm,
  VMVNModImm,
  OtherModImm
};

namespace ARM {

FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H