2007-01-19 15:51:42 +08:00
|
|
|
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2007-01-19 15:51:42 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file defines the interfaces that ARM uses to lower LLVM code into a
|
|
|
|
// selection DAG.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "ARM.h"
|
|
|
|
#include "ARMAddressingModes.h"
|
|
|
|
#include "ARMConstantPoolValue.h"
|
|
|
|
#include "ARMISelLowering.h"
|
|
|
|
#include "ARMMachineFunctionInfo.h"
|
|
|
|
#include "ARMRegisterInfo.h"
|
|
|
|
#include "ARMSubtarget.h"
|
|
|
|
#include "ARMTargetMachine.h"
|
2009-08-02 08:34:36 +08:00
|
|
|
#include "ARMTargetObjectFile.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/CallingConv.h"
|
|
|
|
#include "llvm/Constants.h"
|
2009-04-18 03:07:39 +08:00
|
|
|
#include "llvm/Function.h"
|
2007-03-16 16:43:56 +08:00
|
|
|
#include "llvm/Instruction.h"
|
2007-11-09 01:20:05 +08:00
|
|
|
#include "llvm/Intrinsics.h"
|
2007-12-31 12:13:23 +08:00
|
|
|
#include "llvm/GlobalValue.h"
|
2009-04-18 03:07:39 +08:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2007-12-31 12:13:23 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2009-04-18 03:07:39 +08:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
2007-01-31 16:40:13 +08:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/ADT/VectorExtras.h"
|
2009-07-09 02:01:40 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2007-03-13 07:30:29 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-04-18 03:07:39 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State);
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-04-18 03:07:39 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State);
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-04-18 03:07:39 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State);
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-04-18 03:07:39 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State);
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// addTypeForNEON - Configure operation actions for a NEON vector type.
/// Loads/stores of VT are promoted to PromotedLdStVT when the two differ,
/// and (for integer vectors) the bitwise ops are promoted to
/// PromotedBitwiseVT; most other vector operations get custom lowering.
void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  MVT SVT = VT.getSimpleVT();

  // Promote loads and stores to the type we actually load/store NEON
  // registers as, when that differs from VT.
  if (VT != PromotedLdStVT) {
    MVT LdStSVT = PromotedLdStVT.getSimpleVT();
    setOperationAction(ISD::LOAD, SVT, Promote);
    AddPromotedToType (ISD::LOAD, SVT, LdStSVT);

    setOperationAction(ISD::STORE, SVT, Promote);
    AddPromotedToType (ISD::STORE, SVT, LdStSVT);
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, SVT, Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom);
  setOperationAction(ISD::BUILD_VECTOR, SVT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, SVT, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, SVT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, SVT, Custom);

  // Integer vector shifts are custom lowered.
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, SVT, Custom);
    setOperationAction(ISD::SRA, SVT, Custom);
    setOperationAction(ISD::SRL, SVT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    MVT BitwiseSVT = PromotedBitwiseVT.getSimpleVT();
    setOperationAction(ISD::AND, SVT, Promote);
    AddPromotedToType (ISD::AND, SVT, BitwiseSVT);
    setOperationAction(ISD::OR,  SVT, Promote);
    AddPromotedToType (ISD::OR,  SVT, BitwiseSVT);
    setOperationAction(ISD::XOR, SVT, Promote);
    AddPromotedToType (ISD::XOR, SVT, BitwiseSVT);
  }
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// addDRTypeForNEON - Register a 64-bit vector type in the NEON D-register
/// class and set up its operation actions.
void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  // D registers are loaded/stored as f64; bitwise ops promote to v2i32.
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// addQRTypeForNEON - Register a 128-bit vector type in the NEON Q-register
/// class and set up its operation actions.
void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  // Q registers are loaded/stored as v2f64; bitwise ops promote to v4i32.
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
|
|
|
|
|
2009-07-28 11:13:23 +08:00
|
|
|
/// createTLOF - Select the lowering object-file implementation for the
/// target: Mach-O on Darwin, ELF everywhere else.  Ownership of the
/// returned object passes to the TargetLowering base class.
static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  const ARMSubtarget &ST = TM.getSubtarget<ARMSubtarget>();
  if (ST.isTargetDarwin())
    return new TargetLoweringObjectFileMachO();
  return new ARMElfTargetObjectFile();
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// ARMTargetLowering constructor - Configure how LLVM IR operations are
/// lowered for the ARM target: libcall names, legal register classes,
/// per-operation legalize actions (Legal/Expand/Custom/Promote), DAG
/// combines, and if-conversion limits, all keyed off subtarget features.
ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)), ARMPCLabelIndex(0) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      // The vfp comparison libcalls return a nonzero/zero result, so the
      // condition used to interpret them is SETNE (SETEQ for "ordered",
      // which shares the __unord* routine with its result inverted).
      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  // Thumb1 can only use the low registers for most operations, so use the
  // restricted tGPR class there.
  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    // 64-bit (D-register) vector types.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    // 128-bit (Q-register) vector types.
    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL,  MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,  MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    // ARMv6+ has SMMUL, so MULHS only needs expanding on older cores.
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  // CLZ was introduced in ARMv5T and is not available in Thumb1 encodings.
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  setOperationAction(ISD::SDIV,    MVT::i32, Expand);
  setOperationAction(ISD::UDIV,    MVT::i32, Expand);
  setOperationAction(ISD::SREM,    MVT::i32, Expand);
  setOperationAction(ISD::UREM,    MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,           MVT::Other, Custom);
  setOperationAction(ISD::VAARG,             MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,            MVT::Other, Expand);
  setOperationAction(ISD::VAEND,             MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,       MVT::i32,   Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  if (Subtarget->isThumb())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Expand);

  // Sign/zero extension of sub-word values in a register (SXTB/SXTH etc.)
  // requires ARMv6 or Thumb2 encodings.
  if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
    // Turn f64->i64 into FMRRD, i64 -> f64 to FMDRR iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN,  MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID,     MVT::Other, Custom);

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  // int <-> fp are custom expanded into bit_convert + ARMISD ops.
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::FMRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);

  setStackPointerRegisterToSaveRestore(ARM::SP);
  setSchedulingPreference(SchedulingForRegPressure);
  setIfCvtBlockSizeLimit(Subtarget->isThumb() ? 0 : 10);
  setIfCvtDupBlockSizeLimit(Subtarget->isThumb() ? 0 : 2);

  if (!Subtarget->isThumb()) {
    // Use branch latency information to determine if-conversion limits.
    // FIXME: If-converter should use instruction latency of the branch being
    // eliminated to compute the threshold. For ARMv6, the branch "latency"
    // varies depending on whether it's dynamically or statically predicted
    // and on whether the destination is in the prefetch buffer.
    const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
    const InstrItineraryData &InstrItins = Subtarget->getInstrItineraryData();
    unsigned Latency= InstrItins.getLatency(TII->get(ARM::Bcc).getSchedClass());
    if (Latency > 1) {
      setIfCvtBlockSizeLimit(Latency-1);
      if (Latency > 2)
        setIfCvtDupBlockSizeLimit(Latency-2);
    } else {
      setIfCvtBlockSizeLimit(10);
      setIfCvtDupBlockSizeLimit(2);
    }
  }

  maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
  // Do not enable CodePlacementOpt for now: it currently runs after the
  // ARMConstantIslandPass and messes up branch relaxation and placement
  // of constant islands.
  // benefitFromCodePlacementOpt = true;
}
|
|
|
|
|
|
|
|
/// getTargetNodeName - Return a readable name for the given ARM-specific
/// SelectionDAG node opcode (for debug dumps), or null for opcodes that
/// are not ARMISD nodes.
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  // fp <-> int conversion nodes.
  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  // Flag-setting shifts used for 64-bit shift lowering.
  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  // GPR pair <-> double-precision register moves.
  case ARMISD::FMRRD:         return "ARMISD::FMRRD";
  case ARMISD::FMDRR:         return "ARMISD::FMDRR";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  // NEON vector comparison nodes.
  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  // NEON vector shift nodes.
  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VDUPLANEQ:     return "ARMISD::VDUPLANEQ";
  // NEON structured load/store nodes.
  case ARMISD::VLD2D:         return "ARMISD::VLD2D";
  case ARMISD::VLD3D:         return "ARMISD::VLD3D";
  case ARMISD::VLD4D:         return "ARMISD::VLD4D";
  case ARMISD::VST2D:         return "ARMISD::VST2D";
  case ARMISD::VST3D:         return "ARMISD::VST3D";
  case ARMISD::VST4D:         return "ARMISD::VST4D";
  // NEON vector reverse nodes.
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  }
}
|
|
|
|
|
2009-07-02 02:50:55 +08:00
|
|
|
/// getFunctionAlignment - Return the Log2 alignment of this function.
|
2009-07-01 06:38:32 +08:00
|
|
|
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
|
|
|
|
return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
|
|
|
|
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Lowering Code
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
|
|
|
|
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
/// condition field value.  Signed comparisons map to GT/GE/LT/LE and
/// unsigned comparisons to HI/HS/LO/LS.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
|
|
|
|
|
|
|
|
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. It
|
|
|
|
/// returns true if the operands should be inverted to form the proper
|
|
|
|
/// comparison.
|
|
|
|
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. It
/// returns true if the operands should be inverted to form the proper
/// comparison.  Conditions that cannot be expressed with a single ARM
/// condition set CondCode2 to a second condition that must also be
/// checked (CondCode2 is ARMCC::AL when unused).
static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  bool Invert = false;
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  // Ordered less-than/less-equal use MI, or GT with operands swapped.
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::GT; Invert = true; break;
  // Ordered-and-unequal needs two checks: less than OR greater than.
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  // VC/VS test the FP "unordered" (overflow) flag after FMSTAT.
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  // Unordered-or-equal: equal OR unordered.
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
  return Invert;
}
|
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Calling Convention Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "ARMGenCallingConv.inc"
|
|
|
|
|
|
|
|
// APCS f64 is in register pairs, possibly split to stack
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
CCState &State, bool CanFail) {
|
|
|
|
static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
|
|
|
|
|
|
|
|
// Try to get the first register.
|
|
|
|
if (unsigned Reg = State.AllocateReg(RegList, 4))
|
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
|
|
|
|
else {
|
|
|
|
// For the 2nd half of a v2f64, do not fail.
|
|
|
|
if (CanFail)
|
|
|
|
return false;
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
// Put the whole thing on the stack.
|
|
|
|
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
|
|
|
|
State.AllocateStack(8, 4),
|
|
|
|
LocVT, LocInfo));
|
|
|
|
return true;
|
|
|
|
}
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
// Try to get the second register.
|
|
|
|
if (unsigned Reg = State.AllocateReg(RegList, 4))
|
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
|
2009-04-18 04:40:45 +08:00
|
|
|
else
|
|
|
|
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
|
|
|
|
State.AllocateStack(4, 4),
|
2009-06-23 07:27:02 +08:00
|
|
|
LocVT, LocInfo));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State) {
|
|
|
|
if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
|
|
|
|
return false;
|
2009-08-12 04:47:22 +08:00
|
|
|
if (LocVT == MVT::v2f64 &&
|
2009-06-23 07:27:02 +08:00
|
|
|
!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
|
|
|
|
return false;
|
2009-04-18 04:40:45 +08:00
|
|
|
return true; // we handled it
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// AAPCS f64 is in aligned register pairs
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
CCState &State, bool CanFail) {
|
2009-04-18 03:07:39 +08:00
|
|
|
static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
|
|
|
|
static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
|
|
|
|
|
2009-04-18 04:40:45 +08:00
|
|
|
unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
|
2009-06-23 07:27:02 +08:00
|
|
|
if (Reg == 0) {
|
|
|
|
// For the 2nd half of a v2f64, do not just fail.
|
|
|
|
if (CanFail)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Put the whole thing on the stack.
|
|
|
|
State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
|
|
|
|
State.AllocateStack(8, 8),
|
|
|
|
LocVT, LocInfo));
|
|
|
|
return true;
|
|
|
|
}
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-04-18 04:40:45 +08:00
|
|
|
unsigned i;
|
|
|
|
for (i = 0; i < 2; ++i)
|
|
|
|
if (HiRegList[i] == Reg)
|
|
|
|
break;
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
|
2009-04-18 04:40:45 +08:00
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
|
2009-06-23 07:27:02 +08:00
|
|
|
LocVT, LocInfo));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State) {
|
|
|
|
if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
|
|
|
|
return false;
|
2009-08-12 04:47:22 +08:00
|
|
|
if (LocVT == MVT::v2f64 &&
|
2009-06-23 07:27:02 +08:00
|
|
|
!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
|
|
|
|
return false;
|
2009-04-18 04:40:45 +08:00
|
|
|
return true; // we handled it
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo, CCState &State) {
|
2009-04-18 03:07:39 +08:00
|
|
|
static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
|
|
|
|
static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
|
|
|
|
|
2009-04-18 04:40:45 +08:00
|
|
|
unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
|
|
|
|
if (Reg == 0)
|
|
|
|
return false; // we didn't handle it
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-04-18 04:40:45 +08:00
|
|
|
unsigned i;
|
|
|
|
for (i = 0; i < 2; ++i)
|
|
|
|
if (HiRegList[i] == Reg)
|
|
|
|
break;
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
|
2009-04-18 04:40:45 +08:00
|
|
|
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
|
2009-06-23 07:27:02 +08:00
|
|
|
LocVT, LocInfo));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
|
2009-06-23 07:27:02 +08:00
|
|
|
CCValAssign::LocInfo &LocInfo,
|
|
|
|
ISD::ArgFlagsTy &ArgFlags,
|
|
|
|
CCState &State) {
|
|
|
|
if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
|
|
|
|
return false;
|
2009-08-12 04:47:22 +08:00
|
|
|
if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
|
2009-06-23 07:27:02 +08:00
|
|
|
return false;
|
2009-04-18 04:40:45 +08:00
|
|
|
return true; // we handled it
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
// AAPCS f64 returns use the same R0/R1 and R2/R3 pairs as APCS, so simply
// forward to the APCS handler.
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}
|
|
|
|
|
2009-06-17 02:50:49 +08:00
|
|
|
/// CCAssignFnForNode - Selects the correct CCAssignFn for a the
|
|
|
|
/// given CallingConvention value.
|
|
|
|
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(unsigned CC,
|
2009-08-06 03:04:42 +08:00
|
|
|
bool Return,
|
|
|
|
bool isVarArg) const {
|
2009-06-17 02:50:49 +08:00
|
|
|
switch (CC) {
|
|
|
|
default:
|
2009-08-06 03:04:42 +08:00
|
|
|
llvm_unreachable("Unsupported calling convention");
|
2009-06-17 02:50:49 +08:00
|
|
|
case CallingConv::C:
|
|
|
|
case CallingConv::Fast:
|
2009-08-06 03:04:42 +08:00
|
|
|
// Use target triple & subtarget features to do actual dispatch.
|
|
|
|
if (Subtarget->isAAPCS_ABI()) {
|
|
|
|
if (Subtarget->hasVFP2() &&
|
|
|
|
FloatABIType == FloatABI::Hard && !isVarArg)
|
|
|
|
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
|
|
|
|
else
|
|
|
|
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
|
|
|
|
} else
|
|
|
|
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
|
2009-06-17 02:50:49 +08:00
|
|
|
case CallingConv::ARM_AAPCS_VFP:
|
2009-08-06 03:04:42 +08:00
|
|
|
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
|
2009-06-17 02:50:49 +08:00
|
|
|
case CallingConv::ARM_AAPCS:
|
2009-08-06 03:04:42 +08:00
|
|
|
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
|
2009-06-17 02:50:49 +08:00
|
|
|
case CallingConv::ARM_APCS:
|
2009-08-06 03:04:42 +08:00
|
|
|
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
|
2009-06-17 02:50:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
/// Copies are chained through Chain and glued with InFlag so they stay
/// adjacent to the call; the final chain is returned and the lowered
/// result values are appended to InVals (one entry per element of Ins).
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   unsigned CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  // Note: custom (f64/v2f64) results consume multiple RVLocs entries, so
  // the loop advances i inside the body as well.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64: the value arrives as two i32 halves
      // that are recombined with FMDRR (i32 pair -> f64).
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      // Thread the chain/glue through each copy to keep them ordered.
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // Build the v2f64 by inserting the two f64 halves into an undef
        // vector: element 0 is the f64 just built, element 1 comes from two
        // more register copies below.
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      // Ordinary result: a single copy out of the assigned register.
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    // Convert from the location type back to the value type if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
|
|
|
|
|
|
|
|
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
|
|
|
|
/// by "Src" to address "Dst" of size "Size". Alignment information is
|
2009-04-18 04:35:10 +08:00
|
|
|
/// specified by the specific parameter attribute. The copy will be passed as
|
2009-04-18 03:07:39 +08:00
|
|
|
/// a byval function parameter.
|
|
|
|
/// Sometimes what we are copying is the end of a larger object, the part that
|
|
|
|
/// does not fit in registers.
|
|
|
|
static SDValue
|
|
|
|
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
|
|
|
|
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
|
|
|
|
DebugLoc dl) {
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
|
2009-04-18 03:07:39 +08:00
|
|
|
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
|
|
|
|
/*AlwaysInline=*/false, NULL, 0, NULL, 0);
|
|
|
|
}
|
|
|
|
|
2009-04-18 04:35:10 +08:00
|
|
|
/// LowerMemOpCallTo - Store the argument to the stack.
|
2009-04-18 03:07:39 +08:00
|
|
|
SDValue
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
|
|
|
|
SDValue StackPtr, SDValue Arg,
|
|
|
|
DebugLoc dl, SelectionDAG &DAG,
|
|
|
|
const CCValAssign &VA,
|
|
|
|
ISD::ArgFlagsTy Flags) {
|
2009-04-18 03:07:39 +08:00
|
|
|
unsigned LocMemOffset = VA.getLocMemOffset();
|
|
|
|
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
|
|
|
|
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
|
|
|
|
if (Flags.isByVal()) {
|
|
|
|
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
|
|
|
|
}
|
|
|
|
return DAG.getStore(Chain, dl, Arg, PtrOff,
|
|
|
|
PseudoSourceValue::getStack(), LocMemOffset);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
/// PassF64ArgInRegs - Split an f64 argument into two i32 halves (via FMRRD)
/// and pass each half according to the locations VA and NextVA: the first
/// half always goes in VA's register; the second goes in NextVA's register
/// or, if NextVA is a memory location, is stored to the stack.
/// StackPtr is materialized lazily on first use and updated in place;
/// register copies are appended to RegsToPass and stack stores to
/// MemOpChains.
void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) {

  // FMRRD produces two i32 results from one f64 operand.
  SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  // First half always has a register assignment.
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    // Second half was split to the stack.
    assert(NextVA.isMemLoc());
    // Materialize SP only when a stack store is actually needed.
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
/// LowerCall - Lowering a call into a callseq_start <-
|
2007-02-03 16:53:01 +08:00
|
|
|
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
|
|
|
|
/// nodes.
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
SDValue
|
|
|
|
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|
|
|
unsigned CallConv, bool isVarArg,
|
|
|
|
bool isTailCall,
|
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
DebugLoc dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) {
|
2009-04-18 03:07:39 +08:00
|
|
|
|
|
|
|
// Analyze operands of the call, assigning locations to each operand.
|
|
|
|
SmallVector<CCValAssign, 16> ArgLocs;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
|
|
|
|
*DAG.getContext());
|
|
|
|
CCInfo.AnalyzeCallOperands(Outs,
|
2009-08-06 03:04:42 +08:00
|
|
|
CCAssignFnForNode(CallConv, /* Return*/ false,
|
|
|
|
isVarArg));
|
2009-04-18 03:07:39 +08:00
|
|
|
|
|
|
|
// Get a count of how many bytes are to be pushed on the stack.
|
|
|
|
unsigned NumBytes = CCInfo.getNextStackOffset();
|
2007-01-19 15:51:42 +08:00
|
|
|
|
|
|
|
// Adjust the stack pointer for the new arguments...
|
|
|
|
// These operations are automatically eliminated by the prolog/epilog pass
|
2008-10-12 06:08:30 +08:00
|
|
|
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
RegsToPassVector RegsToPass;
|
2009-04-18 03:07:39 +08:00
|
|
|
SmallVector<SDValue, 8> MemOpChains;
|
|
|
|
|
|
|
|
// Walk the register/memloc assignments, inserting copies/loads. In the case
|
2009-04-18 04:35:10 +08:00
|
|
|
// of tail call optimization, arguments are handled later.
|
2009-04-18 03:07:39 +08:00
|
|
|
for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
|
|
|
|
i != e;
|
|
|
|
++i, ++realArgIdx) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
SDValue Arg = Outs[realArgIdx].Val;
|
|
|
|
ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
|
2009-04-18 03:07:39 +08:00
|
|
|
|
|
|
|
// Promote the value if needed.
|
|
|
|
switch (VA.getLocInfo()) {
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("Unknown loc info!");
|
2009-04-18 03:07:39 +08:00
|
|
|
case CCValAssign::Full: break;
|
|
|
|
case CCValAssign::SExt:
|
|
|
|
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
case CCValAssign::ZExt:
|
|
|
|
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
case CCValAssign::AExt:
|
|
|
|
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
case CCValAssign::BCvt:
|
|
|
|
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-08-06 03:04:42 +08:00
|
|
|
// f64 and v2f64 might be passed in i32 pairs and must be split into pieces
|
2009-04-18 03:07:39 +08:00
|
|
|
if (VA.needsCustom()) {
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VA.getLocVT() == MVT::v2f64) {
|
|
|
|
SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
|
|
|
|
DAG.getConstant(0, MVT::i32));
|
|
|
|
SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
|
|
|
|
DAG.getConstant(1, MVT::i32));
|
2009-06-23 07:27:02 +08:00
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
|
2009-06-23 07:27:02 +08:00
|
|
|
VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
|
|
|
|
|
|
|
|
VA = ArgLocs[++i]; // skip ahead to next loc
|
|
|
|
if (VA.isRegLoc()) {
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
|
2009-06-23 07:27:02 +08:00
|
|
|
VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
|
|
|
|
} else {
|
|
|
|
assert(VA.isMemLoc());
|
|
|
|
if (StackPtr.getNode() == 0)
|
|
|
|
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
|
|
|
|
dl, DAG, VA, Flags));
|
2009-06-23 07:27:02 +08:00
|
|
|
}
|
|
|
|
} else {
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
|
2009-06-23 07:27:02 +08:00
|
|
|
StackPtr, MemOpChains, Flags);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
2009-04-18 03:07:39 +08:00
|
|
|
} else if (VA.isRegLoc()) {
|
|
|
|
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
|
2007-01-19 15:51:42 +08:00
|
|
|
} else {
|
2009-04-18 03:07:39 +08:00
|
|
|
assert(VA.isMemLoc());
|
|
|
|
if (StackPtr.getNode() == 0)
|
|
|
|
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
|
2007-01-19 15:51:42 +08:00
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
|
|
|
|
dl, DAG, VA, Flags));
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!MemOpChains.empty())
|
2009-08-12 04:47:22 +08:00
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
|
2007-01-19 15:51:42 +08:00
|
|
|
&MemOpChains[0], MemOpChains.size());
|
|
|
|
|
|
|
|
// Build a sequence of copy-to-reg nodes chained together with token chain
|
|
|
|
// and flag operands which copy the outgoing args into the appropriate regs.
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue InFlag;
|
2007-01-19 15:51:42 +08:00
|
|
|
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
2009-03-21 06:42:55 +08:00
|
|
|
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
|
2009-02-05 04:06:27 +08:00
|
|
|
RegsToPass[i].second, InFlag);
|
2007-01-19 15:51:42 +08:00
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
|
|
|
|
2008-09-17 05:48:12 +08:00
|
|
|
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
|
|
|
|
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
|
|
|
|
// node so that legalize doesn't hack it.
|
2007-01-19 15:51:42 +08:00
|
|
|
bool isDirect = false;
|
|
|
|
bool isARMFunc = false;
|
2007-06-20 05:05:09 +08:00
|
|
|
bool isLocalARMFunc = false;
|
2007-01-19 15:51:42 +08:00
|
|
|
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
|
|
|
|
GlobalValue *GV = G->getGlobal();
|
|
|
|
isDirect = true;
|
2009-07-15 12:12:33 +08:00
|
|
|
bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
|
2007-01-20 03:28:01 +08:00
|
|
|
bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
|
2007-01-19 15:51:42 +08:00
|
|
|
getTargetMachine().getRelocationModel() != Reloc::Static;
|
|
|
|
isARMFunc = !Subtarget->isThumb() || isStub;
|
2007-06-20 05:05:09 +08:00
|
|
|
// ARM call to a local ARM function is predicable.
|
|
|
|
isLocalARMFunc = !Subtarget->isThumb() && !isExt;
|
2007-01-31 04:37:08 +08:00
|
|
|
// tBX takes a register source operand.
|
2009-07-09 07:10:31 +08:00
|
|
|
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
|
2007-01-31 04:37:08 +08:00
|
|
|
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
|
|
|
|
ARMCP::CPStub, 4);
|
2009-03-13 15:51:59 +08:00
|
|
|
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
|
2009-08-12 04:47:22 +08:00
|
|
|
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
|
2009-03-21 06:42:55 +08:00
|
|
|
Callee = DAG.getLoad(getPointerTy(), dl,
|
|
|
|
DAG.getEntryNode(), CPAddr, NULL, 0);
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
|
2009-03-21 06:42:55 +08:00
|
|
|
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
|
2009-02-05 04:06:27 +08:00
|
|
|
getPointerTy(), Callee, PICLabel);
|
2007-01-31 04:37:08 +08:00
|
|
|
} else
|
|
|
|
Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
|
2008-09-17 05:48:12 +08:00
|
|
|
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
|
2007-01-19 15:51:42 +08:00
|
|
|
isDirect = true;
|
2007-01-20 03:28:01 +08:00
|
|
|
bool isStub = Subtarget->isTargetDarwin() &&
|
2007-01-19 15:51:42 +08:00
|
|
|
getTargetMachine().getRelocationModel() != Reloc::Static;
|
|
|
|
isARMFunc = !Subtarget->isThumb() || isStub;
|
2007-01-31 04:37:08 +08:00
|
|
|
// tBX takes a register source operand.
|
|
|
|
const char *Sym = S->getSymbol();
|
2009-07-09 07:10:31 +08:00
|
|
|
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
|
2007-01-31 04:37:08 +08:00
|
|
|
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
|
|
|
|
ARMCP::CPStub, 4);
|
2009-03-13 15:51:59 +08:00
|
|
|
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
|
2009-08-12 04:47:22 +08:00
|
|
|
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
|
2009-02-05 04:06:27 +08:00
|
|
|
Callee = DAG.getLoad(getPointerTy(), dl,
|
2009-03-21 06:42:55 +08:00
|
|
|
DAG.getEntryNode(), CPAddr, NULL, 0);
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
|
2009-03-21 06:42:55 +08:00
|
|
|
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
|
2009-02-05 04:06:27 +08:00
|
|
|
getPointerTy(), Callee, PICLabel);
|
2007-01-31 04:37:08 +08:00
|
|
|
} else
|
2008-09-17 05:48:12 +08:00
|
|
|
Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2007-03-21 01:57:23 +08:00
|
|
|
// FIXME: handle tail calls differently.
|
|
|
|
unsigned CallOpc;
|
2009-08-01 08:16:10 +08:00
|
|
|
if (Subtarget->isThumb()) {
|
|
|
|
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
|
2007-03-21 01:57:23 +08:00
|
|
|
CallOpc = ARMISD::CALL_NOLINK;
|
|
|
|
else
|
|
|
|
CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
|
|
|
|
} else {
|
|
|
|
CallOpc = (isDirect || Subtarget->hasV5TOps())
|
2007-06-20 05:05:09 +08:00
|
|
|
? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
|
|
|
|
: ARMISD::CALL_NOLINK;
|
2007-03-21 01:57:23 +08:00
|
|
|
}
|
2009-07-09 07:10:31 +08:00
|
|
|
if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
|
2007-03-28 00:19:21 +08:00
|
|
|
// implicit def LR - LR mustn't be allocated as GRP:$dst of CALL_NOLINK
|
2009-08-12 04:47:22 +08:00
|
|
|
Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
|
2007-03-21 01:57:23 +08:00
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
std::vector<SDValue> Ops;
|
2007-01-19 15:51:42 +08:00
|
|
|
Ops.push_back(Chain);
|
|
|
|
Ops.push_back(Callee);
|
|
|
|
|
|
|
|
// Add argument registers to the end of the list so that they are known live
|
|
|
|
// into the call.
|
|
|
|
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
|
|
|
|
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
|
|
|
|
RegsToPass[i].second.getValueType()));
|
|
|
|
|
2008-08-29 05:40:38 +08:00
|
|
|
if (InFlag.getNode())
|
2007-01-19 15:51:42 +08:00
|
|
|
Ops.push_back(InFlag);
|
2008-07-03 01:40:58 +08:00
|
|
|
// Returns a chain and a flag for retval copy to use.
|
2009-08-12 04:47:22 +08:00
|
|
|
Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
|
2008-07-03 01:40:58 +08:00
|
|
|
&Ops[0], Ops.size());
|
2007-01-19 15:51:42 +08:00
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
2008-10-12 06:08:30 +08:00
|
|
|
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
|
|
|
|
DAG.getIntPtrConstant(0, true), InFlag);
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
if (!Ins.empty())
|
2007-01-19 15:51:42 +08:00
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
// Handle result values, copying them out of physregs into vregs that we
|
|
|
|
// return.
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
|
|
|
|
dl, DAG, InVals);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
/// LowerReturn - Lower an outgoing return into the ARM return sequence.
/// Runs the return-value calling-convention analysis over Outs, marks the
/// chosen registers live-out, copies each value into its assigned physreg
/// (splitting f64/v2f64 values across GPR pairs via FMRRD), and finishes
/// with an ARMISD::RET_FLAG node.
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               unsigned CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers. A single source value
  // may consume several RVLocs (f64 -> 2 x i32, v2f64 -> 4 x i32), so the
  // loop advances i past the extra locations while realRVLocIdx tracks the
  // original return operands.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = Outs[realRVLocIdx].Val;

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::FMRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(1), Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
  }

  // Glue the final copy (if any) into the return node so the copies cannot
  // be scheduled away from the return.
  SDValue result;
  if (Flag.getNode())
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else // Return Void
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);

  return result;
}
|
|
|
|
|
2009-03-21 06:42:55 +08:00
|
|
|
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
|
2009-07-14 02:11:36 +08:00
|
|
|
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
|
2008-09-17 05:48:12 +08:00
|
|
|
// one of the above mentioned nodes. It has to be wrapped because otherwise
|
|
|
|
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
|
|
|
|
// be used to form addressing mode. These wrapped nodes will be selected
|
|
|
|
// into MOVi.
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerConstantPool - Lower a ConstantPool node to its target form, wrapped
/// in an ARMISD::Wrapper so that the raw TargetConstantPool node cannot be
/// re-selected as itself.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // Machine constant-pool entries and plain constants use different
  // accessors but produce the same kind of target node.
  SDValue Res = CP->isMachineConstantPoolEntry()
    ? DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                CP->getAlignment())
    : DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}
|
|
|
|
|
2007-04-27 21:54:47 +08:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "general dynamic" model
|
2008-07-28 05:46:04 +08:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "general dynamic" model: materialize
// a PC-relative "tlsgd" constant-pool address and pass it to __tls_get_addr.
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) {
  DebugLoc dl = GA->getDebugLoc();
  EVT PtrVT = getPointerTy();
  // PC-relative fixup: PC reads as instruction+4 in Thumb, +8 in ARM.
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                             PCAdj, "tlsgd", true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, NULL, 0);
  SDValue Chain = Argument.getValue(1);

  // Turn the loaded GOT offset into a PC-relative address.
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (const Type *) Type::Int32Ty;
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false, false, false,
                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  return CallResult.first;
}
|
|
|
|
|
|
|
|
// Lower ISD::GlobalTLSAddress using the "initial exec" or
|
|
|
|
// "local exec" model.
|
2008-07-28 05:46:04 +08:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model: compute an offset from the thread pointer.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) {
  GlobalValue *GV = GA->getGlobal();
  DebugLoc dl = GA->getDebugLoc();
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (GV->isDeclaration()) {
    // Initial-exec model: the variable may live in another module, so the
    // offset itself is loaded indirectly through a PC-relative GOT slot
    // ("gottpoff"), then dereferenced.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                               PCAdj, "gottpoff", true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    // Second load: fetch the actual tp-relative offset from the GOT slot.
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
  } else {
    // Local-exec model: the offset ("tpoff") is a link-time constant and
    // can be loaded directly from the constant pool.
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMCP::CPValue, "tpoff");
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerGlobalTLSAddress - Dispatch a TLS global address to the appropriate
/// TLS access model based on the relocation model.
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model
  return getTargetMachine().getRelocationModel() == Reloc::PIC_
    ? LowerToTLSGeneralDynamicModel(GA, DAG)
    : LowerToTLSExecModels(GA, DAG);
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerGlobalAddressELF - Lower a global address for ELF targets. In PIC
/// mode the address is computed GOT-relative (GOTOFF for locally-bound
/// symbols, via a GOT load otherwise); in non-PIC mode it is a plain
/// constant-pool load.
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();

  if (RelocM != Reloc::PIC_) {
    // Non-PIC: load the absolute address out of the constant pool.
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  }

  // GOTOFF is usable when the symbol binds locally; otherwise go through
  // the GOT and dereference the slot.
  bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GV, ARMCP::CPValue, UseGOTOFF ? "GOTOFF":"GOT");
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                               CPAddr, NULL, 0);
  SDValue Chain = Result.getValue(1);
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
  if (!UseGOTOFF)
    Result = DAG.getLoad(PtrVT, dl, Chain, Result, NULL, 0);
  return Result;
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol
|
2007-05-04 08:26:58 +08:00
|
|
|
/// even in non-static mode.
|
|
|
|
static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) {
  // If symbol visibility is hidden, the extra load is not needed if
  // the symbol is definitely defined in the current translation unit.
  bool isDecl = GV->isDeclaration() || GV->hasAvailableExternallyLinkage();
  if (GV->hasHiddenVisibility() && !isDecl && !GV->hasCommonLinkage())
    return false;
  // Otherwise an indirect (non-lazy-pointer) access is needed for any
  // non-static relocation model when the definition may be elsewhere.
  return RelocM != Reloc::Static && (isDecl || GV->isWeakForLinker());
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerGlobalAddressDarwin - Lower a global address for Darwin targets,
/// loading the address from the constant pool (PC-relative in PIC mode) and
/// adding an extra dereference when the symbol is accessed through an
/// indirect (non-lazy-pointer) stub.
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  bool IsIndirect = GVIsIndirectSymbol(GV, RelocM);
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
  } else {
    // DynamicNoPIC needs no PC adjustment; PIC reads PC as +4 (Thumb) / +8
    // (ARM) past the instruction.
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMCP::ARMCPKind Kind = IsIndirect ? ARMCP::CPNonLazyPtr
                                       : ARMCP::CPValue;
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                         Kind, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);

  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDValue Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
  }
  // Indirect symbols hold the real address in the loaded slot.
  if (IsIndirect)
    Result = DAG.getLoad(PtrVT, dl, Chain, Result, NULL, 0);

  return Result;
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerGLOBAL_OFFSET_TABLE - Materialize the address of
/// _GLOBAL_OFFSET_TABLE_ via a PC-relative constant-pool load (ELF only).
SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
                                                    SelectionDAG &DAG){
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  // PC reads as instruction+4 in Thumb mode, +8 in ARM mode.
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_",
                                                       ARMPCLabelIndex,
                                                       ARMCP::CPValue, PCAdj);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
|
|
|
|
|
2009-08-04 08:36:16 +08:00
|
|
|
/// LowerNeonVLDIntrinsic - Lower a NEON structured-load intrinsic
/// (vld2/vld3/vld4) to the given ARMISD VLD node, keeping the chain
/// (operand 0) and address (operand 2) and dropping the intrinsic ID.
static SDValue LowerNeonVLDIntrinsic(SDValue Op, SelectionDAG &DAG,
                                     unsigned Opcode) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  DebugLoc dl = Op.getDebugLoc();

  // Only 64-bit (D-register) vectors are handled so far.
  if (!VT.is64BitVector())
    return SDValue(); // unimplemented

  SDValue Ops[] = { Node->getOperand(0),
                    Node->getOperand(2) };
  return DAG.getNode(Opcode, dl, Node->getVTList(), Ops, 2);
}
|
|
|
|
|
2009-08-07 02:47:44 +08:00
|
|
|
/// LowerNeonVSTIntrinsic - Lower a NEON structured-store intrinsic
/// (vst2/vst3/vst4) to the given ARMISD VST node: chain, address, then
/// NumVecs source vectors (operands 3..3+NumVecs-1).
static SDValue LowerNeonVSTIntrinsic(SDValue Op, SelectionDAG &DAG,
                                     unsigned Opcode, unsigned NumVecs) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getOperand(3).getValueType();
  DebugLoc dl = Op.getDebugLoc();

  // Only 64-bit (D-register) vectors are handled so far.
  if (!VT.is64BitVector())
    return SDValue(); // unimplemented

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(Node->getOperand(0));
  Ops.push_back(Node->getOperand(2));
  for (unsigned N = 0; N < NumVecs; ++N)
    Ops.push_back(Node->getOperand(N + 3));
  return DAG.getNode(Opcode, dl, MVT::Other, Ops.data(), Ops.size());
}
|
|
|
|
|
2009-08-04 08:36:16 +08:00
|
|
|
/// LowerINTRINSIC_W_CHAIN - Custom-lower chained intrinsics; currently only
/// the NEON structured load/store intrinsics are handled, mapped to their
/// ARMISD VLDxD/VSTxD nodes.
SDValue
ARMTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::arm_neon_vld2:
    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD2D);
  case Intrinsic::arm_neon_vld3:
    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD3D);
  case Intrinsic::arm_neon_vld4:
    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD4D);
  case Intrinsic::arm_neon_vst2:
    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST2D, 2);
  case Intrinsic::arm_neon_vst3:
    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST3D, 3);
  case Intrinsic::arm_neon_vst4:
    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST4D, 4);
  default: return SDValue();    // Don't custom lower most intrinsics.
  }
}
|
|
|
|
|
2009-05-13 07:59:14 +08:00
|
|
|
/// LowerINTRINSIC_WO_CHAIN - Custom-lower the chainless intrinsics that
/// need target-specific handling: the thread pointer and the SjLj
/// exception-handling helpers.  All other intrinsics fall through to
/// default handling.
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
  // Operand 0 of INTRINSIC_WO_CHAIN carries the intrinsic ID.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer: {
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    // blah. horrible, horrible hack with the forced magic name.
    // really need to clean this up. It belongs in the target-independent
    // layer somehow that doesn't require the coupling with the asm
    // printer.
    MachineFunction &MF = DAG.getMachineFunction();
    EVT PtrVT = getPointerTy();
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    // PIC references are loaded pc-relative, so account for the pc
    // read-ahead (4 bytes in Thumb mode, 8 in ARM mode).
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMCP::ARMCPKind Kind = ARMCP::CPValue;
    // Save off the LSDA name for the AsmPrinter to use when it's time
    // to emit the table
    std::string LSDAName = "L_lsda_";
    LSDAName += MF.getFunction()->getName();
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(LSDAName.c_str(), ARMPCLabelIndex, Kind, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);

    // In PIC mode, add the pc-label offset to form the final address.
    if (RelocM == Reloc::PIC_) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::eh_sjlj_setjmp:
    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1));
  }
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerVASTART - vastart just stores the address of the VarArgsFrameIndex
/// slot into the memory location argument.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            unsigned VarArgsFrameIndex) {
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  // The source-value node describes the va_list memory location.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDValue FrameAddr = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  return DAG.getStore(Chain, dl, FrameAddr, DestPtr, SV, 0);
}
|
|
|
|
|
2009-08-07 08:34:42 +08:00
|
|
|
/// LowerDYNAMIC_STACKALLOC - Lower a dynamic stack allocation into an
/// ARMISD::DYN_ALLOC node bracketed by callseq markers.
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
  unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
  if (AlignVal > StackAlign)
    // Do this now since selection pass cannot introduce new target
    // independent node.
    Align = DAG.getConstant(-(uint64_t)AlignVal, VT);

  // In Thumb1 mode, there isn't a "sub r, sp, r" instruction, we will end up
  // using a "add r, sp, r" instead. Negate the size now so we don't have to
  // do even more horrible hack later.
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (AFI->isThumb1OnlyFunction()) {
    // Small word-aligned constant sizes (<= 508, multiple of 4) are left
    // un-negated; everything else is negated here.
    bool NegateSize = true;
    if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
      uint32_t SizeVal = CSize->getZExtValue();
      if (SizeVal <= 508 && ((SizeVal & 3) == 0))
        NegateSize = false;
    }
    if (NegateSize)
      Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
  }

  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
  SDValue AllocOps[] = { Chain, Size, Align };
  SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, AllocOps, 3);
  Chain = DAG.getCALLSEQ_END(Res.getValue(1), DAG.getIntPtrConstant(0, true),
                             DAG.getIntPtrConstant(0, true), SDValue());
  SDValue MergeOps[] = { Res, Chain };
  return DAG.getMergeValues(MergeOps, 2, dl);
}
|
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
/// GetF64FormalArgument - Reassemble an f64 formal argument that the
/// calling convention split across two locations: the first half in a
/// GPR described by VA, the second half (NextVA) in either another GPR
/// or a stack slot.
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC = AFI->isThumb1OnlyFunction() ?
    ARM::tGPRRegisterClass : ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue FirstHalf = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue SecondHalf;
  if (NextVA.isMemLoc()) {
    // The second half lives on the stack: make a fixed object for it and
    // create a load node to retrieve it.
    unsigned ArgSize = NextVA.getLocVT().getSizeInBits()/8;
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(ArgSize, NextVA.getLocMemOffset());
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    SecondHalf = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
  } else {
    // The second half is in another GPR.
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    SecondHalf = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  // Combine the two i32 halves back into a single f64 value.
  return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, FirstHalf, SecondHalf);
}
|
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
SDValue
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
|
|
|
|
unsigned CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg>
|
|
|
|
&Ins,
|
|
|
|
DebugLoc dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) {
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2009-04-18 03:07:39 +08:00
|
|
|
MachineFrameInfo *MFI = MF.getFrameInfo();
|
|
|
|
|
2009-04-08 04:34:09 +08:00
|
|
|
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
// Assign locations to all of the incoming arguments.
|
|
|
|
SmallVector<CCValAssign, 16> ArgLocs;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
|
|
|
|
*DAG.getContext());
|
|
|
|
CCInfo.AnalyzeFormalArguments(Ins,
|
2009-08-06 03:04:42 +08:00
|
|
|
CCAssignFnForNode(CallConv, /* Return*/ false,
|
|
|
|
isVarArg));
|
2009-04-18 03:07:39 +08:00
|
|
|
|
|
|
|
SmallVector<SDValue, 16> ArgValues;
|
|
|
|
|
|
|
|
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
|
|
|
|
2009-04-18 04:35:10 +08:00
|
|
|
// Arguments stored in registers.
|
2009-04-18 03:07:39 +08:00
|
|
|
if (VA.isRegLoc()) {
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT RegVT = VA.getLocVT();
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
SDValue ArgValue;
|
|
|
|
if (VA.needsCustom()) {
|
|
|
|
// f64 and vector types are split up into multiple registers or
|
|
|
|
// combinations of registers and stack slots.
|
2009-08-12 04:47:22 +08:00
|
|
|
RegVT = MVT::i32;
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VA.getLocVT() == MVT::v2f64) {
|
2009-06-23 07:27:02 +08:00
|
|
|
SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
Chain, DAG, dl);
|
2009-06-23 07:27:02 +08:00
|
|
|
VA = ArgLocs[++i]; // skip ahead to next loc
|
|
|
|
SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
Chain, DAG, dl);
|
2009-08-12 04:47:22 +08:00
|
|
|
ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
|
|
|
|
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
|
2009-06-23 07:27:02 +08:00
|
|
|
ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
|
2009-08-12 04:47:22 +08:00
|
|
|
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
|
2009-06-23 07:27:02 +08:00
|
|
|
ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
|
|
|
|
} else
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
} else {
|
|
|
|
TargetRegisterClass *RC;
|
2009-08-06 03:04:42 +08:00
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
if (RegVT == MVT::f32)
|
2009-06-23 07:27:02 +08:00
|
|
|
RC = ARM::SPRRegisterClass;
|
2009-08-12 04:47:22 +08:00
|
|
|
else if (RegVT == MVT::f64)
|
2009-06-23 07:27:02 +08:00
|
|
|
RC = ARM::DPRRegisterClass;
|
2009-08-12 04:47:22 +08:00
|
|
|
else if (RegVT == MVT::v2f64)
|
2009-08-06 03:04:42 +08:00
|
|
|
RC = ARM::QPRRegisterClass;
|
2009-08-12 04:47:22 +08:00
|
|
|
else if (RegVT == MVT::i32)
|
2009-08-06 04:15:19 +08:00
|
|
|
RC = (AFI->isThumb1OnlyFunction() ?
|
|
|
|
ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
|
2009-06-23 07:27:02 +08:00
|
|
|
else
|
2009-08-06 04:15:19 +08:00
|
|
|
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
// Transform the arguments in physical registers into virtual ones.
|
|
|
|
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
// If this is an 8 or 16-bit value, it is really passed promoted
|
|
|
|
// to 32 bits. Insert an assert[sz]ext to capture this, then
|
|
|
|
// truncate to the right size.
|
|
|
|
switch (VA.getLocInfo()) {
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("Unknown loc info!");
|
2009-04-18 03:07:39 +08:00
|
|
|
case CCValAssign::Full: break;
|
|
|
|
case CCValAssign::BCvt:
|
|
|
|
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
|
|
|
|
break;
|
|
|
|
case CCValAssign::SExt:
|
|
|
|
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
|
|
|
|
DAG.getValueType(VA.getValVT()));
|
|
|
|
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
|
|
|
|
break;
|
|
|
|
case CCValAssign::ZExt:
|
|
|
|
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
|
|
|
|
DAG.getValueType(VA.getValVT()));
|
|
|
|
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
|
|
|
|
break;
|
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
InVals.push_back(ArgValue);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
} else { // VA.isRegLoc()
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
// sanity check
|
|
|
|
assert(VA.isMemLoc());
|
2009-08-12 04:47:22 +08:00
|
|
|
assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 03:07:39 +08:00
|
|
|
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
|
|
|
|
int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-04-18 04:35:10 +08:00
|
|
|
// Create load nodes to retrieve arguments from the stack.
|
2009-04-18 03:07:39 +08:00
|
|
|
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
|
2009-04-18 03:07:39 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// varargs
|
2007-01-19 15:51:42 +08:00
|
|
|
if (isVarArg) {
|
|
|
|
static const unsigned GPRArgRegs[] = {
|
|
|
|
ARM::R0, ARM::R1, ARM::R2, ARM::R3
|
|
|
|
};
|
|
|
|
|
2009-04-18 04:35:10 +08:00
|
|
|
unsigned NumGPRs = CCInfo.getFirstUnallocated
|
|
|
|
(GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
|
2009-04-18 03:07:39 +08:00
|
|
|
|
2007-02-24 04:32:57 +08:00
|
|
|
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
|
|
|
|
unsigned VARegSize = (4 - NumGPRs) * 4;
|
|
|
|
unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
|
2009-04-18 03:07:39 +08:00
|
|
|
unsigned ArgOffset = 0;
|
2007-01-19 15:51:42 +08:00
|
|
|
if (VARegSaveSize) {
|
|
|
|
// If this function is vararg, store any remaining integer argument regs
|
|
|
|
// to their spots on the stack so that they may be loaded by deferencing
|
|
|
|
// the result of va_next.
|
|
|
|
AFI->setVarArgsRegSaveSize(VARegSaveSize);
|
2009-04-18 03:07:39 +08:00
|
|
|
ArgOffset = CCInfo.getNextStackOffset();
|
2007-02-24 04:32:57 +08:00
|
|
|
VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
|
|
|
|
VARegSaveSize - VARegSize);
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
SmallVector<SDValue, 4> MemOps;
|
2007-01-19 15:51:42 +08:00
|
|
|
for (; NumGPRs < 4; ++NumGPRs) {
|
2009-04-18 03:07:39 +08:00
|
|
|
TargetRegisterClass *RC;
|
2009-07-09 07:10:31 +08:00
|
|
|
if (AFI->isThumb1OnlyFunction())
|
2009-04-18 03:07:39 +08:00
|
|
|
RC = ARM::tGPRRegisterClass;
|
2009-04-08 04:34:09 +08:00
|
|
|
else
|
2009-04-18 03:07:39 +08:00
|
|
|
RC = ARM::GPRRegisterClass;
|
|
|
|
|
2009-04-21 02:36:57 +08:00
|
|
|
unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
|
2009-02-05 04:06:27 +08:00
|
|
|
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
|
2007-01-19 15:51:42 +08:00
|
|
|
MemOps.push_back(Store);
|
2009-02-05 04:06:27 +08:00
|
|
|
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
|
2007-01-19 15:51:42 +08:00
|
|
|
DAG.getConstant(4, getPointerTy()));
|
|
|
|
}
|
|
|
|
if (!MemOps.empty())
|
2009-08-12 04:47:22 +08:00
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
&MemOps[0], MemOps.size());
|
2007-01-19 15:51:42 +08:00
|
|
|
} else
|
|
|
|
// This will point to the next argument passed via stack.
|
|
|
|
VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
|
|
|
|
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 09:29:28 +08:00
|
|
|
return Chain;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// isFloatingPointZero - Return true if this is +0.0.
|
2008-07-28 05:46:04 +08:00
|
|
|
static bool isFloatingPointZero(SDValue Op) {
|
2007-01-19 15:51:42 +08:00
|
|
|
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
|
2007-08-31 12:03:46 +08:00
|
|
|
return CFP->getValueAPF().isPosZero();
|
2008-08-29 05:40:38 +08:00
|
|
|
else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
|
2007-01-19 15:51:42 +08:00
|
|
|
// Maybe this has already been legalized into the constant pool?
|
|
|
|
if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue WrapperOp = Op.getOperand(1).getOperand(0);
|
2007-01-19 15:51:42 +08:00
|
|
|
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
|
|
|
|
if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
|
2007-08-31 12:03:46 +08:00
|
|
|
return CFP->getValueAPF().isPosZero();
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-07-09 07:10:31 +08:00
|
|
|
static bool isLegalCmpImmediate(unsigned C, bool isThumb1Only) {
|
|
|
|
return ( isThumb1Only && (C & ~255U) == 0) ||
|
|
|
|
(!isThumb1Only && ARM_AM::getSOImmVal(C) != -1);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
|
|
|
|
/// the given operands.
|
2008-07-28 05:46:04 +08:00
|
|
|
static SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
|
2009-07-09 07:10:31 +08:00
|
|
|
SDValue &ARMCC, SelectionDAG &DAG, bool isThumb1Only,
|
2009-02-07 05:50:26 +08:00
|
|
|
DebugLoc dl) {
|
2008-08-29 05:40:38 +08:00
|
|
|
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
|
2008-09-13 00:56:44 +08:00
|
|
|
unsigned C = RHSC->getZExtValue();
|
2009-07-09 07:10:31 +08:00
|
|
|
if (!isLegalCmpImmediate(C, isThumb1Only)) {
|
2007-01-19 15:51:42 +08:00
|
|
|
// Constant does not fit, try adjusting it by one?
|
|
|
|
switch (CC) {
|
|
|
|
default: break;
|
|
|
|
case ISD::SETLT:
|
|
|
|
case ISD::SETGE:
|
2009-07-09 07:10:31 +08:00
|
|
|
if (isLegalCmpImmediate(C-1, isThumb1Only)) {
|
2007-02-02 09:53:26 +08:00
|
|
|
CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
|
2009-08-12 04:47:22 +08:00
|
|
|
RHS = DAG.getConstant(C-1, MVT::i32);
|
2007-02-02 09:53:26 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ISD::SETULT:
|
|
|
|
case ISD::SETUGE:
|
2009-07-09 07:10:31 +08:00
|
|
|
if (C > 0 && isLegalCmpImmediate(C-1, isThumb1Only)) {
|
2007-02-02 09:53:26 +08:00
|
|
|
CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
|
2009-08-12 04:47:22 +08:00
|
|
|
RHS = DAG.getConstant(C-1, MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ISD::SETLE:
|
|
|
|
case ISD::SETGT:
|
2009-07-09 07:10:31 +08:00
|
|
|
if (isLegalCmpImmediate(C+1, isThumb1Only)) {
|
2007-02-02 09:53:26 +08:00
|
|
|
CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
|
2009-08-12 04:47:22 +08:00
|
|
|
RHS = DAG.getConstant(C+1, MVT::i32);
|
2007-02-02 09:53:26 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case ISD::SETULE:
|
|
|
|
case ISD::SETUGT:
|
2009-07-09 07:10:31 +08:00
|
|
|
if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb1Only)) {
|
2007-02-02 09:53:26 +08:00
|
|
|
CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
|
2009-08-12 04:47:22 +08:00
|
|
|
RHS = DAG.getConstant(C+1, MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
|
2007-04-02 09:30:03 +08:00
|
|
|
ARMISD::NodeType CompareType;
|
|
|
|
switch (CondCode) {
|
|
|
|
default:
|
|
|
|
CompareType = ARMISD::CMP;
|
|
|
|
break;
|
|
|
|
case ARMCC::EQ:
|
|
|
|
case ARMCC::NE:
|
2009-06-29 23:33:01 +08:00
|
|
|
// Uses only Z Flag
|
|
|
|
CompareType = ARMISD::CMPZ;
|
2007-04-02 09:30:03 +08:00
|
|
|
break;
|
|
|
|
}
|
2009-08-12 04:47:22 +08:00
|
|
|
ARMCC = DAG.getConstant(CondCode, MVT::i32);
|
|
|
|
return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
|
2009-03-21 06:42:55 +08:00
|
|
|
static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
|
2009-02-07 05:50:26 +08:00
|
|
|
DebugLoc dl) {
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue Cmp;
|
2007-01-19 15:51:42 +08:00
|
|
|
if (!isFloatingPointZero(RHS))
|
2009-08-12 04:47:22 +08:00
|
|
|
Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
|
2007-01-19 15:51:42 +08:00
|
|
|
else
|
2009-08-12 04:47:22 +08:00
|
|
|
Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
|
|
|
|
return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
|
2009-03-21 06:42:55 +08:00
|
|
|
const ARMSubtarget *ST) {
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT = Op.getValueType();
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue LHS = Op.getOperand(0);
|
|
|
|
SDValue RHS = Op.getOperand(1);
|
2007-01-19 15:51:42 +08:00
|
|
|
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue TrueVal = Op.getOperand(2);
|
|
|
|
SDValue FalseVal = Op.getOperand(3);
|
2009-02-07 05:50:26 +08:00
|
|
|
DebugLoc dl = Op.getDebugLoc();
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
if (LHS.getValueType() == MVT::i32) {
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue ARMCC;
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
|
2009-07-09 07:10:31 +08:00
|
|
|
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
|
2009-02-07 05:50:26 +08:00
|
|
|
return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ARMCC::CondCodes CondCode, CondCode2;
|
|
|
|
if (FPCCToARMCC(CC, CondCode, CondCode2))
|
|
|
|
std::swap(TrueVal, FalseVal);
|
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
|
|
|
|
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
|
2009-02-07 05:50:26 +08:00
|
|
|
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
|
|
|
|
SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
|
2007-07-05 15:18:20 +08:00
|
|
|
ARMCC, CCR, Cmp);
|
2007-01-19 15:51:42 +08:00
|
|
|
if (CondCode2 != ARMCC::AL) {
|
2009-08-12 04:47:22 +08:00
|
|
|
SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
|
2007-01-19 15:51:42 +08:00
|
|
|
// FIXME: Needs another CMP because flag can have but one use.
|
2009-02-07 05:50:26 +08:00
|
|
|
SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
|
2009-03-21 06:42:55 +08:00
|
|
|
Result = DAG.getNode(ARMISD::CMOV, dl, VT,
|
2009-02-07 05:50:26 +08:00
|
|
|
Result, TrueVal, ARMCC2, CCR, Cmp2);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerBR_CC - Lower an ISD::BR_CC node (conditional branch on a compare)
/// into ARMISD::BRCOND nodes.  Integer compares use a single CMP+BRCOND;
/// floating-point compares may need two predicated branches when the IR
/// condition maps to two ARM condition codes.
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  // BR_CC operands: chain, condition code, LHS, RHS, destination block.
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  DebugLoc dl = Op.getDebugLoc();

  // Integer compare: one ARM CMP feeding a single conditional branch.
  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    // getARMCmp fills in ARMCC with the ARM condition code to branch on.
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMCC, CCR,Cmp);
  }

  // Floating-point compare (f32/f64 only).
  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  // FPCCToARMCC returns true when the operands must be swapped; it may also
  // produce a second condition code (CondCode2) for conditions that need two
  // ARM predicates (e.g. ONE = MI | GT).
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    // Swap the LHS/RHS of the comparison if needed.
    std::swap(LHS, RHS);

  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  // The branch also produces a flag result so a second branch can reuse it.
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  if (CondCode2 != ARMCC::AL) {
    // Emit a second conditional branch on CondCode2, chained after the first
    // and consuming the first branch's flag output.
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  }
  return Res;
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerBR_JT - Lower an ISD::BR_JT (jump-table branch).  Computes the
/// address of the jump-table entry (Table + Index*4) and emits either a
/// two-level Thumb2 jump (ARMISD::BR2_JT) or a load of the target address
/// followed by an indirect branch (ARMISD::BR_JT).
SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) {
  // BR_JT operands: chain, jump table, index into the table.
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  EVT PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  // Each jump table gets a unique id so later passes can pair the dispatch
  // with its table.
  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
  // Entries are 4 bytes wide: scale the index and add the table base.
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI, UId);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    // PIC: the table holds offsets; load the entry and add the table base
    // to form the absolute destination.
    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, NULL, 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  } else {
    // Non-PIC: the table holds absolute addresses; load and branch.
    Addr = DAG.getLoad(PTy, dl, Chain, Addr, NULL, 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  }
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerFP_TO_INT - Lower FP_TO_SINT / FP_TO_UINT.  The VFP conversion
/// instructions leave the integer result in an FP register, so convert
/// there and then bit-convert the f32 result to i32.
static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  // Pick the signed or unsigned target conversion node.
  unsigned ConvOpc;
  if (Op.getOpcode() == ISD::FP_TO_SINT)
    ConvOpc = ARMISD::FTOSI;
  else
    ConvOpc = ARMISD::FTOUI;
  SDValue Converted = DAG.getNode(ConvOpc, dl, MVT::f32, Op.getOperand(0));
  // Reinterpret the f32 bits as the i32 result.
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Converted);
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerINT_TO_FP - Lower SINT_TO_FP / UINT_TO_FP.  The VFP conversion
/// instructions take their integer input in an FP register, so first
/// bit-convert the i32 operand to f32, then convert.
static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT ResultVT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // Pick the signed or unsigned target conversion node.
  unsigned ConvOpc;
  if (Op.getOpcode() == ISD::SINT_TO_FP)
    ConvOpc = ARMISD::SITOF;
  else
    ConvOpc = ARMISD::UITOF;

  // Move the integer bits into an f32 register, then convert to ResultVT.
  SDValue InFPReg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32,
                                Op.getOperand(0));
  return DAG.getNode(ConvOpc, dl, ResultVT, InFPReg);
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerFCOPYSIGN - Lower ISD::FCOPYSIGN.
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);  // value whose magnitude is kept
  SDValue Tmp1 = Op.getOperand(1);  // value whose sign is copied
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
  // Compare the sign source against 0.0; if it is negative (LT), CNEG
  // negates the absolute value, otherwise the absolute value is kept.
  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  // NOTE(review): this selects -|Tmp0| when Tmp1 < 0.0 compares true; a
  // negative-zero sign source presumably does not take the negate path —
  // confirm whether that matters for strict fcopysign semantics.
  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}
|
|
|
|
|
2009-05-13 07:59:14 +08:00
|
|
|
/// LowerFRAMEADDR - Lower ISD::FRAMEADDR.  Reads the frame-pointer register
/// and, for non-zero depths, walks up the chain of saved frame pointers with
/// one load per level.
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  // Force the function to keep a frame pointer since we read it directly.
  MFI->setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // Thumb and Darwin use R7 as the frame pointer; other ARM targets use R11.
  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
    ? ARM::R7 : ARM::R11;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  // Each enclosing frame's address is loaded from the current frame pointer.
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0);
  return FrameAddr;
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// EmitTargetCodeForMemcpy - Emit an inline expansion of memcpy as repeated
/// 4-byte loads/stores (which later combine into ldm/stm), plus i16/i8
/// operations for the trailing 1-3 bytes.  Returns an empty SDValue to fall
/// back to the generic lowering when the copy is misaligned, has a
/// non-constant size, or exceeds the subtarget's inline-size threshold.
SDValue
ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                           SDValue Chain,
                                           SDValue Dst, SDValue Src,
                                           SDValue Size, unsigned Align,
                                           bool AlwaysInline,
                                           const Value *DstSV, uint64_t DstSVOff,
                                           const Value *SrcSV, uint64_t SrcSVOff){
  // Do repeated 4-byte loads and stores. To be improved.
  // This requires 4-byte alignment.
  if ((Align & 3) != 0)
    return SDValue();
  // This requires the copy size to be a constant, preferrably
  // within a subtarget-specific limit.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (!ConstantSize)
    return SDValue();
  uint64_t SizeVal = ConstantSize->getZExtValue();
  if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
    return SDValue();

  unsigned BytesLeft = SizeVal & 3;   // trailing bytes (0-3)
  unsigned NumMemOps = SizeVal >> 2;  // number of 4-byte transfers
  unsigned EmittedNumMemOps = 0;
  EVT VT = MVT::i32;
  unsigned VTSize = 4;
  unsigned i = 0;
  const unsigned MAX_LOADS_IN_LDM = 6;
  SDValue TFOps[MAX_LOADS_IN_LDM];
  SDValue Loads[MAX_LOADS_IN_LDM];
  uint64_t SrcOff = 0, DstOff = 0;

  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
  // same number of stores.  The loads and stores will get combined into
  // ldm/stm later on.
  while (EmittedNumMemOps < NumMemOps) {
    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      Loads[i] = DAG.getLoad(VT, dl, Chain,
                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                         DAG.getConstant(SrcOff, MVT::i32)),
                             SrcSV, SrcSVOff + SrcOff);
      TFOps[i] = Loads[i].getValue(1);
      SrcOff += VTSize;
    }
    // Join the load chains so the following stores are ordered after them.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                              DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                          DAG.getConstant(DstOff, MVT::i32)),
                              DstSV, DstSVOff + DstOff);
      DstOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    EmittedNumMemOps += i;
  }

  if (BytesLeft == 0)
    return Chain;

  // Issue loads / stores for the trailing (1 - 3) bytes.
  unsigned BytesLeftSave = BytesLeft;
  i = 0;
  while (BytesLeft) {
    // Use an i16 transfer while 2+ bytes remain, then i8 for the last byte.
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    Loads[i] = DAG.getLoad(VT, dl, Chain,
                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                       DAG.getConstant(SrcOff, MVT::i32)),
                           SrcSV, SrcSVOff + SrcOff);
    TFOps[i] = Loads[i].getValue(1);
    ++i;
    SrcOff += VTSize;
    BytesLeft -= VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

  // Replay the same width sequence for the trailing stores.
  i = 0;
  BytesLeft = BytesLeftSave;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                        DAG.getConstant(DstOff, MVT::i32)),
                            DstSV, DstSVOff + DstOff);
    ++i;
    DstOff += VTSize;
    BytesLeft -= VTSize;
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
}
|
|
|
|
|
2008-12-01 19:39:25 +08:00
|
|
|
/// ExpandBIT_CONVERT - Expand a BIT_CONVERT between i64 and f64 using the
/// ARM register-move nodes: i64->f64 becomes FMDRR (two GPRs into a DPR),
/// f64->i64 becomes FMRRD (a DPR into two GPRs) plus a BUILD_PAIR.
static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  DebugLoc dl = N->getDebugLoc();
  if (N->getValueType(0) == MVT::f64) {
    // Turn i64->f64 into FMDRR.
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
  }

  // Turn f64->i64 into FMRRD.
  SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
                            DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
}
|
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
/// getZeroVector - Returns a vector of specified type with all zero elements.
|
|
|
|
///
|
2009-08-11 06:56:29 +08:00
|
|
|
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Zero vectors are used to represent vector negation and in those cases
  // will be implemented with the NEON VNEG instruction. However, VNEG does
  // not support i64 elements, so sometimes the zero vectors will need to be
  // explicitly constructed. For those cases, and potentially other uses in
  // the future, always build zero vectors as <4 x i32> or <2 x i32> bitcasted
  // to their dest type. This ensures they get CSE'd.
  SDValue Zero = DAG.getTargetConstant(0, MVT::i32);
  SDValue ZeroVec = (VT.getSizeInBits() == 64)
    ? DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Zero, Zero)
    : DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Zero, Zero, Zero, Zero);

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ZeroVec);
}
|
|
|
|
|
|
|
|
/// getOnesVector - Returns a vector of specified type with all bits set.
|
|
|
|
///
|
2009-08-11 06:56:29 +08:00
|
|
|
/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDValue AllOnes = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue OnesVec = (VT.getSizeInBits() == 64)
    ? DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, AllOnes, AllOnes)
    : DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                  AllOnes, AllOnes, AllOnes, AllOnes);

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, OnesVec);
}
|
|
|
|
|
|
|
|
/// LowerShift - Lower SHL/SRA/SRL.  NEON vector shifts become vshift
/// intrinsics (with negated amounts for right shifts); a scalar i64
/// SRA/SRL by 1 becomes a shift-with-flag plus RRX pair.  Anything else
/// returns an empty SDValue to use the generic lowering.
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Lower vector shifts on NEON to use VSHL.
  if (VT.isVector()) {
    assert(ST->hasNEON() && "unexpected vector shift");

    // Left shifts translate directly to the vshiftu intrinsic.
    if (N->getOpcode() == ISD::SHL)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
                         N->getOperand(0), N->getOperand(1));

    assert((N->getOpcode() == ISD::SRA ||
            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

    // NEON uses the same intrinsics for both left and right shifts. For
    // right shifts, the shift amounts are negative, so negate the vector of
    // shift amounts.
    EVT ShiftVT = N->getOperand(1).getValueType();
    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                       getZeroVector(ShiftVT, DAG, dl),
                                       N->getOperand(1));
    // Signed shifts use vshifts; unsigned use vshiftu.
    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                               Intrinsic::arm_neon_vshifts :
                               Intrinsic::arm_neon_vshiftu);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(vshiftInt, MVT::i32),
                       N->getOperand(0), NegatedCount);
  }

  assert(VT == MVT::i64 &&
         (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
|
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
/// LowerVSETCC - Lower a vector-setcc (VSETCC) node to the matching NEON
/// compare node (VCEQ/VCGT/VCGE and unsigned variants), possibly swapping
/// the operands and/or inverting the result to synthesize conditions NEON
/// does not implement directly.
/// NOTE: both switches below rely on intentional case fallthrough — the
/// "// Fallthrough" cases set Invert/Swap and then share the Opc assignment
/// of the following case.  Do not reorder cases.
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;   // complement the result after the compare
  bool Swap = false;     // swap Op0/Op1 before the compare
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison"); break;
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMISD::VCGE; break;
    // Unordered compares: implement as the negation of the opposite
    // ordered compare (e.g. ULE == !OGT after a swap).
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison"); break;
    case ISD::SETNE:  Invert = true;   // Fallthrough: NE == !EQ
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true;     // Fallthrough: LT == GT with swapped ops
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true;     // Fallthrough
    case ISD::SETGE: Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true;     // Fallthrough
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true;     // Fallthrough
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {
      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
        // VTST tests for "any common bit set", i.e. NE; flip Invert so the
        // EQ/NE sense chosen above is preserved.
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
|
|
|
|
|
|
|
|
/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
|
|
|
|
/// VMOV instruction, and if so, return the constant being splatted.
|
|
|
|
static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
|
|
|
|
unsigned SplatBitSize, SelectionDAG &DAG) {
|
|
|
|
switch (SplatBitSize) {
|
|
|
|
case 8:
|
|
|
|
// Any 1-byte value is OK.
|
|
|
|
assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(SplatBits, MVT::i8);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
case 16:
|
|
|
|
// NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
|
|
|
|
if ((SplatBits & ~0xff) == 0 ||
|
|
|
|
(SplatBits & ~0xff00) == 0)
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(SplatBits, MVT::i16);
|
2009-06-23 07:27:02 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 32:
|
|
|
|
// NEON's 32-bit VMOV supports splat values where:
|
|
|
|
// * only one byte is nonzero, or
|
|
|
|
// * the least significant byte is 0xff and the second byte is nonzero, or
|
|
|
|
// * the least significant 2 bytes are 0xff and the third is nonzero.
|
|
|
|
if ((SplatBits & ~0xff) == 0 ||
|
|
|
|
(SplatBits & ~0xff00) == 0 ||
|
|
|
|
(SplatBits & ~0xff0000) == 0 ||
|
|
|
|
(SplatBits & ~0xff000000) == 0)
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(SplatBits, MVT::i32);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
if ((SplatBits & ~0xffff) == 0 &&
|
|
|
|
((SplatBits | SplatUndef) & 0xff) == 0xff)
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
if ((SplatBits & ~0xffffff) == 0 &&
|
|
|
|
((SplatBits | SplatUndef) & 0xffff) == 0xffff)
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
// Note: there are a few 32-bit splat values (specifically: 00ffff00,
|
|
|
|
// ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
|
|
|
|
// VMOV.I32. A (very) minor optimization would be to replicate the value
|
|
|
|
// and fall through here to test for a valid 64-bit splat. But, then the
|
|
|
|
// caller would also need to check and handle the change in size.
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 64: {
|
|
|
|
// NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
|
|
|
|
uint64_t BitMask = 0xff;
|
|
|
|
uint64_t Val = 0;
|
|
|
|
for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
|
|
|
|
if (((SplatBits | SplatUndef) & BitMask) == BitMask)
|
|
|
|
Val |= BitMask;
|
|
|
|
else if ((SplatBits & BitMask) != 0)
|
|
|
|
return SDValue();
|
|
|
|
BitMask <<= 8;
|
|
|
|
}
|
2009-08-12 04:47:22 +08:00
|
|
|
return DAG.getTargetConstant(Val, MVT::i64);
|
2009-06-23 07:27:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
2009-07-15 00:55:14 +08:00
|
|
|
llvm_unreachable("unexpected size for isVMOVSplat");
|
2009-06-23 07:27:02 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getVMOVImm - If this is a build_vector of constants which can be
|
|
|
|
/// formed by using a VMOV instruction of the specified element size,
|
|
|
|
/// return the constant being splatted. The ByteSize field indicates the
|
|
|
|
/// number of bytes of each element [1248].
|
|
|
|
SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
|
|
|
|
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
|
|
|
|
APInt SplatBits, SplatUndef;
|
|
|
|
unsigned SplatBitSize;
|
|
|
|
bool HasAnyUndefs;
|
|
|
|
if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
|
|
|
|
HasAnyUndefs, ByteSize * 8))
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
if (SplatBitSize > ByteSize * 8)
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
|
|
|
|
SplatBitSize, DAG);
|
|
|
|
}
|
|
|
|
|
2009-07-26 08:39:34 +08:00
|
|
|
/// isVREVMask - Check if a vector shuffle corresponds to a VREV
|
|
|
|
/// instruction with the specified blocksize. (The order of the elements
|
|
|
|
/// within each block of the vector is reversed.)
|
2009-08-13 06:31:50 +08:00
|
|
|
static bool isVREVMask(ShuffleVectorSDNode *N, unsigned BlockSize) {
|
2009-07-26 08:39:34 +08:00
|
|
|
assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
|
|
|
|
"Only possible block sizes for VREV are: 16, 32, 64");
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT = N->getValueType(0);
|
2009-07-26 08:39:34 +08:00
|
|
|
unsigned NumElts = VT.getVectorNumElements();
|
|
|
|
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
|
|
|
|
unsigned BlockElts = N->getMaskElt(0) + 1;
|
|
|
|
|
|
|
|
if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < NumElts; ++i) {
|
|
|
|
if ((unsigned) N->getMaskElt(i) !=
|
|
|
|
(i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// BuildSplat - Build a canonical BUILD_VECTOR splat of Val with result
/// type VT, folding all-zeros and all-ones splats to the shared helpers.
static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  // Canonicalize all-zeros and all-ones vectors.
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
  if (ConstVal->isNullValue())
    return getZeroVector(VT, DAG, dl);
  if (ConstVal->isAllOnesValue())
    return getOnesVector(VT, DAG, dl);

  // Choose the integer vector type with VT's total width and Val's
  // element width.
  unsigned EltBits = Val.getValueType().getSizeInBits();
  EVT CanonicalVT;
  if (VT.is64BitVector()) {
    switch (EltBits) {
    case 8:  CanonicalVT = MVT::v8i8;  break;
    case 16: CanonicalVT = MVT::v4i16; break;
    case 32: CanonicalVT = MVT::v2i32; break;
    case 64: CanonicalVT = MVT::v1i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  } else {
    assert(VT.is128BitVector() && "unknown splat vector size");
    switch (EltBits) {
    case 8:  CanonicalVT = MVT::v16i8; break;
    case 16: CanonicalVT = MVT::v8i16; break;
    case 32: CanonicalVT = MVT::v4i32; break;
    case 64: CanonicalVT = MVT::v2i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  }

  // Build a canonical splat for this value.
  SmallVector<SDValue, 8> Ops(CanonicalVT.getVectorNumElements(), Val);
  SDValue Splat = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
                              Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Splat);
}
|
|
|
|
|
|
|
|
// If this is a case we can't handle, return null and let the default
|
|
|
|
// expansion code take care of it.
|
|
|
|
static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
|
2009-08-13 09:57:47 +08:00
|
|
|
BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
|
2009-06-23 07:27:02 +08:00
|
|
|
DebugLoc dl = Op.getDebugLoc();
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT = Op.getValueType();
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
APInt SplatBits, SplatUndef;
|
|
|
|
unsigned SplatBitSize;
|
|
|
|
bool HasAnyUndefs;
|
|
|
|
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
|
|
|
|
SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
|
|
|
|
SplatUndef.getZExtValue(), SplatBitSize, DAG);
|
|
|
|
if (Val.getNode())
|
2009-07-30 08:31:25 +08:00
|
|
|
return BuildSplat(Val, VT, DAG, dl);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there are only 2 elements in a 128-bit vector, insert them into an
|
|
|
|
// undef vector. This handles the common case for 128-bit vector argument
|
|
|
|
// passing, where the insertions should be translated to subreg accesses
|
|
|
|
// with no real instructions.
|
|
|
|
if (VT.is128BitVector() && Op.getNumOperands() == 2) {
|
|
|
|
SDValue Val = DAG.getUNDEF(VT);
|
|
|
|
SDValue Op0 = Op.getOperand(0);
|
|
|
|
SDValue Op1 = Op.getOperand(1);
|
|
|
|
if (Op0.getOpcode() != ISD::UNDEF)
|
|
|
|
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
|
|
|
|
DAG.getIntPtrConstant(0));
|
|
|
|
if (Op1.getOpcode() != ISD::UNDEF)
|
|
|
|
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
|
|
|
|
DAG.getIntPtrConstant(1));
|
|
|
|
return Val;
|
2009-06-23 07:27:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  unsigned RevOpc = 0;
  if (isVREVMask(SVN, 64))
    RevOpc = ARMISD::VREV64;
  else if (isVREVMask(SVN, 32))
    RevOpc = ARMISD::VREV32;
  else if (isVREVMask(SVN, 16))
    RevOpc = ARMISD::VREV16;
  if (RevOpc != 0)
    return DAG.getNode(RevOpc, dl, VT, SVN->getOperand(0));

  return Op;
}
|
|
|
|
|
|
|
|
// SCALAR_TO_VECTOR needs no rewriting here: the node is returned unchanged
// and handled as-is later.  (Presumably matched directly during instruction
// selection — confirm against the ARM ISel patterns.)
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  return Op;
}
|
|
|
|
|
|
|
|
/// LowerEXTRACT_VECTOR_ELT - Lower an i8/i16 element extract via the
/// unsigned NEON get-lane node, which produces an i32 result.
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  assert((VT == MVT::i8 || VT == MVT::i16) &&
         "unexpected type for custom-lowering vector extract");

  SDValue Vec = Op.getOperand(0);
  SDValue Lane = Op.getOperand(1);
  // Extract zero-extended into an i32, record the zero-extension so later
  // passes can exploit it, then truncate back to the element type.
  SDValue Extract = DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  SDValue Asserted = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Asserted);
}
|
|
|
|
|
2009-08-04 04:36:38 +08:00
|
|
|
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  DebugLoc dl = Op.getDebugLoc();

  // Insert each defined 64-bit half (viewed as an f64) into a v2f64,
  // then bitconvert to the requested result type.
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  for (unsigned i = 0; i != 2; ++i) {
    SDValue Half = Op.getOperand(i);
    if (Half.getOpcode() == ISD::UNDEF)
      continue;
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Half),
                      DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
/// LowerOperation - Dispatch a custom-lowered node to the matching Lower*
/// helper.  Opcodes that fall out of the switch (RETURNADDR) return an empty
/// SDValue, telling the legalizer to use its default expansion.
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    // Darwin and ELF use different PIC/indirection schemes.
    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
      LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG, Subtarget);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG, Subtarget);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    break;   // fall through: default expansion
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  }
  return SDValue();
}
|
|
|
|
|
2008-12-01 19:39:25 +08:00
|
|
|
/// ReplaceNodeResults - Replace the results of node with an illegal result
|
|
|
|
/// type with new values built out of custom code.
|
|
|
|
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
|
|
|
|
SmallVectorImpl<SDValue>&Results,
|
|
|
|
SelectionDAG &DAG) {
|
2007-11-24 15:07:01 +08:00
|
|
|
switch (N->getOpcode()) {
|
2008-12-01 19:39:25 +08:00
|
|
|
default:
|
2009-07-15 00:55:14 +08:00
|
|
|
llvm_unreachable("Don't know how to custom expand this!");
|
2008-12-01 19:39:25 +08:00
|
|
|
return;
|
|
|
|
case ISD::BIT_CONVERT:
|
|
|
|
Results.push_back(ExpandBIT_CONVERT(N, DAG));
|
|
|
|
return;
|
2007-11-24 15:07:01 +08:00
|
|
|
case ISD::SRL:
|
2008-12-01 19:39:25 +08:00
|
|
|
case ISD::SRA: {
|
2009-06-23 07:27:02 +08:00
|
|
|
SDValue Res = LowerShift(N, DAG, Subtarget);
|
2008-12-01 19:39:25 +08:00
|
|
|
if (Res.getNode())
|
|
|
|
Results.push_back(Res);
|
|
|
|
return;
|
|
|
|
}
|
2007-11-24 15:07:01 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ARM Scheduler Hooks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// EmitInstrWithCustomInserter - Expand pseudo-instructions that need custom
/// MachineBasicBlock-level insertion: the Thumb conditional-move pseudo
/// (expanded into a diamond CFG) and the Thumb/Thumb2 SP-arithmetic pseudos
/// (expanded into copies through the real SP register).
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;   // insertion point for the new blocks: right after BB

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    // Operand 3 is the condition code, operand 4 the CPSR register.
    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
           e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::tANDsp:
  case ARM::tADDspr_:
  case ARM::tSUBspi_:
  case ARM::t2SUBrSPi_:
  case ARM::t2SUBrSPi12_:
  case ARM::t2SUBrSPs_: {
    MachineFunction *MF = BB->getParent();
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    bool DstIsDead = MI->getOperand(0).isDead();
    bool SrcIsKill = MI->getOperand(1).isKill();

    if (SrcReg != ARM::SP) {
      // Copy the source to SP from virtual register.
      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
      unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
        ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
      BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
        .addReg(SrcReg, getKillRegState(SrcIsKill));
    }

    // Map each pseudo to its real SP-based opcode and note which of the
    // predicate / condition-code / third-operand slots it requires.
    unsigned OpOpc = 0;
    bool NeedPred = false, NeedCC = false, NeedOp3 = false;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unexpected pseudo instruction!");
    case ARM::tANDsp:
      OpOpc = ARM::tAND;
      NeedPred = true;
      break;
    case ARM::tADDspr_:
      OpOpc = ARM::tADDspr;
      break;
    case ARM::tSUBspi_:
      OpOpc = ARM::tSUBspi;
      break;
    case ARM::t2SUBrSPi_:
      OpOpc = ARM::t2SUBrSPi;
      NeedPred = true; NeedCC = true;
      break;
    case ARM::t2SUBrSPi12_:
      OpOpc = ARM::t2SUBrSPi12;
      NeedPred = true;
      break;
    case ARM::t2SUBrSPs_:
      OpOpc = ARM::t2SUBrSPs;
      NeedPred = true; NeedCC = true; NeedOp3 = true;
      break;
    }
    MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
    if (OpOpc == ARM::tAND)
      AddDefaultT1CC(MIB);
    MIB.addReg(ARM::SP);
    MIB.addOperand(MI->getOperand(2));
    if (NeedOp3)
      MIB.addOperand(MI->getOperand(3));
    if (NeedPred)
      AddDefaultPred(MIB);
    if (NeedCC)
      AddDefaultCC(MIB);

    // Copy the result from SP to virtual register.
    const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
    unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
      ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
    BuildMI(BB, dl, TII->get(CopyOpc))
      .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
      .addReg(ARM::SP);
    MF->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
    return BB;
  }
  }
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ARM Optimization Hooks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
Move 3 "(add (select cc, 0, c), x) -> (select cc, x, (add, x, c))"
related transformations out of target-specific dag combine into the
ARM backend. These were added by Evan in r37685 with no testcases
and only seems to help ARM (e.g. test/CodeGen/ARM/select_xform.ll).
Add some simple X86-specific (for now) DAG combines that turn things
like cond ? 8 : 0 -> (zext(cond) << 3). This happens frequently
with the recently added cp constant select optimization, but is a
very general xform. For example, we now compile the second example
in const-select.ll to:
_test:
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
seta %al
movzbl %al, %eax
movl 4(%esp), %ecx
movsbl (%ecx,%eax,4), %eax
ret
instead of:
_test:
movl 4(%esp), %eax
leal 4(%eax), %ecx
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
cmovbe %eax, %ecx
movsbl (%ecx), %eax
ret
This passes multisource and dejagnu.
llvm-svn: 66779
2009-03-12 14:52:53 +08:00
|
|
|
/// combineSelectAndUse - Fold (add (select cc, 0, c), x) into
/// (select cc, x, (add x, c)) and the analogous SUB pattern, so the
/// arithmetic only happens on the path that needs it.  Slct is the
/// SELECT or SELECT_CC operand of N; OtherOp is N's other operand.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
  // SELECT_CC carries (lhs, rhs, trueval, falseval, cc);
  // SELECT carries (cond, trueval, falseval).
  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
  ISD::CondCode CC = ISD::SETCC_INVALID;

  if (isSlctCC) {
    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
  } else {
    // For a plain SELECT, a condition code is only recoverable when the
    // condition itself is a SETCC.
    SDValue CCOp = Slct.getOperand(0);
    if (CCOp.getOpcode() == ISD::SETCC)
      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
  }

  bool DoXform = false;
  bool InvCC = false;
  // SUB is only foldable when the select is the second operand
  // (x - select), since subtraction is not commutative.
  assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
          "Bad input!");

  if (LHS.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(LHS)->isNullValue()) {
    // select cc, 0, c: fold directly.
    DoXform = true;
  } else if (CC != ISD::SETCC_INVALID &&
             RHS.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHS)->isNullValue()) {
    // select cc, c, 0: fold with the inverted condition.
    std::swap(LHS, RHS);
    SDValue Op0 = Slct.getOperand(0);
    EVT OpVT = isSlctCC ? Op0.getValueType() :
                          Op0.getOperand(0).getValueType();
    bool isInt = OpVT.isInteger();
    CC = ISD::getSetCCInverse(CC, isInt);

    if (!TLI.isCondCodeLegal(CC, OpVT))
      return SDValue();         // Inverse operator isn't legal.

    DoXform = true;
    InvCC = true;
  }

  if (DoXform) {
    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
    if (isSlctCC)
      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
                             Slct.getOperand(0), Slct.getOperand(1), CC);
    SDValue CCOp = Slct.getOperand(0);
    if (InvCC)
      // Rebuild the SETCC with the inverted condition code.
      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
                       CCOp, OtherOp, Result);
  }
  return SDValue();
}
|
|
|
|
|
|
|
|
/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
|
|
|
|
static SDValue PerformADDCombine(SDNode *N,
|
|
|
|
TargetLowering::DAGCombinerInfo &DCI) {
|
|
|
|
// added by evan in r37685 with no testcase.
|
|
|
|
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
|
2009-03-21 06:42:55 +08:00
|
|
|
|
Move 3 "(add (select cc, 0, c), x) -> (select cc, x, (add, x, c))"
related transformations out of target-specific dag combine into the
ARM backend. These were added by Evan in r37685 with no testcases
and only seems to help ARM (e.g. test/CodeGen/ARM/select_xform.ll).
Add some simple X86-specific (for now) DAG combines that turn things
like cond ? 8 : 0 -> (zext(cond) << 3). This happens frequently
with the recently added cp constant select optimization, but is a
very general xform. For example, we now compile the second example
in const-select.ll to:
_test:
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
seta %al
movzbl %al, %eax
movl 4(%esp), %ecx
movsbl (%ecx,%eax,4), %eax
ret
instead of:
_test:
movl 4(%esp), %eax
leal 4(%eax), %ecx
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
cmovbe %eax, %ecx
movsbl (%ecx), %eax
ret
This passes multisource and dejagnu.
llvm-svn: 66779
2009-03-12 14:52:53 +08:00
|
|
|
// fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
|
|
|
|
if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
|
|
|
|
SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
|
|
|
|
if (Result.getNode()) return Result;
|
|
|
|
}
|
|
|
|
if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
|
|
|
|
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
|
|
|
|
if (Result.getNode()) return Result;
|
|
|
|
}
|
2009-03-21 06:42:55 +08:00
|
|
|
|
Move 3 "(add (select cc, 0, c), x) -> (select cc, x, (add, x, c))"
related transformations out of target-specific dag combine into the
ARM backend. These were added by Evan in r37685 with no testcases
and only seems to help ARM (e.g. test/CodeGen/ARM/select_xform.ll).
Add some simple X86-specific (for now) DAG combines that turn things
like cond ? 8 : 0 -> (zext(cond) << 3). This happens frequently
with the recently added cp constant select optimization, but is a
very general xform. For example, we now compile the second example
in const-select.ll to:
_test:
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
seta %al
movzbl %al, %eax
movl 4(%esp), %ecx
movsbl (%ecx,%eax,4), %eax
ret
instead of:
_test:
movl 4(%esp), %eax
leal 4(%eax), %ecx
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
cmovbe %eax, %ecx
movsbl (%ecx), %eax
ret
This passes multisource and dejagnu.
llvm-svn: 66779
2009-03-12 14:52:53 +08:00
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
|
|
|
|
static SDValue PerformSUBCombine(SDNode *N,
|
|
|
|
TargetLowering::DAGCombinerInfo &DCI) {
|
|
|
|
// added by evan in r37685 with no testcase.
|
|
|
|
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
|
2009-03-21 06:42:55 +08:00
|
|
|
|
Move 3 "(add (select cc, 0, c), x) -> (select cc, x, (add, x, c))"
related transformations out of target-specific dag combine into the
ARM backend. These were added by Evan in r37685 with no testcases
and only seems to help ARM (e.g. test/CodeGen/ARM/select_xform.ll).
Add some simple X86-specific (for now) DAG combines that turn things
like cond ? 8 : 0 -> (zext(cond) << 3). This happens frequently
with the recently added cp constant select optimization, but is a
very general xform. For example, we now compile the second example
in const-select.ll to:
_test:
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
seta %al
movzbl %al, %eax
movl 4(%esp), %ecx
movsbl (%ecx,%eax,4), %eax
ret
instead of:
_test:
movl 4(%esp), %eax
leal 4(%eax), %ecx
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
cmovbe %eax, %ecx
movsbl (%ecx), %eax
ret
This passes multisource and dejagnu.
llvm-svn: 66779
2009-03-12 14:52:53 +08:00
|
|
|
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
|
|
|
|
if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
|
|
|
|
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
|
|
|
|
if (Result.getNode()) return Result;
|
|
|
|
}
|
2009-03-21 06:42:55 +08:00
|
|
|
|
Move 3 "(add (select cc, 0, c), x) -> (select cc, x, (add, x, c))"
related transformations out of target-specific dag combine into the
ARM backend. These were added by Evan in r37685 with no testcases
and only seems to help ARM (e.g. test/CodeGen/ARM/select_xform.ll).
Add some simple X86-specific (for now) DAG combines that turn things
like cond ? 8 : 0 -> (zext(cond) << 3). This happens frequently
with the recently added cp constant select optimization, but is a
very general xform. For example, we now compile the second example
in const-select.ll to:
_test:
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
seta %al
movzbl %al, %eax
movl 4(%esp), %ecx
movsbl (%ecx,%eax,4), %eax
ret
instead of:
_test:
movl 4(%esp), %eax
leal 4(%eax), %ecx
movsd LCPI2_0, %xmm0
ucomisd 8(%esp), %xmm0
cmovbe %eax, %ecx
movsbl (%ecx), %eax
ret
This passes multisource and dejagnu.
llvm-svn: 66779
2009-03-12 14:52:53 +08:00
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-11-28 06:36:16 +08:00
|
|
|
/// PerformFMRRDCombine - Target-specific dag combine xforms for ARMISD::FMRRD.
|
2009-03-21 06:42:55 +08:00
|
|
|
static SDValue PerformFMRRDCombine(SDNode *N,
|
|
|
|
TargetLowering::DAGCombinerInfo &DCI) {
|
2007-11-28 06:36:16 +08:00
|
|
|
// fmrrd(fmdrr x, y) -> x,y
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue InDouble = N->getOperand(0);
|
2007-11-28 06:36:16 +08:00
|
|
|
if (InDouble.getOpcode() == ARMISD::FMDRR)
|
|
|
|
return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
|
2008-07-28 05:46:04 +08:00
|
|
|
return SDValue();
|
2007-11-28 06:36:16 +08:00
|
|
|
}
|
|
|
|
|
2009-06-23 07:27:02 +08:00
|
|
|
/// getVShiftImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift operation, where all the elements of the
|
|
|
|
/// build_vector must have the same constant integer value.
|
|
|
|
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
|
|
|
|
// Ignore bit_converts.
|
|
|
|
while (Op.getOpcode() == ISD::BIT_CONVERT)
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
|
|
|
|
APInt SplatBits, SplatUndef;
|
|
|
|
unsigned SplatBitSize;
|
|
|
|
bool HasAnyUndefs;
|
|
|
|
if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
|
|
|
|
HasAnyUndefs, ElementBits) ||
|
|
|
|
SplatBitSize > ElementBits)
|
|
|
|
return false;
|
|
|
|
Cnt = SplatBits.getSExtValue();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isVShiftLImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift left operation. That value must be in the range:
|
|
|
|
/// 0 <= Value < ElementBits for a left shift; or
|
|
|
|
/// 0 <= Value <= ElementBits for a long left shift.
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
|
2009-06-23 07:27:02 +08:00
|
|
|
assert(VT.isVector() && "vector shift count is not a vector type");
|
|
|
|
unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
|
|
|
|
if (! getVShiftImm(Op, ElementBits, Cnt))
|
|
|
|
return false;
|
|
|
|
return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isVShiftRImm - Check if this is a valid build_vector for the immediate
|
|
|
|
/// operand of a vector shift right operation. For a shift opcode, the value
|
|
|
|
/// is positive, but for an intrinsic the value count must be negative. The
|
|
|
|
/// absolute value must be in the range:
|
|
|
|
/// 1 <= |Value| <= ElementBits for a right shift; or
|
|
|
|
/// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
|
2009-06-23 07:27:02 +08:00
|
|
|
int64_t &Cnt) {
|
|
|
|
assert(VT.isVector() && "vector shift count is not a vector type");
|
|
|
|
unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
|
|
|
|
if (! getVShiftImm(Op, ElementBits, Cnt))
|
|
|
|
return false;
|
|
|
|
if (isIntrinsic)
|
|
|
|
Cnt = -Cnt;
|
|
|
|
return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  // Operand 0 of an INTRINSIC_WO_CHAIN node is the intrinsic ID.
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vshiftls:
  case Intrinsic::arm_neon_vshiftlu:
  case Intrinsic::arm_neon_vshiftn:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // First pass: validate that operand 2 is a constant-splat shift count of
    // the right kind for this intrinsic. For the plain vshifts/vshiftu the
    // opcode is also selected here, since the sign of the count decides
    // between a left and a right shift.
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      // Not an immediate shift; leave the intrinsic alone.
      return SDValue();

    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for vshll intrinsic");

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vshiftn:
    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    // Second pass: pick the target opcode for the intrinsics whose opcode was
    // not already chosen above. The shift count in Cnt is known valid here.
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      // A long shift by exactly the element width uses the dedicated form.
      if (Cnt == VT.getVectorElementType().getSizeInBits())
        VShiftOpc = ARMISD::VSHLLi;
      else
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
                     ARMISD::VSHLLs : ARMISD::VSHLLu);
      break;
    case Intrinsic::arm_neon_vshiftn:
      VShiftOpc = ARMISD::VSHRN; break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    // Replace the intrinsic with the target node carrying an i32 immediate.
    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // Shift-and-insert: the count (operand 3) selects VSLI vs VSRI.
    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}
|
|
|
|
|
|
|
|
/// PerformShiftCombine - Checks for immediate versions of vector shifts and
|
|
|
|
/// lowers them. As with the vector shift intrinsics, this is done during DAG
|
|
|
|
/// combining instead of DAG legalizing because the build_vectors for 64-bit
|
|
|
|
/// vector element shift counts are generally not legal, and it is hard to see
|
|
|
|
/// their values after they get legalized to loads from a constant pool.
|
|
|
|
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
|
|
|
|
const ARMSubtarget *ST) {
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT = N->getValueType(0);
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
// Nothing to be done for scalar shifts.
|
|
|
|
if (! VT.isVector())
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
assert(ST->hasNEON() && "unexpected vector shift");
|
|
|
|
int64_t Cnt;
|
|
|
|
|
|
|
|
switch (N->getOpcode()) {
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("unexpected shift opcode");
|
2009-06-23 07:27:02 +08:00
|
|
|
|
|
|
|
case ISD::SHL:
|
|
|
|
if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
|
|
|
|
return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
|
2009-08-12 04:47:22 +08:00
|
|
|
DAG.getConstant(Cnt, MVT::i32));
|
2009-06-23 07:27:02 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case ISD::SRA:
|
|
|
|
case ISD::SRL:
|
|
|
|
if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
|
|
|
|
unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
|
|
|
|
ARMISD::VSHRs : ARMISD::VSHRu);
|
|
|
|
return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
|
2009-08-12 04:47:22 +08:00
|
|
|
DAG.getConstant(Cnt, MVT::i32));
|
2009-06-23 07:27:02 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
|
|
|
|
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
|
|
|
|
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
|
|
|
|
const ARMSubtarget *ST) {
|
|
|
|
SDValue N0 = N->getOperand(0);
|
|
|
|
|
|
|
|
// Check for sign- and zero-extensions of vector extract operations of 8-
|
|
|
|
// and 16-bit vector elements. NEON supports these directly. They are
|
|
|
|
// handled during DAG combining because type legalization will promote them
|
|
|
|
// to 32-bit types and it is messy to recognize the operations after that.
|
|
|
|
if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
|
|
|
|
SDValue Vec = N0.getOperand(0);
|
|
|
|
SDValue Lane = N0.getOperand(1);
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT = N->getValueType(0);
|
|
|
|
EVT EltVT = N0.getValueType();
|
2009-06-23 07:27:02 +08:00
|
|
|
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
|
|
|
|
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::i32 &&
|
|
|
|
(EltVT == MVT::i8 || EltVT == MVT::i16) &&
|
2009-06-23 07:27:02 +08:00
|
|
|
TLI.isTypeLegal(Vec.getValueType())) {
|
|
|
|
|
|
|
|
unsigned Opc = 0;
|
|
|
|
switch (N->getOpcode()) {
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("unexpected opcode");
|
2009-06-23 07:27:02 +08:00
|
|
|
case ISD::SIGN_EXTEND:
|
|
|
|
Opc = ARMISD::VGETLANEs;
|
|
|
|
break;
|
|
|
|
case ISD::ZERO_EXTEND:
|
|
|
|
case ISD::ANY_EXTEND:
|
|
|
|
Opc = ARMISD::VGETLANEu;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDValue();
|
|
|
|
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  // Dispatch each opcode we have a target-specific combine for.
  unsigned Opc = N->getOpcode();
  if (Opc == ISD::ADD)
    return PerformADDCombine(N, DCI);
  if (Opc == ISD::SUB)
    return PerformSUBCombine(N, DCI);
  if (Opc == ARMISD::FMRRD)
    return PerformFMRRDCombine(N, DCI);
  if (Opc == ISD::INTRINSIC_WO_CHAIN)
    return PerformIntrinsicCombine(N, DCI.DAG);
  if (Opc == ISD::SHL || Opc == ISD::SRA || Opc == ISD::SRL)
    return PerformShiftCombine(N, DCI.DAG, Subtarget);
  if (Opc == ISD::SIGN_EXTEND || Opc == ISD::ZERO_EXTEND ||
      Opc == ISD::ANY_EXTEND)
    return PerformExtendCombine(N, DCI.DAG, Subtarget);
  return SDValue();
}
|
|
|
|
|
2007-03-13 07:30:29 +08:00
|
|
|
/// isLegalAddressImmediate - Return true if the integer value can be used
|
|
|
|
/// as the offset of the target addressing mode for load / store of the
|
|
|
|
/// given type.
|
2009-08-11 06:56:29 +08:00
|
|
|
static bool isLegalAddressImmediate(int64_t V, EVT VT,
|
2007-04-10 07:33:39 +08:00
|
|
|
const ARMSubtarget *Subtarget) {
|
2007-03-14 04:37:59 +08:00
|
|
|
if (V == 0)
|
|
|
|
return true;
|
|
|
|
|
2009-03-10 03:15:00 +08:00
|
|
|
if (!VT.isSimple())
|
|
|
|
return false;
|
|
|
|
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb()) { // FIXME for thumb2
|
2007-03-13 07:30:29 +08:00
|
|
|
if (V < 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned Scale = 1;
|
2009-08-12 04:47:22 +08:00
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
2007-03-13 07:30:29 +08:00
|
|
|
default: return false;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
2007-03-13 07:30:29 +08:00
|
|
|
// Scale == 1;
|
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i16:
|
2007-03-13 07:30:29 +08:00
|
|
|
// Scale == 2;
|
|
|
|
Scale = 2;
|
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i32:
|
2007-03-13 07:30:29 +08:00
|
|
|
// Scale == 4;
|
|
|
|
Scale = 4;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((V & (Scale - 1)) != 0)
|
|
|
|
return false;
|
|
|
|
V /= Scale;
|
2008-02-20 19:22:39 +08:00
|
|
|
return V == (V & ((1LL << 5) - 1));
|
2007-03-13 07:30:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (V < 0)
|
|
|
|
V = - V;
|
2009-08-12 04:47:22 +08:00
|
|
|
switch (VT.getSimpleVT().SimpleTy) {
|
2007-03-13 07:30:29 +08:00
|
|
|
default: return false;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i1:
|
|
|
|
case MVT::i8:
|
|
|
|
case MVT::i32:
|
2007-03-13 07:30:29 +08:00
|
|
|
// +- imm12
|
2008-02-20 19:22:39 +08:00
|
|
|
return V == (V & ((1LL << 12) - 1));
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i16:
|
2007-03-13 07:30:29 +08:00
|
|
|
// +- imm8
|
2008-02-20 19:22:39 +08:00
|
|
|
return V == (V & ((1LL << 8) - 1));
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::f32:
|
|
|
|
case MVT::f64:
|
2007-03-13 07:30:29 +08:00
|
|
|
if (!Subtarget->hasVFP2())
|
|
|
|
return false;
|
2007-05-03 10:00:18 +08:00
|
|
|
if ((V & 3) != 0)
|
2007-03-13 07:30:29 +08:00
|
|
|
return false;
|
|
|
|
V >>= 2;
|
2008-02-20 19:22:39 +08:00
|
|
|
return V == (V & ((1LL << 8) - 1));
|
2007-03-13 07:30:29 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2007-04-10 07:33:39 +08:00
|
|
|
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  EVT VT = getValueType(Ty, true);
  // The constant offset must be encodable for this value type.
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb())  // FIXME for thumb2
      return false;
    // FALL THROUGH.
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    // Scaled register forms: legality depends on the access type.
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
    case MVT::i64:
      // This assumes i64 is legalized to a pair of i32. If not (i.e.
      // ldrd / strd are used, then its address mode is same as i16.
      // r + r
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      // (Scale & ~1 clears the low bit, so scale 2*2^n is also accepted.)
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
      // r + r
      // Only a plain register pair (no shifted index) is accepted here.
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations. This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (AM.Scale & 1) return false;
      return isPowerOf2_32(AM.Scale);
    }
    break;
  }
  return true;
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// getARMIndexedAddressParts - Decompose an address of the form
/// (add/sub base, offset) into Base/Offset operands for an ARM pre/post
/// indexed load or store, setting isInc to true for an increment. Returns
/// false if the address cannot be split this way.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  // Only an add or subtract of the base can become an indexed access.
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        // A small negative constant only appears under ISD::ADD; rewrite it
        // as a positive offset with a decrement.
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        // As above: fold a small negative add as a decrement.
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      // If one operand is a shift, put it in the offset position —
      // presumably so the shift can be folded into the addressing mode.
      ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use FLDM / FSTM to emulate indexed FP load / store.
  return false;
}
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
/// getT2IndexedAddressParts - Thumb-2 variant of the indexed-address
/// decomposition: only an 8-bit constant offset is accepted.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  unsigned PtrOpc = Ptr->getOpcode();
  if (PtrOpc != ISD::ADD && PtrOpc != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1));
  if (!RHS)
    return false;

  int RHSC = (int)RHS->getZExtValue();
  if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
    // Negative constants only occur under ISD::ADD; encode as a decrement.
    assert(PtrOpc == ISD::ADD);
    isInc = false;
    Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
    return true;
  }
  if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
    isInc = PtrOpc == ISD::ADD;
    Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
    return true;
  }
  return false;
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// getPreIndexedAddressParts - returns true by value, base pointer and
|
|
|
|
/// offset pointer and addressing mode by reference if the node's address
|
|
|
|
/// can be legally represented as pre-indexed load / store address.
|
|
|
|
bool
|
2008-07-28 05:46:04 +08:00
|
|
|
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
|
|
|
|
SDValue &Offset,
|
2007-01-19 15:51:42 +08:00
|
|
|
ISD::MemIndexedMode &AM,
|
2009-01-16 00:29:45 +08:00
|
|
|
SelectionDAG &DAG) const {
|
2009-07-02 15:28:31 +08:00
|
|
|
if (Subtarget->isThumb1Only())
|
2007-01-19 15:51:42 +08:00
|
|
|
return false;
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT;
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue Ptr;
|
2007-01-19 15:51:42 +08:00
|
|
|
bool isSEXTLoad = false;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
|
|
|
Ptr = LD->getBasePtr();
|
2008-01-30 08:15:11 +08:00
|
|
|
VT = LD->getMemoryVT();
|
2007-01-19 15:51:42 +08:00
|
|
|
isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
|
|
|
Ptr = ST->getBasePtr();
|
2008-01-30 08:15:11 +08:00
|
|
|
VT = ST->getMemoryVT();
|
2007-01-19 15:51:42 +08:00
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isInc;
|
2009-07-02 15:28:31 +08:00
|
|
|
bool isLegal = false;
|
2009-07-07 09:17:28 +08:00
|
|
|
if (Subtarget->isThumb() && Subtarget->hasThumb2())
|
2009-07-02 15:28:31 +08:00
|
|
|
isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
|
|
|
|
Offset, isInc, DAG);
|
2009-08-11 23:33:49 +08:00
|
|
|
else
|
2009-07-02 15:28:31 +08:00
|
|
|
isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
|
2009-07-02 14:44:30 +08:00
|
|
|
Offset, isInc, DAG);
|
2009-07-02 15:28:31 +08:00
|
|
|
if (!isLegal)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
|
|
|
|
return true;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// getPostIndexedAddressParts - returns true by value, base pointer and
|
|
|
|
/// offset pointer and addressing mode by reference if this node can be
|
|
|
|
/// combined with a load / store to form a post-indexed load / store.
|
|
|
|
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue &Base,
|
|
|
|
SDValue &Offset,
|
2007-01-19 15:51:42 +08:00
|
|
|
ISD::MemIndexedMode &AM,
|
2009-01-16 00:29:45 +08:00
|
|
|
SelectionDAG &DAG) const {
|
2009-07-02 15:28:31 +08:00
|
|
|
if (Subtarget->isThumb1Only())
|
2007-01-19 15:51:42 +08:00
|
|
|
return false;
|
|
|
|
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT;
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue Ptr;
|
2007-01-19 15:51:42 +08:00
|
|
|
bool isSEXTLoad = false;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
2008-01-30 08:15:11 +08:00
|
|
|
VT = LD->getMemoryVT();
|
2007-01-19 15:51:42 +08:00
|
|
|
isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
2008-01-30 08:15:11 +08:00
|
|
|
VT = ST->getMemoryVT();
|
2007-01-19 15:51:42 +08:00
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isInc;
|
2009-07-02 15:28:31 +08:00
|
|
|
bool isLegal = false;
|
2009-07-07 09:17:28 +08:00
|
|
|
if (Subtarget->isThumb() && Subtarget->hasThumb2())
|
2009-07-02 15:28:31 +08:00
|
|
|
isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
|
2007-01-19 15:51:42 +08:00
|
|
|
isInc, DAG);
|
2009-08-11 23:33:49 +08:00
|
|
|
else
|
2009-07-02 15:28:31 +08:00
|
|
|
isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
|
|
|
|
isInc, DAG);
|
|
|
|
if (!isLegal)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
|
|
|
|
return true;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2008-07-28 05:46:04 +08:00
|
|
|
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
|
2008-02-14 06:28:48 +08:00
|
|
|
const APInt &Mask,
|
2009-03-21 06:42:55 +08:00
|
|
|
APInt &KnownZero,
|
2008-02-13 08:35:47 +08:00
|
|
|
APInt &KnownOne,
|
2007-06-22 22:59:07 +08:00
|
|
|
const SelectionDAG &DAG,
|
2007-01-19 15:51:42 +08:00
|
|
|
unsigned Depth) const {
|
2008-02-13 08:35:47 +08:00
|
|
|
KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
|
2007-01-19 15:51:42 +08:00
|
|
|
switch (Op.getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARMISD::CMOV: {
|
|
|
|
// Bits are known zero/one if known on the LHS and RHS.
|
2007-06-22 22:59:07 +08:00
|
|
|
DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
|
2007-01-19 15:51:42 +08:00
|
|
|
if (KnownZero == 0 && KnownOne == 0) return;
|
|
|
|
|
2008-02-13 08:35:47 +08:00
|
|
|
APInt KnownZeroRHS, KnownOneRHS;
|
2007-06-22 22:59:07 +08:00
|
|
|
DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
|
|
|
|
KnownZeroRHS, KnownOneRHS, Depth+1);
|
2007-01-19 15:51:42 +08:00
|
|
|
KnownZero &= KnownZeroRHS;
|
|
|
|
KnownOne &= KnownOneRHS;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ARM Inline Assembly Support
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// getConstraintType - Given a constraint letter, return the type of
|
|
|
|
/// constraint it is for this target.
|
|
|
|
ARMTargetLowering::ConstraintType
|
2007-03-25 10:14:49 +08:00
|
|
|
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
|
|
|
|
if (Constraint.size() == 1) {
|
|
|
|
switch (Constraint[0]) {
|
|
|
|
default: break;
|
|
|
|
case 'l': return C_RegisterClass;
|
2007-04-03 01:24:08 +08:00
|
|
|
case 'w': return C_RegisterClass;
|
2007-03-25 10:14:49 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
2007-03-25 10:14:49 +08:00
|
|
|
return TargetLowering::getConstraintType(Constraint);
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2009-03-21 06:42:55 +08:00
|
|
|
std::pair<unsigned, const TargetRegisterClass*>
|
2007-01-19 15:51:42 +08:00
|
|
|
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT) const {
|
2007-01-19 15:51:42 +08:00
|
|
|
if (Constraint.size() == 1) {
|
|
|
|
// GCC RS6000 Constraint Letters
|
|
|
|
switch (Constraint[0]) {
|
2007-04-03 01:24:08 +08:00
|
|
|
case 'l':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb1Only())
|
2009-04-08 04:34:09 +08:00
|
|
|
return std::make_pair(0U, ARM::tGPRRegisterClass);
|
|
|
|
else
|
|
|
|
return std::make_pair(0U, ARM::GPRRegisterClass);
|
2007-04-03 01:24:08 +08:00
|
|
|
case 'r':
|
|
|
|
return std::make_pair(0U, ARM::GPRRegisterClass);
|
|
|
|
case 'w':
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::f32)
|
2007-04-03 01:24:08 +08:00
|
|
|
return std::make_pair(0U, ARM::SPRRegisterClass);
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::f64)
|
2007-04-03 01:24:08 +08:00
|
|
|
return std::make_pair(0U, ARM::DPRRegisterClass);
|
|
|
|
break;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<unsigned> ARMTargetLowering::
|
|
|
|
getRegClassForInlineAsmConstraint(const std::string &Constraint,
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT VT) const {
|
2007-01-19 15:51:42 +08:00
|
|
|
if (Constraint.size() != 1)
|
|
|
|
return std::vector<unsigned>();
|
|
|
|
|
|
|
|
switch (Constraint[0]) { // GCC ARM Constraint Letters
|
|
|
|
default: break;
|
|
|
|
case 'l':
|
2009-04-08 04:34:09 +08:00
|
|
|
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
|
|
|
|
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
|
|
|
|
0);
|
2007-01-19 15:51:42 +08:00
|
|
|
case 'r':
|
|
|
|
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
|
|
|
|
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
|
|
|
|
ARM::R8, ARM::R9, ARM::R10, ARM::R11,
|
|
|
|
ARM::R12, ARM::LR, 0);
|
2007-04-03 01:24:08 +08:00
|
|
|
case 'w':
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::f32)
|
2007-04-03 01:24:08 +08:00
|
|
|
return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
|
|
|
|
ARM::S4, ARM::S5, ARM::S6, ARM::S7,
|
|
|
|
ARM::S8, ARM::S9, ARM::S10, ARM::S11,
|
|
|
|
ARM::S12,ARM::S13,ARM::S14,ARM::S15,
|
|
|
|
ARM::S16,ARM::S17,ARM::S18,ARM::S19,
|
|
|
|
ARM::S20,ARM::S21,ARM::S22,ARM::S23,
|
|
|
|
ARM::S24,ARM::S25,ARM::S26,ARM::S27,
|
|
|
|
ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::f64)
|
2007-04-03 01:24:08 +08:00
|
|
|
return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
|
|
|
|
ARM::D4, ARM::D5, ARM::D6, ARM::D7,
|
|
|
|
ARM::D8, ARM::D9, ARM::D10,ARM::D11,
|
|
|
|
ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
|
|
|
|
break;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return std::vector<unsigned>();
|
|
|
|
}
|
2009-04-02 01:58:54 +08:00
|
|
|
|
|
|
|
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
|
|
|
|
/// vector. If it is invalid, don't add anything to Ops.
|
|
|
|
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
|
|
|
char Constraint,
|
|
|
|
bool hasMemory,
|
|
|
|
std::vector<SDValue>&Ops,
|
|
|
|
SelectionDAG &DAG) const {
|
|
|
|
SDValue Result(0, 0);
|
|
|
|
|
|
|
|
switch (Constraint) {
|
|
|
|
default: break;
|
|
|
|
case 'I': case 'J': case 'K': case 'L':
|
|
|
|
case 'M': case 'N': case 'O':
|
|
|
|
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
|
|
|
|
if (!C)
|
|
|
|
return;
|
|
|
|
|
|
|
|
int64_t CVal64 = C->getSExtValue();
|
|
|
|
int CVal = (int) CVal64;
|
|
|
|
// None of these constraints allow values larger than 32 bits. Check
|
|
|
|
// that the value fits in an int.
|
|
|
|
if (CVal != CVal64)
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (Constraint) {
|
|
|
|
case 'I':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
|
|
|
// This must be a constant between 0 and 255, for ADD
|
|
|
|
// immediates.
|
2009-04-02 01:58:54 +08:00
|
|
|
if (CVal >= 0 && CVal <= 255)
|
|
|
|
break;
|
2009-07-09 07:10:31 +08:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant that can be used as an immediate value in a
|
|
|
|
// data-processing instruction.
|
|
|
|
if (ARM_AM::getT2SOImmVal(CVal) != -1)
|
|
|
|
break;
|
2009-04-02 01:58:54 +08:00
|
|
|
} else {
|
|
|
|
// A constant that can be used as an immediate value in a
|
|
|
|
// data-processing instruction.
|
|
|
|
if (ARM_AM::getSOImmVal(CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'J':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-02 01:58:54 +08:00
|
|
|
// This must be a constant between -255 and -1, for negated ADD
|
|
|
|
// immediates. This can be used in GCC with an "n" modifier that
|
|
|
|
// prints the negated value, for use with SUB instructions. It is
|
|
|
|
// not useful otherwise but is implemented for compatibility.
|
|
|
|
if (CVal >= -255 && CVal <= -1)
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
// This must be a constant between -4095 and 4095. It is not clear
|
|
|
|
// what this constraint is intended for. Implemented for
|
|
|
|
// compatibility with GCC.
|
|
|
|
if (CVal >= -4095 && CVal <= 4095)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'K':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
2009-04-02 01:58:54 +08:00
|
|
|
// A 32-bit value where only one byte has a nonzero value. Exclude
|
|
|
|
// zero to match GCC. This constraint is used by GCC internally for
|
|
|
|
// constants that can be loaded with a move/shift combination.
|
|
|
|
// It is not useful otherwise but is implemented for compatibility.
|
|
|
|
if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
|
|
|
|
break;
|
2009-07-09 07:10:31 +08:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant whose bitwise inverse can be used as an immediate
|
|
|
|
// value in a data-processing instruction. This can be used in GCC
|
|
|
|
// with a "B" modifier that prints the inverted value, for use with
|
|
|
|
// BIC and MVN instructions. It is not useful otherwise but is
|
|
|
|
// implemented for compatibility.
|
|
|
|
if (ARM_AM::getT2SOImmVal(~CVal) != -1)
|
|
|
|
break;
|
2009-04-02 01:58:54 +08:00
|
|
|
} else {
|
|
|
|
// A constant whose bitwise inverse can be used as an immediate
|
|
|
|
// value in a data-processing instruction. This can be used in GCC
|
|
|
|
// with a "B" modifier that prints the inverted value, for use with
|
|
|
|
// BIC and MVN instructions. It is not useful otherwise but is
|
|
|
|
// implemented for compatibility.
|
|
|
|
if (ARM_AM::getSOImmVal(~CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'L':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb1Only()) {
|
2009-04-02 01:58:54 +08:00
|
|
|
// This must be a constant between -7 and 7,
|
|
|
|
// for 3-operand ADD/SUB immediate instructions.
|
|
|
|
if (CVal >= -7 && CVal < 7)
|
|
|
|
break;
|
2009-07-09 07:10:31 +08:00
|
|
|
} else if (Subtarget->isThumb2()) {
|
|
|
|
// A constant whose negation can be used as an immediate value in a
|
|
|
|
// data-processing instruction. This can be used in GCC with an "n"
|
|
|
|
// modifier that prints the negated value, for use with SUB
|
|
|
|
// instructions. It is not useful otherwise but is implemented for
|
|
|
|
// compatibility.
|
|
|
|
if (ARM_AM::getT2SOImmVal(-CVal) != -1)
|
|
|
|
break;
|
2009-04-02 01:58:54 +08:00
|
|
|
} else {
|
|
|
|
// A constant whose negation can be used as an immediate value in a
|
|
|
|
// data-processing instruction. This can be used in GCC with an "n"
|
|
|
|
// modifier that prints the negated value, for use with SUB
|
|
|
|
// instructions. It is not useful otherwise but is implemented for
|
|
|
|
// compatibility.
|
|
|
|
if (ARM_AM::getSOImmVal(-CVal) != -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'M':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-02 01:58:54 +08:00
|
|
|
// This must be a multiple of 4 between 0 and 1020, for
|
|
|
|
// ADD sp + immediate.
|
|
|
|
if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
// A power of two or a constant between 0 and 32. This is used in
|
|
|
|
// GCC for the shift amount on shifted register operands, but it is
|
|
|
|
// useful in general for any shift amounts.
|
|
|
|
if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'N':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-02 01:58:54 +08:00
|
|
|
// This must be a constant between 0 and 31, for shift amounts.
|
|
|
|
if (CVal >= 0 && CVal <= 31)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
case 'O':
|
2009-07-09 07:10:31 +08:00
|
|
|
if (Subtarget->isThumb()) { // FIXME thumb2
|
2009-04-02 01:58:54 +08:00
|
|
|
// This must be a multiple of 4 between -508 and 508, for
|
|
|
|
// ADD/SUB sp = sp + immediate.
|
|
|
|
if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Result = DAG.getTargetConstant(CVal, Op.getValueType());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Result.getNode()) {
|
|
|
|
Ops.push_back(Result);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
|
|
|
|
Ops, DAG);
|
|
|
|
}
|