//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
|
2006-01-17 05:21:29 +08:00
|
|
|
#include "X86InstrBuilder.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "X86ISelLowering.h"
|
2006-06-07 07:30:24 +08:00
|
|
|
#include "X86MachineFunctionInfo.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "X86TargetMachine.h"
|
|
|
|
#include "llvm/CallingConv.h"
|
2006-02-01 06:28:30 +08:00
|
|
|
#include "llvm/Constants.h"
|
2006-04-29 05:29:37 +08:00
|
|
|
#include "llvm/DerivedTypes.h"
|
2007-04-21 05:38:10 +08:00
|
|
|
#include "llvm/GlobalVariable.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "llvm/Function.h"
|
2006-04-06 07:38:46 +08:00
|
|
|
#include "llvm/Intrinsics.h"
|
2007-12-11 09:46:18 +08:00
|
|
|
#include "llvm/ADT/BitVector.h"
|
2006-03-14 07:18:16 +08:00
|
|
|
#include "llvm/ADT/VectorExtras.h"
|
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
|
2007-02-27 12:43:02 +08:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2006-01-11 08:33:36 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
|
|
|
#include "llvm/CodeGen/SSARegMap.h"
|
2006-01-31 11:14:29 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2007-10-12 03:40:01 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2007-12-11 09:46:18 +08:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2006-11-01 03:42:44 +08:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2007-07-28 04:02:49 +08:00
|
|
|
#include "llvm/ParameterAttributes.h"
|
2005-11-15 08:40:23 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // The MS runtime is weird: it exports _setjmp, but plain longjmp.
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);
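
  // x86 has no 1-bit sign-extending load; expand it.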
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer multiply, multiply-high, divide, and remainder are
  // lowered to use operations that produce two results, to match the
  // available instructions. This exposes the two-result form to trivial
  // CSE, which is able to combine x/y and x%y into a single instruction,
  // for example. The single-result multiply instructions are introduced
  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part
  // is not needed.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
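
  // FLT_ROUNDS is custom lowered: the current rounding mode is read out of
  // the FPU control word.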
  setOperationAction(ISD::FLT_ROUNDS, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Expand);
  setOperationAction(ISD::CTLZ, MVT::i8, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTLZ, MVT::i16, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  }
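
  // READCYCLECOUNTER is custom lowered to the RDTSC instruction.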
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
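  // There is no 16-bit BSWAP instruction, so i16 byte swaps are expanded.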
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // The X86 ret instruction may pop the stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  // X86 wants to expand memset / memcpy itself.
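  // (Custom lowering can then pick between inline rep-string sequences and a
  // library call, depending on the operands.)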
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  // Use the default ISD::LOCATION expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
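  // Registers the EH runtime expects for the exception pointer and selector;
  // the 64-bit choice is still unverified (see the FIXME below).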
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Conversions to long double (in x87) go through memory.
    setConvertAction(MVT::f32, MVT::f80, Expand);
    setConvertAction(MVT::f64, MVT::f80, Expand);

    // Conversions from long double (in x87) go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE->x87 conversions go through memory.
    setConvertAction(MVT::f32, MVT::f64, Expand);
    setConvertAction(MVT::f32, MVT::f80, Expand);

    // x87->SSE truncations need to go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f64, MVT::f32, Expand);
    // And x87->x87 truncations also.
    setConvertAction(MVT::f80, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations need to go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f64, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses x87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set the operation action for all vector types to Expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);
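
    // Promote bitwise ops on the narrower MMX types to v1i64 so that one
    // 64-bit MMX instruction pattern covers all of them.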
    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType(ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType(ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
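  // With a GOT, jump table entries are addressed relative to the global
  // offset table; otherwise non-RIP-relative PIC code uses the PIC base
  // register, and RIP-relative code can address the table directly.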
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
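    // Look through the token factor: if its first operand is the tail call,
    // return that node instead.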
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
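    // A TAILCALL node carries (chain, target address, stack adjustment,
    // registers used by the call..., flag); repackage those operands into a
    // TC_RETURN node.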
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.
    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
      SDOperand MemLoc;

      // If this is a load into a scalar-sse value, don't store the loaded
      // value back to the stack, only to reload it: just replace the
      // scalar-sse load.
      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into the top of the stack.
        unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
      SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
      Chain = Value.getValue(1);
    }

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
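      // CopyFromReg produces (value, chain, flag): getValue(1) above selected
      // the chain, getValue(2) is the out-flag for the next copy, and
      // getValue(0) is the returned value itself.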
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.
    SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
    // an XMM register.
    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When the stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      SDOperand Ops[] = {
        Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
      };
      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
      RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }
    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is standard for many Windows API routines.
//  It differs from the C calling convention only slightly: the callee cleans
//  up the stack instead of the caller, and symbols are decorated in some
//  fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation, LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

// Align stack arguments according to the platform alignment needed for tail
// calls.
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG);

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset());
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());

  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
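
  // For a byval argument the frame index itself is the argument value;
  // anything else is loaded from its stack slot.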
if (Flags & ISD::ParamFlags::ByVal)
|
|
|
|
return FIN;
|
|
|
|
else
|
|
|
|
return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
|
|
|
|
}
|
|
|
|
|

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                               bool isStdCall) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  // Check for possible tail call calling convention.
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_TailCall);
  else
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
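
  // Under -tailcallopt, fastcc formal arguments use the dedicated tail-call
  // assignment table so that caller and callee agree on the argument stack
  // layout; see the Fast Calling Convention (tail call) section below.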

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  ArgValues.push_back(Root);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);

  // The tail call calling convention (CallingConv::Fast) does not support
  // varargs.
  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "CallingConv::Fast does not support varargs.");
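
  // Decide how much of the argument area the callee pops on return: stdcall
  // (and fastcc under -tailcallopt) functions pop their entire argument area;
  // plain C-convention functions pop nothing, except that a struct-returning
  // callee pops the 4-byte hidden sret pointer.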
  if (isStdCall && !isVarArg &&
      (CC == CallingConv::Fast && PerformTailCallOpt ||
       CC != CallingConv::Fast)) {
    BytesToPopOnReturn = StackSize;  // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0;  // Callee pops nothing.

    // If this is an sret function, the return should pop the hidden pointer.
    if (NumArgs &&
        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
         ISD::ParamFlags::StructReturn))
      BytesToPopOnReturn = 4;

    BytesCallerReserves = StackSize;
  }

  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  // If the first argument is an sret pointer, remember it.
  bool isSRet = NumOps &&
    (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
     ISD::ParamFlags::StructReturn);

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls via PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall ||
      (CC == CallingConv::Fast && PerformTailCallOpt)) {
    if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
    else
      NumBytesForCalleeToPush = NumBytes;
    assert(!(isVarArg && CC == CallingConv::Fast) &&
           "CallingConv::Fast does not support varargs.");
  } else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }
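
  // For example, a stdcall callee taking 12 bytes of arguments returns with
  // "ret $12", while an sret call under the C convention returns with
  // "ret $4": the callee pops only the hidden struct pointer and the caller
  // re-adjusts the stack by NumBytes - 4.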

  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy()),
                             DAG.getConstant(NumBytesForCalleeToPush,
                                             getPointerTy()),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

//===----------------------------------------------------------------------===//
// FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// and requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the incoming stack size is 8n+4 bytes so that the start of
    // the arguments, and the arguments after the return address has been
    // pushed, are aligned.
    if ((StackSize & 7) == 0)
      StackSize += 4;
  }
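
  // Example: 8 bytes of stack arguments are padded to 12, so together with
  // the 4-byte return address the total stack adjustment stays a multiple of
  // 8, as the 8n+4 rule above requires.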

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  BytesToPopOnReturn = StackSize;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                           ISD::ParamFlags::ByValAlignOffs);

    unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                    ISD::ParamFlags::ByValSizeOffs;
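
    // The parameter flag word packs the byval alignment as a log2 value and
    // the byval size as a plain bit-field (decoded above); materialize them
    // as constants and emit an inline memcpy of the argument.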
    SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
    SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
    SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);

    return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode,
                         AlwaysInline);
  } else {
    return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
  }
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the outgoing argument area is 8n+4 bytes so that the start of
    // the arguments, and the arguments after the return address has been
    // pushed, are aligned.
    if ((NumBytes & 7) == 0)
      NumBytes += 4;
  }

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls via PLT.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  assert(!isTailCall && "no tail call here");
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
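  // NumBytes appears twice: once as the total CALLSEQ_END adjustment and once
  // as the amount the callee pops, since a fastcall callee pops its entire
  // argument area.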
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

//===----------------------------------------------------------------------===//
// Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like StdCall, the callee cleans up the arguments; unlike StdCall, ECX is
// reserved for storing the tail-called function's address, so only 2 registers
// are free for argument passing (inreg). Tail call optimization is performed
// provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// * elf/pic is disabled OR
// * elf/pic is enabled + callee is in the module + callee has
//   visibility protected or hidden
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's
// dyld, for example.)
// If a tail-called callee has more arguments than the caller, the caller
// needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after
// the original RETADDR, but before the saved frame pointer or the spilled
// registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
// stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Round up the stack size so that, together
/// with the return-address slot, it satisfies the target's stack alignment:
/// e.g. 16n + 12 for a 16-byte alignment requirement on x86-32.
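/// For example, with a 16-byte stack alignment and a 4-byte slot size:
///   8 -> 12, 20 -> 28, and 14 -> 28. Each result has the form 16n + 12, so
///   the stack is 16-byte aligned again once the return address is pushed.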
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // The remainder is small enough: just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the lower bits, then add the stack alignment once plus
      // (StackAlignment - SlotSize) bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment - SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible for
/// tail call optimization. A call is eligible if the caller and callee calling
/// conventions match (currently only fastcc supports tail calls) and the CALL
/// is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On elf/pic, %ebx needs to be live-in.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT())
        return true;

      // Can only do local tail calls with PIC.
      GlobalValue *GV = 0;
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G != 0 &&
          (GV = G->getGlobal()) &&
          (GV->hasHiddenVisibility() || GV->hasProtectedVisibility()))
        return true;
    }
  }

  return false;
}

SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op,
                                                 SelectionDAG &DAG,
                                                 unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  bool is64Bit = Subtarget->is64Bit();

  assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls.");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (is64Bit)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);

  // Lower arguments at fp - stackoffset + fpdiff.
  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytesToBePushed =
    GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG);

  unsigned NumBytesCallerPushed =
    MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();

  int FPDiff = NumBytesCallerPushed - NumBytesToBePushed;
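
  // FPDiff is negative when the callee needs more argument stack than the
  // caller provides: e.g. a caller that pops 8 bytes tail-calling a callee
  // that needs 24 gives FPDiff = -16, so the return address has to move 16
  // bytes further out.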

  // Record the delta of movement of the return-address stack slot; keep the
  // largest required move (the most negative FPDiff seen so far).
  if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
    MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytesToBePushed,
                                               getPointerTy()));

  // Adjust the return address stack slot.
  SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
  if (FPDiff) {
    MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32;
    RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
    // Load the "old" return address.
    RetAddrFrIdx = DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
    // Calculate the new stack slot for the return address.
    int SlotSize = is64Bit ? 8 : 4;
    int NewReturnAddrFI =
      MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
    NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
    Chain = SDOperand(RetAddrFrIdx.Val, 1);
  }

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SmallVector<SDOperand, 8> MemOpChains2;
  SDOperand FramePtr, StackPtr;
  SDOperand PtrOff;
  SDOperand FIN;
  int FI = 0;

  // Walk the register/memloc assignments, inserting copies/loads. Lower the
  // arguments first to the stack slots they would occupy in a normal function
  // call.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  InFlag = SDOperand();

  // Copy from the caller's stack slots to the stack slots of the tail-called
  // function. This needs to be done because if we lowered the arguments
  // directly to their real stack slots we might end up overwriting each other.
  // TODO: To make this more efficient (sometimes saving a store/load) we could
  // analyse the arguments and emit this store/load/store sequence only for
  // arguments which would be overwritten otherwise.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (!VA.isRegLoc()) {
      SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
      unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();

      // Get the source stack slot.
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      // Create the frame index.
      int32_t Offset = VA.getLocMemOffset()+FPDiff;
      uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
      FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
      FIN = DAG.getFrameIndex(FI, MVT::i32);
      if (Flags & ISD::ParamFlags::ByVal) {
        // Copy relative to the frame pointer.
        unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                               ISD::ParamFlags::ByValAlignOffs);

        unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                        ISD::ParamFlags::ByValSizeOffs;

        SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
        SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);
        SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1);

        MemOpChains2.push_back(DAG.getMemcpy(Chain, FIN, PtrOff, SizeNode,
                                             AlignNode, AlwaysInline));
      } else {
        SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL, 0);
        // Store relative to the frame pointer.
        MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
      }
    }
  }

  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains2[0], MemOpChains2.size());

  // Store the return address to the appropriate stack slot.
  if (FPDiff)
    Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls via PLT. This does not work with tail calls, since EBX is not
  // restored correctly by the tail caller.
  // TODO: verified for x86; check whether the same holds for x86-64.

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  else {
    assert(Callee.getOpcode() == ISD::LOAD &&
           "Function destination must be loaded into virtual register");
    unsigned Opc = is64Bit ? X86::R9 : X86::ECX;

    Chain = DAG.getCopyToReg(Chain,
                             DAG.getRegister(Opc, getPointerTy()),
                             Callee, InFlag);
    Callee = DAG.getRegister(Opc, getPointerTy());
    // Add the register as a live out.
    DAG.getMachineFunction().addLiveOut(Opc);
  }
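
  // For an indirect tail call the target address has to survive until the
  // jump, so it is pinned in a register the tail-call convention reserves for
  // this purpose (ECX on x86-32, R9 here on x86-64) and marked live-out.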

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;

  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a chain & a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  assert(InFlag.Val &&
         "Flag must be set. Depend on flag being set in LowerRET");
  Chain = DAG.getNode(X86ISD::TAILCALL,
                      Op.Val->getVTList(), &Ops[0], Ops.size());

  return SDOperand(Chain.Val, Op.ResNo);
}

//===----------------------------------------------------------------------===//
// X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();

  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass;  // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    assert(CC != CallingConv::Fast &&
           "Var arg not supported with calling convention fastcc");
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    // For X86-64, if there are vararg parameters that are passed in
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
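
    // The register save area is 176 bytes: the 6 integer argument registers
    // at offsets 0-47, then the 8 XMM registers at offsets 48-175. The
    // gp_offset/fp_offset values above index into this area, following the
    // AMD64 ABI va_list scheme.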

    // Store the integer parameter registers.
    SmallVector<SDOperand, 8> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset,
                                                getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);
  // The tail call convention (fastcc) requires callee pop.
  if (CC == CallingConv::Fast && PerformTailCallOpt) {
    BytesToPopOnReturn = StackSize;  // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0;  // Callee pops nothing.
    BytesCallerReserves = StackSize;
  }
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    assert(CC != CallingConv::Fast &&
           "Var args not supported with calling convention fastcc");

    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const unsigned XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }
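
  // E.g. a variadic call passing one double in XMM0 sets %al to 1; one that
  // uses no SSE registers at all sets %al to 0.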

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (getTargetMachine().getCodeModel() != CodeModel::Large
        && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                           getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    if (getTargetMachine().getCodeModel() != CodeModel::Large)
      Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  int NumBytesForCalleeToPush = 0;
  if (CC == CallingConv::Fast && PerformTailCallOpt) {
    NumBytesForCalleeToPush = NumBytes;  // Callee pops everything.
  } else {
    NumBytesForCalleeToPush = 0;         // Callee pops nothing.
  }

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}

/// translateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
/// specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1  -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0  -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1  -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}
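
// Note the effect of the Flip flag above: e.g. an ordered less-than
// (ISD::SETOLT) swaps LHS/RHS and tests COND_A, because testing CF directly
// with COND_B would also come out true for an unordered result
// (ZF = PF = CF = 1 in the table above).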

/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the half-open range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value equals the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
      return false;
  }

  return true;
}
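
// For example, <2, 3, 0, 1> is a valid PSHUFD mask, while <0, 4, 1, 5> is
// not, since element 4 would reference the second vector.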

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
      return false;

  return true;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Elems[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}
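
// For example, with 4 elements, <0, 1, 4, 5> is a valid SHUFPS mask: the
// low half of the result selects from V1 (indices 0-3) and the high half
// from V2 (indices 4-7).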

/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
  if (NumOps != 2 && NumOps != 4) return false;

  unsigned Half = NumOps / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
      return false;
  for (unsigned i = Half; i < NumOps; ++i)
    if (!isUndefOrInRange(Ops[i], 0, NumOps))
      return false;
  return true;
}

static bool isCommutedSHUFP(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3.
  return isUndefOrEqual(N->getOperand(0), 6) &&
         isUndefOrEqual(N->getOperand(1), 7) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect element 0 == 2, element 1 == 3, element 2 == 2, element 3 == 3.
  return isUndefOrEqual(N->getOperand(0), 2) &&
         isUndefOrEqual(N->getOperand(1), 3) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  return true;
}

/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i) {
    SDOperand Arg = N->getOperand(i + NumElems/2);
    if (!isUndefOrEqual(Arg, i + NumElems))
      return false;
  }

  return true;
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI  = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}
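
// For example, with 4 elements, <0, 4, 1, 5> is the classic unpcklps
// pattern: it interleaves the low halves of V1 and V2.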

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
                         bool V2IsSplat = false) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    SDOperand BitI  = Elts[i];
    SDOperand BitI1 = Elts[i+1];
    if (!isUndefOrEqual(BitI, j + NumElts/2))
      return false;
    if (V2IsSplat) {
      if (isUndefOrEqual(BitI1, NumElts))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
}
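
// For example, with 4 elements, <2, 6, 3, 7> matches unpckhps: it
// interleaves the high halves of V1 and V2.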

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i + 1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}
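
// For example, with 4 elements, <4, 1, 2, 3> is a MOVL mask: the lowest
// element comes from V2 and the rest are passed through from V1, which is
// what movss computes.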

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants. X86 movs requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1
/// in order.
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}
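
// For example, <2, 2, 2, 2> is a splat of element 2 of the first vector,
// while <5, 5, 5, 5> is rejected here because it references the second
// vector.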

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and the mask has 2 or 4 elements.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit, and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}
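
// Worked example: for the 4-element reversal mask <3, 2, 1, 0> the loop
// above visits the operands high-to-low and produces 0b00011011 == 0x1B,
// i.e. element i of the result is selected by bits [2i+1:2i] of the
// immediate, matching the pshufd/shufps encoding.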

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
/// values in their permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
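
// For example, commuting vector_shuffle(V1, V2, <0, 5, 2, 7>) yields
// vector_shuffle(V2, V1, <4, 1, 6, 3>): indices into V1 and V2 trade
// places along with the operands.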

/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static
SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}
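
// For example, the 4-element mask <2, 3, 6, 7> satisfies this check:
// commuting the operands turns it into the <6, 7, 2, 3> pattern that
// isMOVHLPSMask accepts.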

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from lower half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // a load-folding shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.Val->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
  // type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
  // type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}
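
// For example, getOnesVector(MVT::v8i16, DAG) builds a v4i32 BUILD_VECTOR of
// ~0U wrapped in a BIT_CONVERT to v8i16, so every all-ones vector in a
// function CSEs to a single node (and typically a single pcmpeqd).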

/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  SmallVector<SDOperand, 8> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}
|
|
|
|
|
Now generating perfect (I think) code for "vector set" with a single non-zero
scalar value.
e.g.
_mm_set_epi32(0, a, 0, 0);
==>
movd 4(%esp), %xmm0
pshufd $69, %xmm0, %xmm0
_mm_set_epi8(0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
==>
movzbw 4(%esp), %ax
movzwl %ax, %eax
pxor %xmm0, %xmm0
pinsrw $5, %eax, %xmm0
llvm-svn: 27923
2006-04-21 09:05:10 +08:00
|
|
|

/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);

  SmallVector<SDOperand, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
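
// For example, getMOVLMask(4, DAG) produces <4, 1, 2, 3>: element 0 is taken
// from V2 and the remaining elements from V1, matching movss/movd semantics.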

/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
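
// For example, getUnpacklMask(4, DAG) produces <0, 4, 1, 5>, i.e. the low
// halves of the two operands interleaved, as unpcklps/punpckldq do.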

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  unsigned Half = NumElems/2;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
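
// For example, getUnpackhMask(4, DAG) produces <2, 6, 3, 7>, i.e. the high
// halves of the two operands interleaved, as unpckhps/punpckhdq do.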

/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  Mask = getUnpacklMask(NumElems, DAG);
  while (NumElems != 4) {
    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
    NumElems >>= 1;
  }
  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);

  Mask = getZeroVector(MVT::v4i32, DAG);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}
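
// For a v8i16 splat, for instance, this emits one unpackl of V1 with itself,
// bitcasts the result to v4i32, and then splats it with the all-zeros mask
// (the zero vector doubles as the <0, 0, 0, 0> shuffle mask), which
// instruction selection can match as pshufd $0.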

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector with a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
    else
      MaskVec.push_back(DAG.getConstant(i, EVT));
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
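
// For example, with NumElems == 4 and Idx == 2 the mask is <0, 1, 4, 3>, so
// only element 2 of the result comes from V2; the rest come from the zero
// (or undef) vector V1.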

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getConstant(i/2, TLI.getPointerTy()));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}
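
// Byte elements are combined pairwise into i16 values; e.g. elements 4 and 5
// become (Elt5 << 8) | Elt4 and are inserted as word 2, so each byte pair
// costs at most one pinsrw.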

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getConstant(i, TLI.getPointerTy()));
    }
  }

  return V;
}
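
// E.g. a v8i16 vector whose only non-zero element 'a' sits at index 5 lowers
// to a pxor'd zero vector followed by a single pinsrw $5 of 'a'.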
SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor, all ones are handled with pcmpeqd.
  if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) {
    // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
    // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
    // eliminated on x86-32 hosts.
    if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
      return Op;

    if (ISD::isBuildVectorAllOnes(Op.Val))
      return getOnesVector(Op.getValueType(), DAG);
    return getZeroVector(Op.getValueType(), DAG);
  }

  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorElementType(VT);
  unsigned EVTBits = MVT::getSizeInBits(EVT);

  unsigned NumElems = Op.getNumOperands();
  unsigned NumZero  = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  unsigned NumNonZeroImms = 0;
  SmallSet<SDOperand, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::UNDEF) {
      Values.insert(Elt);
      if (isZeroNode(Elt))
        NumZero++;
      else {
        NonZeros |= (1 << i);
        NumNonZero++;
        if (Elt.getOpcode() == ISD::Constant ||
            Elt.getOpcode() == ISD::ConstantFP)
          NumNonZeroImms++;
      }
    }
  }

  if (NumNonZero == 0) {
    // All undef vector. Return an UNDEF. All zero vectors were handled above.
    return DAG.getNode(ISD::UNDEF, VT);
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1)
    return SDOperand();

  // Special case for single non-zero element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDOperand Item = Op.getOperand(Idx);
    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
    if (Idx == 0)
      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
                                         NumZero > 0, DAG);

    if (EVTBits == 32) {
      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
                                         DAG);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskVec[0], MaskVec.size());
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
                         DAG.getNode(ISD::UNDEF, VT), Mask);
    }
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (NumNonZero == NumNonZeroImms)
    return SDOperand();

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64)
    return SDOperand();

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
                                        *this);
    if (V.Val) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDOperand, 8> V;
  V.resize(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                             getMOVLMask(NumElems, DAG));
          break;
        case 2:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getMOVLMask(NumElems, DAG));
          break;
        case 3:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getUnpacklMask(NumElems, DAG));
          break;
      }
    }

    // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
    // clears the upper bits.
    // FIXME: we can do the same for v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in post legalizer dag combiner with target specific hooks.
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
    SmallVector<SDOperand, 8> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i, EVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size());
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }

  if (Values.size() > 2) {
    // Expand into a number of unpckl*.
    // e.g. for v4f32
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}
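
// E.g. _mm_set_epi32(0, 0, b, a) lowers to just:
//   movd 8(%esp), %xmm1
//   movd 4(%esp), %xmm0
//   punpckldq %xmm1, %xmm0
// since movd clears the upper 96 bits of the destination register.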
static
SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
                                   SDOperand PermMask, SelectionDAG &DAG,
                                   TargetLowering &TLI) {
  SDOperand NewV;
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
  MVT::ValueType PtrVT = TLI.getPointerTy();
  SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
                                     PermMask.Val->op_end());

  // First record which half of which vector the low elements come from.
  SmallVector<unsigned, 4> LowQuad(4);
  for (unsigned i = 0; i < 4; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    int QuadIdx = EltIdx / 4;
    ++LowQuad[QuadIdx];
  }
  int BestLowQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LowQuad[i] > MaxQuad) {
      BestLowQuad = i;
      MaxQuad = LowQuad[i];
    }
  }

  // Record which half of which vector the high elements come from.
  SmallVector<unsigned, 4> HighQuad(4);
  for (unsigned i = 4; i < 8; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    int QuadIdx = EltIdx / 4;
    ++HighQuad[QuadIdx];
  }
  int BestHighQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HighQuad[i] > MaxQuad) {
      BestHighQuad = i;
      MaxQuad = HighQuad[i];
    }
  }

  // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it.
  if (BestLowQuad != -1 || BestHighQuad != -1) {
    // First sort the 4 chunks in order using shufpd.
    SmallVector<SDOperand, 8> MaskVec;
    if (BestLowQuad != -1)
      MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(0, MVT::i32));
    if (BestHighQuad != -1)
      MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(1, MVT::i32));
    SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2);
    NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1),
                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask);
    NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV);

    // Now sort high and low parts separately.
    BitVector InOrder(8);
    if (BestLowQuad != -1) {
      // Sort lower half in order using PSHUFLW.
      MaskVec.clear();
      bool AnyOutOrder = false;
      for (unsigned i = 0; i != 4; ++i) {
        SDOperand Elt = MaskElts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(Elt);
          InOrder.set(i);
        } else {
          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
          if (EltIdx != i)
            AnyOutOrder = true;
          MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT));
          // If this element is in the right place after this shuffle, then
          // remember it.
          if ((int)(EltIdx / 4) == BestLowQuad)
            InOrder.set(i);
        }
      }
      if (AnyOutOrder) {
        for (unsigned i = 4; i != 8; ++i)
          MaskVec.push_back(DAG.getConstant(i, MaskEVT));
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
      }
    }

    if (BestHighQuad != -1) {
      // Sort high half in order using PSHUFHW if possible.
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
      bool AnyOutOrder = false;
      for (unsigned i = 4; i != 8; ++i) {
        SDOperand Elt = MaskElts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(Elt);
          InOrder.set(i);
        } else {
          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
          if (EltIdx != i)
            AnyOutOrder = true;
          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
          // If this element is in the right place after this shuffle, then
          // remember it.
          if ((int)(EltIdx / 4) == BestHighQuad)
            InOrder.set(i);
        }
      }
      if (AnyOutOrder) {
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
      }
    }

    // The other elements are put in the right place using pextrw and pinsrw.
    for (unsigned i = 0; i != 8; ++i) {
      if (InOrder[i])
        continue;
      SDOperand Elt = MaskElts[i];
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx == i)
        continue;
      SDOperand ExtOp = (EltIdx < 8)
        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                      DAG.getConstant(EltIdx, PtrVT))
        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                      DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }

  // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use
  // as few as possible.
  // First, let's find out how many elements are already in the right order.
  unsigned V1InOrder = 0;
  unsigned V1FromV1 = 0;
  unsigned V2InOrder = 0;
  unsigned V2FromV2 = 0;
  SmallVector<SDOperand, 8> V1Elts;
  SmallVector<SDOperand, 8> V2Elts;
  for (unsigned i = 0; i < 8; ++i) {
    SDOperand Elt = MaskElts[i];
    if (Elt.getOpcode() == ISD::UNDEF) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(Elt);
      ++V1InOrder;
      ++V2InOrder;
      continue;
    }
    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
    if (EltIdx == i) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
      ++V1InOrder;
    } else if (EltIdx == i+8) {
      V1Elts.push_back(Elt);
      V2Elts.push_back(DAG.getConstant(i, MaskEVT));
      ++V2InOrder;
    } else if (EltIdx < 8) {
      V1Elts.push_back(Elt);
      ++V1FromV1;
    } else {
      V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
      ++V2FromV2;
    }
  }

  if (V2InOrder > V1InOrder) {
    PermMask = CommuteVectorShuffleMask(PermMask, DAG);
    std::swap(V1, V2);
    std::swap(V1Elts, V2Elts);
    std::swap(V1FromV1, V2FromV2);
  }

  if ((V1FromV1 + V1InOrder) != 8) {
    // Some elements are from V2.
    if (V1FromV1) {
      // If there are elements that are from V1 but out of place,
      // then first sort them in place.
      SmallVector<SDOperand, 8> MaskVec;
      for (unsigned i = 0; i < 8; ++i) {
        SDOperand Elt = V1Elts[i];
        if (Elt.getOpcode() == ISD::UNDEF) {
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
          continue;
        }
        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
        if (EltIdx >= 8)
          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
        else
          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
      }
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
    }

    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (EltIdx < 8)
        continue;
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
                                    DAG.getConstant(EltIdx - 8, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  } else {
    // All elements are from V1.
    NewV = V1;
    for (unsigned i = 0; i < 8; ++i) {
      SDOperand Elt = V1Elts[i];
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
                                    DAG.getConstant(EltIdx, PtrVT));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
                         DAG.getConstant(i, PtrVT));
    }
    return NewV;
  }
}
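
// Worked example: for the mask <4, 5, 6, 7, 0, 1, 2, 3> every low element
// comes from quad 1 and every high element from quad 0 of V1, so the initial
// shufpd (mask <1, 0>) already places each element correctly and the
// pextrw/pinsrw fixup loop has nothing left to do.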

/// RewriteAs4WideShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones if possible. This can be done when every pair / quad of shuffle mask
/// elements point to elements in the right sequence. e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
static
SDOperand RewriteAs4WideShuffle(SDOperand V1, SDOperand V2,
                                SDOperand PermMask, SelectionDAG &DAG,
                                TargetLowering &TLI) {
  unsigned NumElems = PermMask.getNumOperands();
  unsigned Scale = NumElems / 4;
  SmallVector<SDOperand, 4> MaskVec;
  for (unsigned i = 0; i < NumElems; i += Scale) {
    unsigned StartIdx = ~0U;
    for (unsigned j = 0; j < Scale; ++j) {
      SDOperand Elt = PermMask.getOperand(i+j);
      if (Elt.getOpcode() == ISD::UNDEF)
        continue;
      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
      if (StartIdx == ~0U)
        StartIdx = EltIdx - (EltIdx % Scale);
      if (EltIdx != StartIdx + j)
        return SDOperand();
    }
    if (StartIdx == ~0U)
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
    else
      MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32));
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
  V2 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V2);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, &MaskVec[0],4));
}
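
// Worked example: the v8i16 mask <0, 1, 8, 9, 2, 3, 10, 11> collapses to the
// v4i32 mask <0, 4, 1, 5> (Scale == 2), which is just an unpackl of the two
// operands after the bitcasts.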
SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;

  if (isUndefShuffle(Op.Val))
    return DAG.getNode(ISD::UNDEF, VT);

  if (isZeroShuffle(Op.Val))
    return getZeroVector(VT, DAG);

  if (isIdentityMask(PermMask.Val))
    return V1;
  else if (isIdentityMask(PermMask.Val, true))
    return V2;

  if (isSplatMask(PermMask.Val)) {
    if (NumElems <= 4) return Op;
    // Promote it to a v4i32 splat.
    return PromoteSplat(Op, DAG);
  }

  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.Val);
  V2IsSplat = isSplatVector(V2.Val);

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }

  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);

  if (Commuted) {
    // Commute it back and try unpck* again.
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      return Op;
  }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
    // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
    if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
         X86::isPSHUFDMask(PermMask.Val)) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val) &&
        MVT::getSizeInBits(VT) != 64)    // Don't do this for MMX.
      return Op;
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }
  }

  // If the shuffle can be rewritten as a 4 wide shuffle, then do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDOperand NewOp = RewriteAs4WideShuffle(V1, V2, PermMask, DAG, *this);
    if (NewOp.Val)
      return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
  }

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
    if (NewOp.Val)
      return NewOp;
  }

  // Handle all 4 wide cases with a number of shuffles.
  if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) {
    // Don't do this for MMX.
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
    SmallVector<std::pair<int, int>, 8> Locs;
    Locs.reserve(NumElems);
    SmallVector<SDOperand, 8> Mask1(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand, 8> Mask2(NumElems,
                                    DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumHi = 0;
    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &Mask1[0], Mask1.size()));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &Mask2[0], Mask2.size()));
    }

    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    SmallVector<SDOperand,8> *MaskPtr = &LoMask;
    unsigned MaskIdx = 0;
    unsigned LoIdx = 0;
    unsigned HiIdx = NumElems/2;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (i == NumElems/2) {
        MaskPtr = &HiMask;
        MaskIdx = 1;
        LoIdx = 0;
        HiIdx = NumElems/2;
      }
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
        Locs[i] = std::make_pair(MaskIdx, LoIdx);
        (*MaskPtr)[LoIdx] = Elt;
        LoIdx++;
      } else {
        Locs[i] = std::make_pair(MaskIdx, HiIdx);
        (*MaskPtr)[HiIdx] = Elt;
        HiIdx++;
      }
    }

    SDOperand LoShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &LoMask[0], LoMask.size()));
    SDOperand HiShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                              &HiMask[0], HiMask.size()));
    SmallVector<SDOperand, 8> MaskOps;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Locs[i].first == -1) {
        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
      } else {
        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
      }
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                   &MaskOps[0], MaskOps.size()));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
                                     DAG.getNode(ISD::BIT_CONVERT,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);  // i16 -> i32 in the numbering
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;
    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.
      push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  } else if (MVT::getSizeInBits(VT) == 64) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
    SmallVector<SDOperand, 8> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
    IdxVec.
      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                 &IdxVec[0], IdxVec.size());
    SDOperand Vec = Op.getOperand(0);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, getPointerTy()));
  }

  return SDOperand();
}
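
// Illustrative sketch (not part of the original source): with the 32-bit
// path above, extracting element 2 of a v4f32 held in %xmm0 becomes roughly
//   shufps $2, %xmm0, %xmm0    ; mask <2,u,u,u>: element 2 into lane 0
//   movss  %xmm0, ...          ; lane 0 now holds the result
// The three undef mask lanes leave the rest of the register unspecified.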

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
  // as its second argument.
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType BaseVT = MVT::getVectorElementType(VT);
  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);
  if (MVT::getSizeInBits(BaseVT) == 16) {
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),
                           getPointerTy());
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  } else if (MVT::getSizeInBits(BaseVT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
    if (Idx == 0) {
      // Use a movss.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
      SmallVector<SDOperand, 8> MaskVec;
      MaskVec.push_back(DAG.getConstant(4, BaseVT));
      for (unsigned i = 1; i <= 3; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                     &MaskVec[0], MaskVec.size()));
    } else {
      // Use two pinsrw instructions to insert a 32 bit value.
      Idx <<= 1;
      if (MVT::isFloatingPoint(N1.getValueType())) {
        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
        N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
        N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
                         DAG.getConstant(0, getPointerTy()));
      }
      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx, getPointerTy()));
      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx+1, getPointerTy()));
      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
    }
  }

  return SDOperand();
}
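
// Illustrative sketch (not part of the original source): inserting a 32-bit
// value at a nonzero index with the two-pinsrw path above, e.g. index 1,
// comes out as roughly
//   pinsrw $2, %eax, %xmm0     ; low 16 bits into word 2*Idx
//   shrl   $16, %eax
//   pinsrw $3, %eax, %xmm0     ; high 16 bits into word 2*Idx+1
// with the vector viewed as v8i16 for the duration of the insert.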

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
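
// Illustrative sketch (not part of the original source): for a global G the
// lowerings below therefore produce
//   Wrapper(TargetGlobalAddress G)                     // static code
//   GlobalBaseReg + Wrapper(TargetGlobalAddress G)     // PIC, non-RIP-relative
// and the wrapped node is then matched into an addressing mode or a MOV32ri.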
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
                                               getPointerTy(),
                                               CP->getAlignment());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
  // to load the value at address GV, not the value of GV itself. This means
  // that the GlobalAddress must be in the base or index register of the
  // address, not the GV offset field. The platform check is inside the
  // GVRequiresExtraLoad() call. The same applies for external symbols during
  // PIC codegen.
  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);

  return Result;
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
static SDOperand
LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                              const MVT::ValueType PtrVT) {
  SDOperand InFlag;
  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
                                     DAG.getNode(X86ISD::GlobalBaseReg,
                                                 PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  // Emit leal symbol@TLSGD(,%ebx,1), %eax.
  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Ops[] = { Chain, TGA, InFlag };
  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
  InFlag = Result.getValue(2);
  Chain = Result.getValue(1);

  // Call ___tls_get_addr. This function receives its argument in
  // the register EAX.
  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
  InFlag = Chain.getValue(1);

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops1[] = { Chain,
                       DAG.getTargetExternalSymbol("___tls_get_addr",
                                                   PtrVT),
                       DAG.getRegister(X86::EAX, PtrVT),
                       DAG.getRegister(X86::EBX, PtrVT),
                       InFlag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
  InFlag = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDOperand
LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                    const MVT::ValueType PtrVT) {
  // Get the thread pointer.
  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
  // Emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax"
  // (initial exec).
  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                             GA->getValueType(0),
                                             GA->getOffset());
  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);

  if (GA->getGlobal()->isDeclaration())  // initial exec TLS model
    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}
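
// Illustrative sketch (not part of the original source): taken together, the
// two helpers above emit for a thread-local variable x roughly
//   leal x@TLSGD(,%ebx,1), %eax ; call ___tls_get_addr   (general dynamic)
// under PIC, and otherwise the thread pointer plus an offset:
//   addl x@ntpoff, %eax      (local exec: x is defined in this module)
//   addl x@indntpoff, %eax   (initial exec: x is only a declaration)
// exactly as the comments inside each helper describe.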

SDOperand
X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  // TODO: implement the "initial exec" model for pic executables
  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF and 64-bit targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS model;
  // otherwise use the "Local Exec" TLS model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
  else
    return LowerToTLSExecModel(GA, DAG, getPointerTy());
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->isPICStyleRIPRel()) {
    Result = DAG.getNode(ISD::ADD, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                         Result);
  }

  return Result;
}

/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt  = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
    DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
  }

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                  DAG.getConstant(32, MVT::i8));
  SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32,
                               AndNode, DAG.getConstant(0, MVT::i8));

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(Cond);
    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
  }

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}
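
// Illustrative sketch (not part of the original source): for a 64-bit shift
// left the code above computes
//   Tmp2 = SHLD(hi, lo, amt)  and  Tmp3 = lo << amt
// then tests (amt & 32): if it is zero the result pair is (Hi, Lo) =
// (Tmp2, Tmp3); otherwise the 32-bit shift has wrapped and the pair is
// (Tmp3, 0). Both selections use CMOV, so no branch is emitted.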

SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
         Op.getOperand(0).getValueType() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  SDOperand Result;
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
                                 StackSlot, NULL, 0);

  // These are really Legal; the caller falls through into that case.
  if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32)
    return Result;
  if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64)
    return Result;
  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
      Subtarget->is64Bit())
    return Result;

  // Build the FILD.
  SDVTList Tys;
  bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
                (X86ScalarSSEf64 && Op.getValueType() == MVT::f64);
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                       Tys, &Ops[0], Ops.size());

  if (useSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SmallVector<SDOperand, 8> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0);
  }

  return Result;
}
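
// Illustrative sketch (not part of the original source): on a 32-bit target
// with SSE2, i64 -> f64 has no direct cvtsi2sd form, so the path above
// stores the integer, FILDs it on the x87 stack, FSTs the result back to
// memory, and reloads it into an SSE register:
//   fildll (slot) ; fstpl (tmp) ; movsd (tmp), %xmm0
// The i32-with-SSE cases return early above because they are really legal
// (cvtsi2ss/cvtsi2sd).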

std::pair<SDOperand,SDOperand> X86TargetLowering::
FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");

  // These are really Legal.
  if (Op.getValueType() == MVT::i32 &&
      X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
    return std::make_pair(SDOperand(), SDOperand());
  if (Op.getValueType() == MVT::i32 &&
      X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
    return std::make_pair(SDOperand(), SDOperand());
  if (Subtarget->is64Bit() &&
      Op.getValueType() == MVT::i64 &&
      Op.getOperand(0).getValueType() != MVT::f80)
    return std::make_pair(SDOperand(), SDOperand());

  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  unsigned Opc;
  switch (Op.getValueType()) {
  default: assert(0 && "Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
  if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) ||
      (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDOperand Ops[] = {
      Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
    };
    Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM.
  SDOperand Ops[] = { Chain, Value, StackSlot };
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);

  return std::make_pair(FIST, StackSlot);
}
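
// Illustrative sketch (not part of the original source): f64 -> i64 has no
// SSE2 instruction, so the helper above moves the value onto the x87 stack
// (via an FLD from a spill slot when it lives in an SSE register) and then
// issues a truncating integer store:
//   fldl (slot) ; fistpll (result slot)
// after which the two callers below just load the integer from the slot.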

SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return SDOperand();

  // Load the result.
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
}

SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG);
  SDOperand FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.Val == 0) return 0;

  // Return an i64 load from the stack slot.
  SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0);

  // Use a MERGE_VALUES node to drop the chain result value.
  return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val;
}

SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  if (MVT::isVector(VT))
    EltVT = MVT::getVectorElementType(VT);
  const Type *OpNTy = MVT::getTypeForValueType(EltVT);
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63))));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31))));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
                               false, 16);
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}
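
// Illustrative sketch (not part of the original source): for scalar f64 the
// code above puts <~(1<<63), ~(1<<63)> in the constant pool and emits one
// load-and-AND, roughly
//   andpd LCPI_fabs_mask, %xmm0    ; the label name here is made up
// so fabs costs a single masking instruction instead of a libcall.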

SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EltVT = VT;
  unsigned EltNum = 1;
  if (MVT::isVector(VT)) {
    EltVT = MVT::getVectorElementType(VT);
    EltNum = MVT::getVectorNumElements(VT);
  }
  const Type *OpNTy = MVT::getTypeForValueType(EltVT);
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63)));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31)));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
                               false, 16);
  if (MVT::isVector(VT)) {
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(ISD::XOR, MVT::v2i64,
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Op.getOperand(0)),
                                   DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                               Mask)));
  } else {
    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
  }
}

SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Op1.getValueType();
  const Type *SrcTy = MVT::getTypeForValueType(SrcVT);

  // If the second operand is smaller, extend it first.
  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
    SrcVT = VT;
    SrcTy = MVT::getTypeForValueType(SrcVT);
  }
  // And if it is bigger, shrink it first.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1);
    SrcVT = VT;
    SrcTy = MVT::getTypeForValueType(SrcVT);
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  // First get the sign bit of the second operand.
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0,
                                false, 16);
  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift the sign bit right or left if the two operands have different types.
  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getConstant(0, getPointerTy()));
  }

  // Clear the sign bit of the first operand.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
                                false, 16);
  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}

SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Op1 = Op.getOperand(1);
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  unsigned X86CC;

  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
    return DAG.getNode(X86ISD::SETCC, MVT::i8,
                       DAG.getConstant(X86CC, MVT::i8), Cond);
  }

  assert(isFP && "Illegal integer SetCC!");

  Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
  switch (SetCCOpcode) {
  default: assert(false && "Illegal floating point SetCC!");
  case ISD::SETOEQ: {  // !PF & ZF
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_E, MVT::i8), Cond);
    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
  }
  case ISD::SETUNE: {  // PF | !ZF
    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_P, MVT::i8), Cond);
    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                 DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
  }
  }
}
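
// Illustrative sketch (not part of the original source): SETOEQ has no single
// x86 condition code, so per the code above an ordered-equal compare becomes
// roughly
//   ucomisd %xmm1, %xmm0
//   setnp   %al          ; !PF: not unordered
//   sete    %cl          ; ZF: equal
//   andb    %cl, %al
// and SETUNE is the dual using setp/setne/orb.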

SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Cond = Op.getOperand(0);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition-setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    MVT::ValueType VT = Op.getValueType();
    bool IllegalFPCMov = false;
    if (VT == MVT::f32 && !X86ScalarSSEf32)
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    else if (VT == MVT::f64 && !X86ScalarSSEf64)
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    else if (VT == MVT::f80)
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    if ((Opc == X86ISD::CMP ||
         Opc == X86ISD::COMI ||
         Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }

  const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
                                                    MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // the condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond  = Op.getOperand(1);
  SDOperand Dest  = Op.getOperand(2);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition-setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP ||
        Opc == X86ISD::COMI ||
        Opc == X86ISD::UCOMI) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Chain, Op.getOperand(2), CC, Cond);
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;

  if (Subtarget->is64Bit())
    if (CallingConv == CallingConv::Fast && isTailCall && PerformTailCallOpt)
      return LowerX86_TailCallTo(Op, DAG, CallingConv);
    else
      return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
  else
    switch (CallingConv) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      if (isTailCall && PerformTailCallOpt)
        return LowerX86_TailCallTo(Op, DAG, CallingConv);
      else
        return LowerCCCCallTo(Op, DAG, CallingConv);
    case CallingConv::C:
    case CallingConv::X86_StdCall:
      return LowerCCCCallTo(Op, DAG, CallingConv);
    case CallingConv::X86_FastCall:
      return LowerFastCCCallTo(Op, DAG, CallingConv);
    }
}

// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// the correct sequence.
SDOperand
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
                                           SelectionDAG &DAG) {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");

  // Get the inputs.
  SDOperand Chain = Op.getOperand(0);
  SDOperand Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here.

  SDOperand Flag;

  MVT::ValueType IntPtr = getPointerTy();
  MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);

  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain,
                      DAG.getTargetExternalSymbol("_alloca", IntPtr),
                      DAG.getRegister(X86::EAX, IntPtr),
                      Flag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(SPTy);
  Tys.push_back(MVT::Other);
  SDOperand Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}
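
// Illustrative sketch (not part of the original source): the node sequence
// built above corresponds to
//   movl <size>, %eax
//   call _alloca          ; probes the stack one page at a time
//   movl %esp, <result>
// i.e. CopyToReg(EAX), X86ISD::CALL("_alloca"), then CopyFromReg of the
// stack pointer.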

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true);

  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (Subtarget->is64Bit())
    return LowerX86_64CCCArguments(Op, DAG);
  else
    switch (CC) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::Fast:
      return LowerCCCArguments(Op, DAG, true);
      // Falls through
    case CallingConv::C:
      return LowerCCCArguments(Op, DAG);
    case CallingConv::X86_StdCall:
      MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall);
      return LowerCCCArguments(Op, DAG, true);
    case CallingConv::X86_FastCall:
      MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall);
      return LowerFastCCArguments(Op, DAG);
    }
}

SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned or if the size is more than the threshold, call
  // memset. The libc version is likely to be faster for these cases. It can
  // use the address value and run-time information about the CPU.
  if ((Align & 3) != 0 ||
      (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = Op.getOperand(1);
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    // Extend the unsigned i8 argument to be an int value for the call.
    Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
    Entry.Ty = IntPtrTy;
    Args.push_back(Entry);
    Entry.Node = Op.getOperand(3);
    Args.push_back(Entry);
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = Op.getOperand(3);
      break;
    }

    if (AVT > MVT::i8) {
      if (I) {
        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
        Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
        BytesLeft = I->getValue() % UBytes;
      } else {
        assert(AVT >= MVT::i32 &&
               "Do not use rep;stos if not at least DWORD aligned");
        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
                            Op.getOperand(3), DAG.getConstant(2, MVT::i8));
        TwoRepStos = true;
      }
    }

    Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                           Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant((AVT == MVT::i64) ? 7 : 3,
                                                 CVT));
    Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
                             Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
  } else if (BytesLeft) {
    // Issue stores for the last 1 - 7 bytes.
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 4) {
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      Value = DAG.getConstant(Val, MVT::i32);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                       DAG.getConstant(Offset, AddrVT)),
                           NULL, 0);
    }
  }

  return Chain;
}
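
// Illustrative sketch (not part of the original source): with this lowering a
// DWORD-aligned memset(p, 0, 43) of constant size becomes
//   ecx = 43/4 = 10, rep;stos dword    ; stores 40 bytes
// followed by a 2-byte store at p+40 and a 1-byte store at p+42 for the
// BytesLeft = 3 tail. A non-constant length instead uses two rep;stos runs
// (TwoRepStos): one DWORD-sized for len>>2 and one byte-sized for len&3.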

SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
                                               SDOperand Dest,
                                               SDOperand Source,
                                               unsigned Size,
                                               unsigned Align,
                                               SelectionDAG &DAG) {
  MVT::ValueType AVT;
  unsigned BytesLeft = 0;
  switch (Align & 3) {
  case 2:   // WORD aligned
    AVT = MVT::i16;
    break;
  case 0:   // DWORD aligned
    AVT = MVT::i32;
    if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
      AVT = MVT::i64;
    break;
  default:  // Byte aligned
    AVT = MVT::i8;
    break;
  }
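
  // e.g. a 13-byte DWORD-aligned copy moves three i32 units with rep movs
  // (Count == 3) and finishes the trailing BytesLeft == 1 with an explicit
  // load/store pair below.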
  unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
  SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy());
  BytesLeft = Size % UBytes;
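
  // rep movs takes its operands implicitly: the trip count in {R,E}CX, the
  // destination in {R,E}DI and the source in {R,E}SI, hence the CopyToReg
  // glue chain below.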
  SDOperand InFlag(0, 0);
  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                            Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                            Dest, InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
                            Source, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());

  if (BytesLeft) {
    // Issue loads and stores for the last 1 - 7 bytes.
    unsigned Offset = Size - BytesLeft;
    SDOperand DstAddr = Dest;
    MVT::ValueType DstVT = DstAddr.getValueType();
    SDOperand SrcAddr = Source;
    MVT::ValueType SrcVT = SrcAddr.getValueType();
    SDOperand Value;
    if (BytesLeft >= 4) {
      Value = DAG.getLoad(MVT::i32, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 4;
      Offset += 4;
    }
    if (BytesLeft >= 2) {
      Value = DAG.getLoad(MVT::i16, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
      BytesLeft -= 2;
      Offset += 2;
    }
    if (BytesLeft == 1) {
      Value = DAG.getLoad(MVT::i8, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          NULL, 0);
      Chain = Value.getValue(1);
      Chain = DAG.getStore(Chain, Value,
                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                       DAG.getConstant(Offset, DstVT)),
                           NULL, 0);
    }
  }

  return Chain;
}

/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand TheChain = N->getOperand(0);
  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
  if (Subtarget->is64Bit()) {
    SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
    SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
                                       MVT::i64, rax.getValue(2));
    SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
                                DAG.getConstant(32, MVT::i8));
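    // RDTSC returns the low half of the counter in RAX and the high half in
    // RDX; the full 64-bit value is rax | (rdx << 32).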
    SDOperand Ops[] = {
      DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
    };

    Tys = DAG.getVTList(MVT::i64, MVT::Other);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
  }

  SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
  SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
                                     MVT::i32, eax.getValue(2));
  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDOperand Ops[] = { eax, edx };
  Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);

  // Use a MERGE_VALUES to return the value and chain.
  Ops[1] = edx.getValue(1);
  Tys = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
}

SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));

  if (!Subtarget->is64Bit()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
    return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(),
                        SV->getOffset());
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
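  // These fields live at byte offsets 0, 4, 8 and 16 of the 24-byte
  // __va_list_tag, which is why FIN is advanced by 4, 4 and then 8 below.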
  SmallVector<SDOperand, 8> MemOps;
  SDOperand FIN = Op.getOperand(1);
  // Store gp_offset
  SDOperand Store = DAG.getStore(Op.getOperand(0),
                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
                                 FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  Store = DAG.getStore(Op.getOperand(0),
                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
                       FIN, SV->getValue(), SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(4, getPointerTy()));
  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                    DAG.getConstant(8, getPointerTy()));
  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
                       SV->getOffset());
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}

SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
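  // That is 24 bytes in total, copied below as three i64 load/store pairs.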
  SDOperand Chain = Op.getOperand(0);
  SDOperand DstPtr = Op.getOperand(1);
  SDOperand SrcPtr = Op.getOperand(2);
  SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3));
  SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4));

  SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr,
                       SrcSV->getValue(), SrcSV->getOffset());
  Chain = SrcPtr.getValue(1);
  for (unsigned i = 0; i < 3; ++i) {
    SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr,
                                SrcSV->getValue(), SrcSV->getOffset());
    Chain = Val.getValue(1);
    Chain = DAG.getStore(Chain, Val, DstPtr,
                         DstSV->getValue(), DstSV->getOffset());
    if (i == 2)
      break;
    SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
                         DAG.getConstant(8, getPointerTy()));
    DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
                         DAG.getConstant(8, getPointerTy()));
  }

  return Chain;
}

SDOperand
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
  switch (IntNo) {
  default: return SDOperand();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomineq_sd: {
    unsigned Opc = 0;
    ISD::CondCode CC = ISD::SETCC_INVALID;
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    unsigned X86CC;
    SDOperand LHS = Op.getOperand(1);
    SDOperand RHS = Op.getOperand(2);
    translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
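    // translateX86CC picks the X86 condition code for the ISD comparison and
    // may swap LHS and RHS, since the flags produced by COMI/UCOMI only
    // support certain operand orders directly.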

    SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                  DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
  }
  }
}

SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  // Just load the return address
  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
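  // On 32-bit x86 the caller's saved frame pointer sits 4 bytes below the
  // return address slot, so back up 4 bytes to form the frame address.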
  return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
                     DAG.getConstant(4, getPointerTy()));
}

SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op,
                                                       SelectionDAG &DAG) {
  // Is not yet supported on x86-64
  if (Subtarget->is64Bit())
    return SDOperand();

  return DAG.getConstant(8, getPointerTy());
}

SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG)
{
  assert(!Subtarget->is64Bit() &&
         "Lowering of eh_return builtin is not supported yet on x86-64");

  MachineFunction &MF = DAG.getMachineFunction();
  SDOperand Chain     = Op.getOperand(0);
  SDOperand Offset    = Op.getOperand(1);
  SDOperand Handler   = Op.getOperand(2);

  SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF),
                                    getPointerTy());

  SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
                                    DAG.getConstant(-4UL, getPointerTy()));
  StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
  Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
  Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr);
  MF.addLiveOut(X86::ECX);

  return DAG.getNode(X86ISD::EH_RETURN, MVT::Other,
                     Chain, DAG.getRegister(X86::ECX, getPointerTy()));
}

SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op,
                                             SelectionDAG &DAG) {
  SDOperand Root = Op.getOperand(0);
  SDOperand Trmp = Op.getOperand(1); // trampoline
  SDOperand FPtr = Op.getOperand(2); // nested function
  SDOperand Nest = Op.getOperand(3); // 'nest' parameter value

  SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4));

  if (Subtarget->is64Bit()) {
    return SDOperand(); // not yet supported
  } else {
    Function *Func = (Function *)
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    unsigned CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      assert(0 && "Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      const FunctionType *FTy = Func->getFunctionType();
      const ParamAttrsList *Attrs = Func->getParamAttrs();

      if (Attrs && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs->paramHasAttr(Idx, ParamAttr::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          cerr << "Nest register in use - reduce number of inreg parameters!\n";
          abort();
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    const X86InstrInfo *TII =
      ((X86TargetMachine&)getTargetMachine()).getInstrInfo();

    SDOperand OutChains[4];
    SDOperand Addr, Disp;
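    // The 32-bit trampoline is 10 bytes: a one-byte MOV32ri opcode, the
    // 4-byte 'nest' immediate at offset 1, a one-byte JMP opcode at offset
    // 5, and a 4-byte PC-relative displacement to the nested function.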

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);

    unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
    unsigned char N86Reg = ((X86RegisterInfo&)RegInfo).getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, TrmpSV->getValue(), TrmpSV->getOffset());

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
                                TrmpSV->getOffset() + 1, false, 1);

    unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                                TrmpSV->getValue(), TrmpSV->getOffset() + 5);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
                                TrmpSV->getOffset() + 6, false, 1);

    SDOperand Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
  }
}

SDOperand X86TargetLowering::LowerFLT_ROUNDS(SDOperand Op, SelectionDAG &DAG) {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
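  // Spot check of the mapping: bits 11:10 == 01 (round to -inf) gives
  // ((0 | 2) + 1) & 3 == 3, and 11 (round to 0) gives ((1 | 2) + 1) & 3 == 0,
  // exactly what FLT_ROUNDS expects.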

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameInfo &TFI = *TM.getFrameInfo();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT::ValueType VT = Op.getValueType();

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
                                DAG.getEntryNode(), StackSlot);

  // Load FP Control Word from stack slot
  SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);

  // Transform as necessary
  SDOperand CWD1 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDOperand CWD2 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDOperand RetVal =
    DAG.getNode(ISD::AND, MVT::i16,
                DAG.getNode(ISD::ADD, MVT::i16,
                            DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::FABS:               return LowerFABS(Op, DAG);
  case ISD::FNEG:               return LowerFNEG(Op, DAG);
  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS:         return LowerFLT_ROUNDS(Op, DAG);

  // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
  case ISD::READCYCLECOUNTER:
    return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
  }
}

/// ExpandOperation - Provide custom lowering hooks for expanding operations.
SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::FP_TO_SINT:       return ExpandFP_TO_SINT(N, DAG);
  case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FSRL:               return "X86ISD::FSRL";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::THREAD_POINTER:     return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
    return false;

  if (AM.BaseGV) {
    // We can only fold this if we don't need an extra load.
    if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
      return false;

    // X86-64 only supports addr of globals in small code model.
    if (Subtarget->is64Bit()) {
      if (getTargetMachine().getCodeModel() != CodeModel::Small)
        return false;
      // If lower 4G is not available, then we must use rip-relative addressing.
      if (AM.BaseOffs || AM.Scale > 1)
        return false;
    }
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }
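  // e.g. scale 5 is encoded as basereg + scalereg*4 with both registers the
  // same, so it can only be accepted while the base register slot is free.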

  return true;
}

bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
  if (!Ty1->isInteger() || !Ty2->isInteger())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}
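
// e.g. truncating i32 to i16 is free everywhere (just use the 16-bit
// subregister), while truncating i64 to i32 is free only in 64-bit mode.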

bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
                                       MVT::ValueType VT2) const {
  if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
    return false;
  unsigned NumBits1 = MVT::getSizeInBits(VT1);
  unsigned NumBits2 = MVT::getSizeInBits(VT2);
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() <= 4 ||
          isIdentityMask(Mask.Val) ||
          isIdentityMask(Mask.Val, true) ||
          isSplatMask(Mask.Val)  ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKH_v_undef_Mask(Mask.Val));
}

bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
                                               MVT::ValueType EVT,
                                               SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(&BVOps[0], 4)  ||
            isCommutedMOVL(&BVOps[0], 4, true) ||
            isSHUFPMask(&BVOps[0], 4) ||
            isCommutedSHUFP(&BVOps[0], 4));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
        e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while(!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
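    // 0xC7F sets the rounding-control bits (11:10) of the control word to
    // 11, i.e. round toward zero, with all FP exceptions masked.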

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getFrameIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = 0;   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
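    // X86 SETCC materializes 0 or 1, so every bit above bit 0 is known zero.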
    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
  MVT::ValueType VT = N->getValueType(0);
  SDOperand PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
  i %= NumElems;
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    return (i == 0)
      ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
    return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
  }
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + an offset.
static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
  unsigned Opc = N->getOpcode();
  if (Opc == X86ISD::Wrapper) {
    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      return true;
    }
  } else if (Opc == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}

/// isConsecutiveLoad - Returns true if N is loading from an address of Base
/// + Dist * Size.
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
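/// e.g. _mm_set_ps(d, c, b, a) with four consecutive stack-passed floats
/// becomes a single 128-bit load (movups, or movaps when the base is known
/// to be 16-byte aligned).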
|
2006-07-11 05:37:44 +08:00
|
|
|
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
|
|
|
|
const X86Subtarget *Subtarget) {
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
llvm-svn: 29042
2006-07-07 16:33:52 +08:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
MachineFrameInfo *MFI = MF.getFrameInfo();
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2007-06-15 06:58:02 +08:00
|
|
|
MVT::ValueType EVT = MVT::getVectorElementType(VT);
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
llvm-svn: 29042
2006-07-07 16:33:52 +08:00
|
|
|
SDOperand PermMask = N->getOperand(2);
|
|
|
|
int NumElems = (int)PermMask.getNumOperands();
|
|
|
|
SDNode *Base = NULL;
|
|
|
|
for (int i = 0; i < NumElems; ++i) {
|
|
|
|
SDOperand Idx = PermMask.getOperand(i);
|
|
|
|
if (Idx.getOpcode() == ISD::UNDEF) {
|
|
|
|
if (!Base) return SDOperand();
|
|
|
|
} else {
|
|
|
|
SDOperand Arg =
|
|
|
|
getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
|
2006-10-10 04:57:25 +08:00
|
|
|
if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
llvm-svn: 29042
2006-07-07 16:33:52 +08:00
|
|
|
return SDOperand();
|
|
|
|
if (!Base)
|
|
|
|
Base = Arg.Val;
|
|
|
|
else if (!isConsecutiveLoad(Arg.Val, Base,
|
|
|
|
i, MVT::getSizeInBits(EVT)/8,MFI))
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-07-11 05:37:44 +08:00
|
|
|
bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
|
2007-07-28 01:16:43 +08:00
|
|
|
LoadSDNode *LD = cast<LoadSDNode>(Base);
|
2006-10-10 04:57:25 +08:00
|
|
|
if (isAlign16) {
|
|
|
|
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
|
2007-07-28 01:16:43 +08:00
|
|
|
LD->getSrcValueOffset(), LD->isVolatile());
|
2006-10-10 04:57:25 +08:00
|
|
|
} else {
|
2007-07-28 01:16:43 +08:00
|
|
|
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
|
|
|
|
LD->getSrcValueOffset(), LD->isVolatile(),
|
|
|
|
LD->getAlignment());
|
2006-08-11 15:35:45 +08:00
|
|
|
}
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
llvm-svn: 29042
2006-07-07 16:33:52 +08:00
|
|
|
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE[12] support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE: // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}
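
// Source-level sketch of what this combine catches (assumed codegen, not
// verified output): with SSE2,
//   double fmin2(double x, double y) { return x < y ? x : y; }
// produces a select whose LHS/RHS equal the setcc operands with
// CC == SETOLT, so the code above rewrites it to (X86ISD::FMIN x, y),
// which instruction selection can emit as a single minsd.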

SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE:
    return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:
    return PerformSELECTCombine(N, DAG, Subtarget);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
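
// Example (illustrative): the single-letter constraint "x" returns
// C_RegisterClass here, deferring the actual class choice to
// getRegForInlineAsmConstraint below; a constraint this switch does not
// recognize, e.g. the multi-character "{ax}", falls through to the
// TargetLowering default.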

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA), (GA+C), or (C+GA).
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Try the commuted form.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
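
// Hypothetical uses of the immediate ranges checked above (sketches, not
// taken from this file):
//   asm ("shll %1, %0" : "+r"(v) : "I"(5));             // 'I': [0, 31]
//   asm volatile ("outb %0, %1" : : "a"(b), "N"(0x80)); // 'N': [0, 255]
// A constant outside its range leaves Ops untouched, so the operand is
// rejected.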

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}
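
// Example (illustrative): the 'A' class above is what lets 32-bit code do
//   unsigned long long t; asm volatile ("rdtsc" : "=A"(t));
// and receive the 64-bit result split across the EAX/EDX pair.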

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}
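
// Example of the remapping above (illustrative): an i32 operand constrained
// by "{ax}" first resolves to (AX, GR16RegisterClass); GR16 has no i32, so
// AX is rewritten to EAX in GR32RegisterClass, giving the documented
// "{ax},i32" -> {eax} behavior rather than the {ax},{dx} splitting the
// comment warns about.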