forked from OSchip/llvm-project
This commit changes:
1. Legalize now always promotes truncstore of i1 to i8.
2. Remove patterns and gunk related to truncstore i1 from targets.
3. Rename the StoreXAction stuff to TruncStoreAction in TLI.
4. Make the TLI TruncStoreAction table a 2d table to handle from/to conversions.
5. Mark a wide variety of invalid truncstores as such in various targets, e.g.
   X86 currently doesn't support truncstore of any of its integer types.
6. Add legalize support for truncstores with invalid value input types.
7. Add a dag combine transform to turn store(truncate) into truncstore when safe.

The latter allows us to compile CodeGen/X86/storetrunc-fp.ll to:

_foo:
	fldt	20(%esp)
	fldt	4(%esp)
	faddp	%st(1)
	movl	36(%esp), %eax
	fstps	(%eax)
	ret

instead of:

_foo:
	subl	$4, %esp
	fldt	24(%esp)
	fldt	8(%esp)
	faddp	%st(1)
	fstps	(%esp)
	movl	40(%esp), %eax
	movss	(%esp), %xmm0
	movss	%xmm0, (%eax)
	addl	$4, %esp
	ret

llvm-svn: 46140
parent 9f7fed1c1b
commit 1ea55cf816
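Before the diff itself, here is a minimal standalone C++ sketch of the packed 2-D action table that points 3 and 4 of the commit message introduce. The 2-bit shift/mask packing and the Legal/Promote/Expand/Custom actions mirror the get/setTruncStoreAction code in the hunks below; the struct name, the simplified ValueType enum, and the main() driver are illustrative only and are not LLVM's.

	#include <cassert>
	#include <cstdint>

	// Simplified stand-ins for the LLVM types used in the diff; the real
	// MVT::ValueType and TargetLowering::LegalizeAction live in TargetLowering.h.
	enum ValueType { i1, i8, i16, i32, i64, f32, f64, LAST_VALUETYPE };
	enum LegalizeAction { Legal = 0, Promote = 1, Expand = 2, Custom = 3 };

	// One uint64_t row per value type; each memory type gets a 2-bit action,
	// the same packing used by TruncStoreActions[] in the diff.
	struct TruncStoreActionTable {
	  uint64_t Rows[LAST_VALUETYPE] = {};  // default 0 == Legal

	  void set(ValueType ValVT, ValueType MemVT, LegalizeAction Action) {
	    assert(ValVT < LAST_VALUETYPE && MemVT < 32 && "Table isn't big enough!");
	    Rows[ValVT] &= ~(uint64_t(3) << (2 * MemVT));    // clear the 2-bit slot
	    Rows[ValVT] |= uint64_t(Action) << (2 * MemVT);  // record the new action
	  }

	  LegalizeAction get(ValueType ValVT, ValueType MemVT) const {
	    return LegalizeAction((Rows[ValVT] >> (2 * MemVT)) & 3);
	  }

	  bool isLegalOrCustom(ValueType ValVT, ValueType MemVT) const {
	    LegalizeAction A = get(ValVT, MemVT);
	    return A == Legal || A == Custom;
	  }
	};

	int main() {
	  TruncStoreActionTable T;
	  T.set(f64, f32, Expand);              // like the ARM/PPC/X86 hunks below
	  T.set(i32, i8, Expand);               // X86 rejects integer truncstores
	  assert(!T.isLegalOrCustom(f64, f32));
	  assert(T.isLegalOrCustom(i64, i32));  // entries never set stay 0, i.e. Legal
	  return 0;
	}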
@@ -301,19 +301,22 @@ public:
            getLoadXAction(LType, VT) == Custom;
   }
 
-  /// getStoreXAction - Return how this store with truncation should be treated:
-  /// either it is legal, needs to be promoted to a larger size, needs to be
-  /// expanded to some other code sequence, or the target has a custom expander
-  /// for it.
-  LegalizeAction getStoreXAction(MVT::ValueType VT) const {
-    if (MVT::isExtendedVT(VT)) return getTypeAction(VT);
-    return (LegalizeAction)((StoreXActions >> (2*VT)) & 3);
+  /// getTruncStoreAction - Return how this store with truncation should be
+  /// treated: either it is legal, needs to be promoted to a larger size, needs
+  /// to be expanded to some other code sequence, or the target has a custom
+  /// expander for it.
+  LegalizeAction getTruncStoreAction(MVT::ValueType ValVT,
+                                     MVT::ValueType MemVT) const {
+    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < 32 &&
+           "Table isn't big enough!");
+    return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
   }
 
-  /// isStoreXLegal - Return true if the specified store with truncation is
+  /// isTruncStoreLegal - Return true if the specified store with truncation is
   /// legal on this target.
-  bool isStoreXLegal(MVT::ValueType VT) const {
-    return getStoreXAction(VT) == Legal || getStoreXAction(VT) == Custom;
+  bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
+    return getTruncStoreAction(ValVT, MemVT) == Legal ||
+           getTruncStoreAction(ValVT, MemVT) == Custom;
   }
 
   /// getIndexedLoadAction - Return how the indexed load should be treated:
@@ -760,12 +763,14 @@ protected:
     LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
   }
 
-  /// setStoreXAction - Indicate that the specified store with truncation does
+  /// setTruncStoreAction - Indicate that the specified truncating store does
   /// not work with the with specified type and indicate what to do about it.
-  void setStoreXAction(MVT::ValueType VT, LegalizeAction Action) {
-    assert(VT < 32 && "Table isn't big enough!");
-    StoreXActions &= ~(uint64_t(3UL) << VT*2);
-    StoreXActions |= (uint64_t)Action << VT*2;
+  void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
+                           LegalizeAction Action) {
+    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < 32 &&
+           "Table isn't big enough!");
+    TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
+    TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
   }
 
   /// setIndexedLoadAction - Indicate that the specified indexed load does or
@@ -1183,10 +1188,9 @@ private:
   /// with the load.
   uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
 
-  /// StoreXActions - For each store with truncation of each value type, keep a
-  /// LegalizeAction that indicates how instruction selection should deal with
-  /// the store.
-  uint64_t StoreXActions;
+  /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the store.
+  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
 
   /// IndexedModeActions - For each indexed mode and each value type, keep a
   /// pair of LegalizeAction that indicates how instruction selection should
@@ -1690,8 +1690,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) {
   if (N1C && N0.getOpcode() == ISD::LOAD) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     if (LN0->getExtensionType() != ISD::SEXTLOAD &&
-        LN0->getAddressingMode() == ISD::UNINDEXED &&
-        N0.hasOneUse()) {
+        LN0->isUnindexed() && N0.hasOneUse()) {
       MVT::ValueType EVT, LoadedVT;
       if (N1C->getValue() == 255)
         EVT = MVT::i8;
@@ -3810,7 +3809,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
   SDOperand Ptr;
   MVT::ValueType VT;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
-    if (LD->getAddressingMode() != ISD::UNINDEXED)
+    if (LD->isIndexed())
       return false;
     VT = LD->getLoadedVT();
     if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
@@ -3818,7 +3817,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
       return false;
     Ptr = LD->getBasePtr();
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    if (ST->getAddressingMode() != ISD::UNINDEXED)
+    if (ST->isIndexed())
       return false;
     VT = ST->getStoredVT();
     if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
@@ -3937,7 +3936,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
   SDOperand Ptr;
   MVT::ValueType VT;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
-    if (LD->getAddressingMode() != ISD::UNINDEXED)
+    if (LD->isIndexed())
      return false;
    VT = LD->getLoadedVT();
    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
@@ -3945,7 +3944,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
       return false;
     Ptr = LD->getBasePtr();
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    if (ST->getAddressingMode() != ISD::UNINDEXED)
+    if (ST->isIndexed())
       return false;
     VT = ST->getStoredVT();
     if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
@@ -4187,7 +4186,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
   // If this is a store of a bit convert, store the input value if the
   // resultant store does not need a higher alignment than the original.
   if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
-      ST->getAddressingMode() == ISD::UNINDEXED) {
+      ST->isUnindexed()) {
     unsigned Align = ST->getAlignment();
     MVT::ValueType SVT = Value.getOperand(0).getValueType();
     unsigned OrigAlign = TLI.getTargetMachine().getTargetData()->
@@ -4285,7 +4284,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
     return SDOperand(N, 0);
 
   // FIXME: is there such a thing as a truncating indexed store?
-  if (ST->isTruncatingStore() && ST->getAddressingMode() == ISD::UNINDEXED &&
+  if (ST->isTruncatingStore() && ST->isUnindexed() &&
       MVT::isInteger(Value.getValueType())) {
     // See if we can simplify the input to this truncstore with knowledge that
     // only the low bits are being used. For example:
@@ -4308,8 +4307,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
   // is dead/noop.
   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
     if (Ld->getBasePtr() == Ptr && ST->getStoredVT() == Ld->getLoadedVT() &&
-        ST->getAddressingMode() == ISD::UNINDEXED &&
-        !ST->isVolatile() &&
+        ST->isUnindexed() && !ST->isVolatile() &&
         // There can't be any side effects between the load and store, such as
         // a call or store.
         Chain.reachesChainWithoutSideEffects(SDOperand(Ld, 1))) {
@@ -4318,6 +4316,18 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) {
     }
   }
 
+  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
+  // truncating store.  We can do this even if this is already a truncstore.
+  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
+      && TLI.isTypeLegal(Value.getOperand(0).getValueType()) &&
+      Value.Val->hasOneUse() && ST->isUnindexed() &&
+      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
+                            ST->getStoredVT())) {
+    return DAG.getTruncStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(),
+                             ST->getSrcValueOffset(), ST->getStoredVT(),
+                             ST->isVolatile(), ST->getAlignment());
+  }
+
   return SDOperand();
 }
 
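As a reading aid for the combine added above (point 7 of the commit message), here is a self-contained C++ sketch of just the guard condition it relies on. The helper names (Node, StoreInfo, TargetInfo, canFoldStoreOfTruncate) are invented for illustration and do not exist in LLVM; the actual check is the condition added to visitSTORE in the hunk above.

	#include <functional>

	enum class Op { FP_ROUND, TRUNCATE, Other };

	struct Node {
	  Op Opcode;
	  int SrcVT;       // value type of the operand being rounded/truncated
	  bool HasOneUse;
	};

	struct StoreInfo {
	  int MemVT;       // type written to memory
	  bool Unindexed;
	};

	struct TargetInfo {
	  std::function<bool(int)> isTypeLegal;
	  std::function<bool(int, int)> isTruncStoreLegal;  // (ValVT, MemVT)
	};

	// Returns true when store(trunc/fp_round x) may be rewritten as a truncating
	// store of x, mirroring the guard in the DAGCombiner hunk above.
	bool canFoldStoreOfTruncate(const Node &Value, const StoreInfo &ST,
	                            const TargetInfo &TLI) {
	  if (Value.Opcode != Op::FP_ROUND && Value.Opcode != Op::TRUNCATE)
	    return false;
	  return TLI.isTypeLegal(Value.SrcVT) &&   // wider source type must be legal
	         Value.HasOneUse &&                // the round/trunc dies here
	         ST.Unindexed &&                   // no pre/post-indexed truncstores
	         TLI.isTruncStoreLegal(Value.SrcVT, ST.MemVT);
	}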
@@ -2239,15 +2239,24 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
         break;
       }
     } else {
       // Truncating store
-      assert(isTypeLegal(ST->getValue().getValueType()) &&
-             "Cannot handle illegal TRUNCSTORE yet!");
-      Tmp3 = LegalizeOp(ST->getValue());
+      switch (getTypeAction(ST->getValue().getValueType())) {
+      case Legal:
+        Tmp3 = LegalizeOp(ST->getValue());
+        break;
+      case Promote:
+        // We can promote the value, the truncstore will still take care of it.
+        Tmp3 = PromoteOp(ST->getValue());
+        break;
+      case Expand:
+        // Just store the low part. This may become a non-trunc store, so make
+        // sure to use getTruncStore, not UpdateNodeOperands below.
+        ExpandOp(ST->getValue(), Tmp3, Tmp4);
+        return DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+                                 SVOffset, MVT::i8, isVolatile, Alignment);
+      }
 
-      // The only promote case we handle is TRUNCSTORE:i1 X into
-      //   -> TRUNCSTORE:i8 (and X, 1)
-      if (ST->getStoredVT() == MVT::i1 &&
-          TLI.getStoreXAction(MVT::i1) == TargetLowering::Promote) {
+      // Unconditionally promote TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
+      if (ST->getStoredVT() == MVT::i1) {
         // Promote the bool to a mask then store.
         Tmp3 = DAG.getNode(ISD::AND, Tmp3.getValueType(), Tmp3,
                            DAG.getConstant(1, Tmp3.getValueType()));
@@ -2261,7 +2270,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
       }
 
       MVT::ValueType StVT = cast<StoreSDNode>(Result.Val)->getStoredVT();
-      switch (TLI.getStoreXAction(StVT)) {
+      switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
       default: assert(0 && "This action is not supported yet!");
       case TargetLowering::Legal:
         // If this is an unaligned store and the target doesn't support it,
@@ -2275,8 +2284,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
         }
         break;
       case TargetLowering::Custom:
-        Tmp1 = TLI.LowerOperation(Result, DAG);
-        if (Tmp1.Val) Result = Tmp1;
+        Result = TLI.LowerOperation(Result, DAG);
         break;
       }
     }
@@ -158,7 +158,7 @@ TargetLowering::TargetLowering(TargetMachine &tm)
   // All operations default to being supported.
   memset(OpActions, 0, sizeof(OpActions));
   memset(LoadXActions, 0, sizeof(LoadXActions));
-  memset(&StoreXActions, 0, sizeof(StoreXActions));
+  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
   memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
   memset(&ConvertActions, 0, sizeof(ConvertActions));
 
@@ -125,6 +125,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
     addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
     addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
+
+    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
   }
   computeRegisterProperties();
 
@@ -1404,14 +1404,6 @@ def : ARMPat<(extloadi1 addrmode2:$addr), (LDRB addrmode2:$addr)>;
 def : ARMPat<(extloadi8 addrmode2:$addr), (LDRB addrmode2:$addr)>;
 def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
 
-// truncstore i1 -> truncstore i8
-def : ARMPat<(truncstorei1 GPR:$src, addrmode2:$dst),
-             (STRB GPR:$src, addrmode2:$dst)>;
-def : ARMPat<(pre_truncsti1 GPR:$src, GPR:$base, am2offset:$offset),
-             (STRB_PRE GPR:$src, GPR:$base, am2offset:$offset)>;
-def : ARMPat<(post_truncsti1 GPR:$src, GPR:$base, am2offset:$offset),
-             (STRB_POST GPR:$src, GPR:$base, am2offset:$offset)>;
-
 // smul* and smla*
 def : ARMV5TEPat<(mul (sra (shl GPR:$a, 16), 16), (sra (shl GPR:$b, 16), 16)),
                  (SMULBB GPR:$a, GPR:$b)>;
@@ -588,10 +588,6 @@ def : ThumbPat<(extloadi1 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
 def : ThumbPat<(extloadi8 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
 def : ThumbPat<(extloadi16 t_addrmode_s2:$addr), (tLDRH t_addrmode_s2:$addr)>;
 
-// truncstore i1 -> truncstore i8
-def : ThumbPat<(truncstorei1 GPR:$src, t_addrmode_s1:$dst),
-               (tSTRB GPR:$src, t_addrmode_s1:$dst)>;
-
 // Large immediate handling.
 
 // Two piece imms.
@@ -59,8 +59,6 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM)
   setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
   setLoadXAction(ISD::SEXTLOAD, MVT::i16, Expand);
 
-  setStoreXAction(MVT::i1, Promote);
-
   // setOperationAction(ISD::BRIND, MVT::Other, Expand);
   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
   setOperationAction(ISD::BR_CC, MVT::Other, Expand);
@@ -129,13 +129,21 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
   setLoadXAction(ISD::EXTLOAD,  MVT::i1, Custom);
   setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
   setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setStoreXAction(MVT::i1, Custom);
+  setTruncStoreAction(MVT::i8,   MVT::i1, Custom);
+  setTruncStoreAction(MVT::i16,  MVT::i1, Custom);
+  setTruncStoreAction(MVT::i32,  MVT::i1, Custom);
+  setTruncStoreAction(MVT::i64,  MVT::i1, Custom);
+  setTruncStoreAction(MVT::i128, MVT::i1, Custom);
 
   setLoadXAction(ISD::EXTLOAD,  MVT::i8, Custom);
   setLoadXAction(ISD::SEXTLOAD, MVT::i8, Custom);
   setLoadXAction(ISD::ZEXTLOAD, MVT::i8, Custom);
-  setStoreXAction(MVT::i8, Custom);
+  setTruncStoreAction(MVT::i8,   MVT::i8, Custom);
+  setTruncStoreAction(MVT::i16,  MVT::i8, Custom);
+  setTruncStoreAction(MVT::i32,  MVT::i8, Custom);
+  setTruncStoreAction(MVT::i64,  MVT::i8, Custom);
+  setTruncStoreAction(MVT::i128, MVT::i8, Custom);
 
   setLoadXAction(ISD::EXTLOAD,  MVT::i16, Custom);
   setLoadXAction(ISD::SEXTLOAD, MVT::i16, Custom);
   setLoadXAction(ISD::ZEXTLOAD, MVT::i16, Custom);
@@ -72,9 +72,6 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM)
   setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
   setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
-  // Store operations for i1 types must be promoted
-  setStoreXAction(MVT::i1, Promote);
-
   // Mips does not have these NodeTypes below.
   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
   setOperationAction(ISD::BR_CC, MVT::Other, Expand);
@@ -534,8 +534,6 @@ def : Pat<(not CPURegs:$in),
 def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
 def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
 def : Pat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
-def : Pat<(truncstorei1 CPURegs:$src, addr:$addr),
-          (SB CPURegs:$src, addr:$addr)>;
 
 // some peepholes
 def : Pat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
@@ -54,10 +54,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
   setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
   setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
 
-  // PowerPC does not have truncstore for i1.
-  setStoreXAction(MVT::i1, Promote);
+  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
 
   // PowerPC has pre-inc load and store's.
   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
@@ -774,9 +774,3 @@ def : Pat<(i32 (extloadi16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
 // zextload bool -> zextload byte
 def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
 def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
-
-// truncstore bool -> truncstore byte.
-def : Pat<(truncstorei1 IntRegs:$src, ADDRrr:$addr),
-          (STBrr ADDRrr:$addr, IntRegs:$src)>;
-def : Pat<(truncstorei1 IntRegs:$src, ADDRri:$addr),
-          (STBri ADDRri:$addr, IntRegs:$src)>;
@@ -551,13 +551,6 @@ def store : PatFrag<(ops node:$val, node:$ptr),
 }]>;
 
 // truncstore fragments.
-def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
-                           (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::i1 &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
-}]>;
 def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
                            (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
@@ -82,6 +82,14 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
 
   setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
 
+  // We don't accept any truncstore of integer registers.
+  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
+  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
+  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
+  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
+
   // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
   // operation.
   setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
@@ -638,6 +646,8 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
       AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
     }
 
+    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
     // Custom lower v2i64 and v2f64 selects.
     setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
     setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
@@ -2598,11 +2598,6 @@ def : Pat<(subc GR32:$src1, imm:$src2),
 def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
           (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
 
-def : Pat<(truncstorei1 (i8 imm:$src), addr:$dst),
-          (MOV8mi addr:$dst, imm:$src)>;
-def : Pat<(truncstorei1 GR8:$src, addr:$dst),
-          (MOV8mr addr:$dst, GR8:$src)>;
-
 // Comparisons.
 
 // TEST R,R is smaller than CMP R,0
@@ -0,0 +1,8 @@
+; RUN: llvm-as < %s | llc -march=x86 | not grep flds
+
+define void @foo(x86_fp80 %a, x86_fp80 %b, float* %fp) {
+	%c = add x86_fp80 %a, %b
+	%d = fptrunc x86_fp80 %c to float
+	store float %d, float* %fp
+	ret void
+}