//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Nate Begeman and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// FIXME: Missing folds
// sdiv, udiv, srem, urem (X, const) where X is an integer can be expanded into
// a sequence of multiplies, shifts, and adds.  This should be controlled by
// some kind of hint from the target that int div is expensive.
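//   (For illustration: an unsigned divide by a power of two is just a shift,
//   e.g. (udiv X, 8) -> (srl X, 3); other constants can typically be done as a
//   multiply-high by a "magic" constant plus fixup shifts, which is what the
//   BuildSDIV/BuildUDIV helpers below aim at.)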
// various folds of mulh[s,u] by constants such as -1, powers of 2, etc.
//
// FIXME: select C, pow2, pow2 -> something smart
// FIXME: trunc(select X, Y, Z) -> select X, trunc(Y), trunc(Z)
// FIXME: Dead stores -> nuke
// FIXME: shr X, (and Y,31) -> shr X, Y   (TRICKY!)
// FIXME: mul (x, const) -> shifts + adds
// FIXME: undef values
// FIXME: divide by zero is currently left unfolded.  do we want to turn this
//        into an undef?
// FIXME: select ne (select cc, 1, 0), 0, true, false -> select cc, true, false
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dagcombine"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");

namespace {
#ifndef NDEBUG
  static cl::opt<bool>
    ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
                    cl::desc("Pop up a window to show dags before the first "
                             "dag combine pass"));
  static cl::opt<bool>
    ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
                    cl::desc("Pop up a window to show dags before the second "
                             "dag combine pass"));
#else
  static const bool ViewDAGCombine1 = false;
  static const bool ViewDAGCombine2 = false;
#endif

  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Turn on alias analysis during testing"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
                     cl::desc("Include global information in alias analysis"));

//------------------------------ DAGCombiner ---------------------------------//

  class VISIBILITY_HIDDEN DAGCombiner {
    SelectionDAG &DAG;
    TargetLowering &TLI;
    bool AfterLegalize;

    // Worklist of all of the nodes that need to be simplified.
    std::vector<SDNode*> WorkList;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    ///
    void AddUsersToWorkList(SDNode *N) {
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           UI != UE; ++UI)
        AddToWorkList(*UI);
    }

    /// removeFromWorkList - remove all instances of N from the worklist.
    ///
    void removeFromWorkList(SDNode *N) {
      WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), N),
                     WorkList.end());
    }

  public:
    /// AddToWorkList - Add to the work list, making sure its instance is at
    /// the back (next to be processed.)
    void AddToWorkList(SDNode *N) {
      removeFromWorkList(N);
      WorkList.push_back(N);
    }

    SDOperand CombineTo(SDNode *N, const SDOperand *To, unsigned NumTo,
                        bool AddTo = true) {
      assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
      ++NodesCombined;
      DOUT << "\nReplacing.1 "; DEBUG(N->dump());
      DOUT << "\nWith: "; DEBUG(To[0].Val->dump(&DAG));
      DOUT << " and " << NumTo-1 << " other values\n";
      std::vector<SDNode*> NowDead;
      DAG.ReplaceAllUsesWith(N, To, &NowDead);

      if (AddTo) {
        // Push the new nodes and any users onto the worklist
        for (unsigned i = 0, e = NumTo; i != e; ++i) {
          AddToWorkList(To[i].Val);
          AddUsersToWorkList(To[i].Val);
        }
      }

      // Nodes can be reintroduced into the worklist.  Make sure we do not
      // process a node that has been replaced.
      removeFromWorkList(N);
      for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
        removeFromWorkList(NowDead[i]);

      // Finally, since the node is now dead, remove it from the graph.
      DAG.DeleteNode(N);
      return SDOperand(N, 0);
    }

    SDOperand CombineTo(SDNode *N, SDOperand Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1,
                        bool AddTo = true) {
      SDOperand To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

  private:

    /// SimplifyDemandedBits - Check the specified integer node value to see if
    /// it can be simplified or if things it uses can be simplified by bit
    /// propagation.  If so, return true.
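    /// (For example, if only the low bits of a value are actually demanded by
    /// its users, masks and extensions feeding it can often be narrowed or
    /// dropped entirely.)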
    bool SimplifyDemandedBits(SDOperand Op) {
      TargetLowering::TargetLoweringOpt TLO(DAG);
      uint64_t KnownZero, KnownOne;
      uint64_t Demanded = MVT::getIntVTBitMask(Op.getValueType());
      if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
        return false;

      // Revisit the node.
      AddToWorkList(Op.Val);

      // Replace the old value with the new one.
      ++NodesCombined;
      DOUT << "\nReplacing.2 "; DEBUG(TLO.Old.Val->dump());
      DOUT << "\nWith: "; DEBUG(TLO.New.Val->dump(&DAG));
      DOUT << '\n';

      std::vector<SDNode*> NowDead;
      DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, NowDead);

      // Push the new node and any (possibly new) users onto the worklist.
      AddToWorkList(TLO.New.Val);
      AddUsersToWorkList(TLO.New.Val);

      // Nodes can end up on the worklist more than once.  Make sure we do
      // not process a node that has been replaced.
      for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
        removeFromWorkList(NowDead[i]);

      // Finally, if the node is now dead, remove it from the graph.  The node
      // may not be dead if the replacement process recursively simplified to
      // something else needing this node.
      if (TLO.Old.Val->use_empty()) {
        removeFromWorkList(TLO.Old.Val);

        // If the operands of this node are only used by the node, they will now
        // be dead.  Make sure to visit them first to delete dead nodes early.
        for (unsigned i = 0, e = TLO.Old.Val->getNumOperands(); i != e; ++i)
          if (TLO.Old.Val->getOperand(i).Val->hasOneUse())
            AddToWorkList(TLO.Old.Val->getOperand(i).Val);

        DAG.DeleteNode(TLO.Old.Val);
      }
      return true;
    }

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);


    /// visit - call the node-specific routine that knows how to fold each
    /// particular type of node.
    SDOperand visit(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types.  The semantics are as follows:
    // Return Value:
    //   SDOperand.Val == 0 - No change was made
    //   SDOperand.Val == N - N was replaced, is dead, and is already handled.
    //   otherwise          - N should be replaced by the returned Operand.
    //
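    // (Illustration: a routine that folds (add X, 0) simply returns X, while a
    // routine that already replaced a multi-result node via CombineTo returns
    // SDOperand(N, 0) to signal "already handled".)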
    SDOperand visitTokenFactor(SDNode *N);
    SDOperand visitADD(SDNode *N);
    SDOperand visitSUB(SDNode *N);
    SDOperand visitADDC(SDNode *N);
    SDOperand visitADDE(SDNode *N);
    SDOperand visitMUL(SDNode *N);
    SDOperand visitSDIV(SDNode *N);
    SDOperand visitUDIV(SDNode *N);
    SDOperand visitSREM(SDNode *N);
    SDOperand visitUREM(SDNode *N);
    SDOperand visitMULHU(SDNode *N);
    SDOperand visitMULHS(SDNode *N);
    SDOperand visitAND(SDNode *N);
    SDOperand visitOR(SDNode *N);
    SDOperand visitXOR(SDNode *N);
    SDOperand visitVBinOp(SDNode *N, ISD::NodeType IntOp, ISD::NodeType FPOp);
    SDOperand visitSHL(SDNode *N);
    SDOperand visitSRA(SDNode *N);
    SDOperand visitSRL(SDNode *N);
    SDOperand visitCTLZ(SDNode *N);
    SDOperand visitCTTZ(SDNode *N);
    SDOperand visitCTPOP(SDNode *N);
    SDOperand visitSELECT(SDNode *N);
    SDOperand visitSELECT_CC(SDNode *N);
    SDOperand visitSETCC(SDNode *N);
    SDOperand visitSIGN_EXTEND(SDNode *N);
    SDOperand visitZERO_EXTEND(SDNode *N);
    SDOperand visitANY_EXTEND(SDNode *N);
    SDOperand visitSIGN_EXTEND_INREG(SDNode *N);
    SDOperand visitTRUNCATE(SDNode *N);
    SDOperand visitBIT_CONVERT(SDNode *N);
    SDOperand visitVBIT_CONVERT(SDNode *N);
    SDOperand visitFADD(SDNode *N);
    SDOperand visitFSUB(SDNode *N);
    SDOperand visitFMUL(SDNode *N);
    SDOperand visitFDIV(SDNode *N);
    SDOperand visitFREM(SDNode *N);
    SDOperand visitFCOPYSIGN(SDNode *N);
    SDOperand visitSINT_TO_FP(SDNode *N);
    SDOperand visitUINT_TO_FP(SDNode *N);
    SDOperand visitFP_TO_SINT(SDNode *N);
    SDOperand visitFP_TO_UINT(SDNode *N);
    SDOperand visitFP_ROUND(SDNode *N);
    SDOperand visitFP_ROUND_INREG(SDNode *N);
    SDOperand visitFP_EXTEND(SDNode *N);
    SDOperand visitFNEG(SDNode *N);
    SDOperand visitFABS(SDNode *N);
    SDOperand visitBRCOND(SDNode *N);
    SDOperand visitBR_CC(SDNode *N);
    SDOperand visitLOAD(SDNode *N);
    SDOperand visitSTORE(SDNode *N);
    SDOperand visitINSERT_VECTOR_ELT(SDNode *N);
    SDOperand visitVINSERT_VECTOR_ELT(SDNode *N);
    SDOperand visitVBUILD_VECTOR(SDNode *N);
    SDOperand visitVECTOR_SHUFFLE(SDNode *N);
    SDOperand visitVVECTOR_SHUFFLE(SDNode *N);

    SDOperand XformToShuffleWithZero(SDNode *N);
    SDOperand ReassociateOps(unsigned Opc, SDOperand LHS, SDOperand RHS);

    bool SimplifySelectOps(SDNode *SELECT, SDOperand LHS, SDOperand RHS);
    SDOperand SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDOperand SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2);
    SDOperand SimplifySelectCC(SDOperand N0, SDOperand N1, SDOperand N2,
                               SDOperand N3, ISD::CondCode CC,
                               bool NotExtCompare = false);
    SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
                            ISD::CondCode Cond, bool foldBooleans = true);
    SDOperand ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(SDNode *, MVT::ValueType);
    SDOperand BuildSDIV(SDNode *N);
    SDOperand BuildUDIV(SDNode *N);
    SDNode *MatchRotate(SDOperand LHS, SDOperand RHS);
    SDOperand ReduceLoadWidth(SDNode *N);

    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDOperand OriginalChain,
                          SmallVector<SDOperand, 8> &Aliases);

    /// isAlias - Return true if there is any possibility that the two addresses
    /// overlap.
    bool isAlias(SDOperand Ptr1, int64_t Size1,
                 const Value *SrcValue1, int SrcValueOffset1,
                 SDOperand Ptr2, int64_t Size2,
                 const Value *SrcValue2, int SrcValueOffset2);

    /// FindAliasInfo - Extracts the relevant alias information from the memory
    /// node.  Returns true if the operand was a load.
    bool FindAliasInfo(SDNode *N,
                       SDOperand &Ptr, int64_t &Size,
                       const Value *&SrcValue, int &SrcValueOffset);

    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
    /// looking for a better chain (aliasing node.)
    SDOperand FindBetterChain(SDNode *N, SDOperand Chain);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A)
      : DAG(D),
        TLI(D.getTargetLoweringInfo()),
        AfterLegalize(false),
        AA(A) {}

    /// Run - runs the dag combiner on all nodes in the work list
    void Run(bool RunningAfterLegalize);
  };
}

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorkList(N);
}

SDOperand TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, const std::vector<SDOperand> &To) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size());
}

SDOperand TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDOperand Res) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res);
}

SDOperand TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

/// isNegatibleForFree - Return 1 if we can compute the negated form of the
/// specified expression for the same cost as the expression itself, or 2 if we
/// can compute the negated form more cheaply than the expression itself.
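/// (For example, an existing (fneg X) is always free to negate, and under
/// unsafe FP math -(A+B) can be formed as (-A)-B whenever -A is itself free.)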
static char isNegatibleForFree(SDOperand Op, unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return 2;

  // Don't allow anything with multiple uses.
  if (!Op.hasOneUse()) return 0;

  // Don't recurse exponentially.
  if (Depth > 6) return 0;

  switch (Op.getOpcode()) {
  default: return false;
  case ISD::ConstantFP:
    return 1;
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    if (!UnsafeFPMath) return 0;

    // -(A+B) -> -A - B
    if (char V = isNegatibleForFree(Op.getOperand(0), Depth+1))
      return V;
    // -(A+B) -> -B - A
    return isNegatibleForFree(Op.getOperand(1), Depth+1);
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!UnsafeFPMath) return 0;

    // -(A-B) -> B-A
    return 1;

  case ISD::FMUL:
  case ISD::FDIV:
    if (HonorSignDependentRoundingFPMath()) return 0;

    // -(X*Y) -> (-X * Y) or (X*-Y)
    if (char V = isNegatibleForFree(Op.getOperand(0), Depth+1))
      return V;

    return isNegatibleForFree(Op.getOperand(1), Depth+1);

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    return isNegatibleForFree(Op.getOperand(0), Depth+1);
  }
}

/// GetNegatedExpression - If isNegatibleForFree returns true, this function
/// returns the newly negated expression.
static SDOperand GetNegatedExpression(SDOperand Op, SelectionDAG &DAG,
                                      unsigned Depth = 0) {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);

  // Don't allow anything with multiple uses.
  assert(Op.hasOneUse() && "Unknown reuse!");

  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
  switch (Op.getOpcode()) {
  default: assert(0 && "Unknown code");
  case ISD::ConstantFP:
    return DAG.getConstantFP(-cast<ConstantFPSDNode>(Op)->getValue(),
                             Op.getValueType());
  case ISD::FADD:
    // FIXME: determine better conditions for this xform.
    assert(UnsafeFPMath);

    // -(A+B) -> -A - B
    if (isNegatibleForFree(Op.getOperand(0), Depth+1))
      return DAG.getNode(ISD::FSUB, Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG, Depth+1),
                         Op.getOperand(1));
    // -(A+B) -> -B - A
    return DAG.getNode(ISD::FSUB, Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(1), DAG, Depth+1),
                       Op.getOperand(0));
  case ISD::FSUB:
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    assert(UnsafeFPMath);

    // -(A-B) -> B-A
    return DAG.getNode(ISD::FSUB, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(0));

  case ISD::FMUL:
  case ISD::FDIV:
    assert(!HonorSignDependentRoundingFPMath());

    // -(X*Y) -> -X * Y
    if (isNegatibleForFree(Op.getOperand(0), Depth+1))
      return DAG.getNode(Op.getOpcode(), Op.getValueType(),
                         GetNegatedExpression(Op.getOperand(0), DAG, Depth+1),
                         Op.getOperand(1));

    // -(X*Y) -> X * -Y
    return DAG.getNode(Op.getOpcode(), Op.getValueType(),
                       Op.getOperand(0),
                       GetNegatedExpression(Op.getOperand(1), DAG, Depth+1));

  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::FSIN:
    return DAG.getNode(Op.getOpcode(), Op.getValueType(),
                       GetNegatedExpression(Op.getOperand(0), DAG, Depth+1));
  }
}

// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
// that selects between the values 1 and 0, making it equivalent to a setcc.
// Also, set the incoming LHS, RHS, and CC references to the appropriate
// nodes based on the type of node we are checking.  This simplifies life a
// bit for the callers.
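// (For example, (select_cc lhs, rhs, 1, 0, cc) is reported through LHS, RHS
// and CC exactly as a plain (setcc lhs, rhs, cc) would be.)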
static bool isSetCCEquivalent(SDOperand N, SDOperand &LHS, SDOperand &RHS,
                              SDOperand &CC) {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }
  if (N.getOpcode() == ISD::SELECT_CC &&
      N.getOperand(2).getOpcode() == ISD::Constant &&
      N.getOperand(3).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(N.getOperand(2))->getValue() == 1 &&
      cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(4);
    return true;
  }
  return false;
}

// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
// one use.  If this is true, it allows the users to invert the operation for
// free when it is profitable to do so.
static bool isOneUseSetCC(SDOperand N) {
  SDOperand N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.Val->hasOneUse())
    return true;
  return false;
}
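
// ReassociateOps - Reassociate expressions of the given commutative opcode so
// that constants end up grouped together, e.g. (op (op x, c1), y) becomes
// (op (op x, y), c1) when (op x, c1) has only one use.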
SDOperand DAGCombiner::ReassociateOps(unsigned Opc, SDOperand N0, SDOperand N1){
  MVT::ValueType VT = N0.getValueType();
  // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
  // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
  if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
    if (isa<ConstantSDNode>(N1)) {
      SDOperand OpNode = DAG.getNode(Opc, VT, N0.getOperand(1), N1);
      AddToWorkList(OpNode.Val);
      return DAG.getNode(Opc, VT, OpNode, N0.getOperand(0));
    } else if (N0.hasOneUse()) {
      SDOperand OpNode = DAG.getNode(Opc, VT, N0.getOperand(0), N1);
      AddToWorkList(OpNode.Val);
      return DAG.getNode(Opc, VT, OpNode, N0.getOperand(1));
    }
  }
  // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
  // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
  if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
    if (isa<ConstantSDNode>(N0)) {
      SDOperand OpNode = DAG.getNode(Opc, VT, N1.getOperand(1), N0);
      AddToWorkList(OpNode.Val);
      return DAG.getNode(Opc, VT, OpNode, N1.getOperand(0));
    } else if (N1.hasOneUse()) {
      SDOperand OpNode = DAG.getNode(Opc, VT, N1.getOperand(0), N0);
      AddToWorkList(OpNode.Val);
      return DAG.getNode(Opc, VT, OpNode, N1.getOperand(1));
    }
  }
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(bool RunningAfterLegalize) {
  // set the instance variable, so that the various visit routines may use it.
  AfterLegalize = RunningAfterLegalize;

  // Add all the dag nodes to the worklist.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I)
    WorkList.push_back(I);

  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted, and tracking any
  // changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // The root of the dag may dangle to deleted nodes until the dag combiner is
  // done.  Set it to null to avoid confusion.
  DAG.setRoot(SDOperand());

  /// DagCombineInfo - Expose the DAG combiner to the target combiner impls.
  TargetLowering::DAGCombinerInfo
    DagCombineInfo(DAG, !RunningAfterLegalize, false, this);

  // while the worklist isn't empty, inspect the node on the end of it and
  // try and combine it.
  while (!WorkList.empty()) {
    SDNode *N = WorkList.back();
    WorkList.pop_back();

    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (N->use_empty() && N != &Dummy) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        AddToWorkList(N->getOperand(i).Val);

      DAG.DeleteNode(N);
      continue;
    }

    SDOperand RV = visit(N);

    // If nothing happened, try a target-specific DAG combine.
    if (RV.Val == 0) {
      assert(N->getOpcode() != ISD::DELETED_NODE &&
             "Node was deleted but visit returned NULL!");
      if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
          TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode()))
        RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }

    if (RV.Val) {
      ++NodesCombined;
      // If we get back the same node we passed in, rather than a new node or
      // zero, we know that the node must have defined multiple values and
      // CombineTo was used.  Since CombineTo takes care of the worklist
      // mechanics for us, we have no work to do in this case.
      if (RV.Val != N) {
        assert(N->getOpcode() != ISD::DELETED_NODE &&
               RV.Val->getOpcode() != ISD::DELETED_NODE &&
               "Node was deleted but visit returned new node!");

        DOUT << "\nReplacing.3 "; DEBUG(N->dump());
        DOUT << "\nWith: "; DEBUG(RV.Val->dump(&DAG));
        DOUT << '\n';
        std::vector<SDNode*> NowDead;
        if (N->getNumValues() == RV.Val->getNumValues())
          DAG.ReplaceAllUsesWith(N, RV.Val, &NowDead);
        else {
          assert(N->getValueType(0) == RV.getValueType() && "Type mismatch");
          SDOperand OpV = RV;
          DAG.ReplaceAllUsesWith(N, &OpV, &NowDead);
        }

        // Push the new node and any users onto the worklist
        AddToWorkList(RV.Val);
        AddUsersToWorkList(RV.Val);

        // Nodes can be reintroduced into the worklist.  Make sure we do not
        // process a node that has been replaced.
        removeFromWorkList(N);
        for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
          removeFromWorkList(NowDead[i]);

        // Finally, since the node is now dead, remove it from the graph.
        DAG.DeleteNode(N);
      }
    }
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
}

SDOperand DAGCombiner::visit(SDNode *N) {
  switch(N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:               return visitSREM(N);
  case ISD::UREM:               return visitUREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BIT_CONVERT:        return visitBIT_CONVERT(N);
  case ISD::VBIT_CONVERT:       return visitVBIT_CONVERT(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::VINSERT_VECTOR_ELT: return visitVINSERT_VECTOR_ELT(N);
  case ISD::VBUILD_VECTOR:      return visitVBUILD_VECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::VVECTOR_SHUFFLE:    return visitVVECTOR_SHUFFLE(N);
  case ISD::VADD:               return visitVBinOp(N, ISD::ADD , ISD::FADD);
  case ISD::VSUB:               return visitVBinOp(N, ISD::SUB , ISD::FSUB);
  case ISD::VMUL:               return visitVBinOp(N, ISD::MUL , ISD::FMUL);
  case ISD::VSDIV:              return visitVBinOp(N, ISD::SDIV, ISD::FDIV);
  case ISD::VUDIV:              return visitVBinOp(N, ISD::UDIV, ISD::UDIV);
  case ISD::VAND:               return visitVBinOp(N, ISD::AND , ISD::AND);
  case ISD::VOR:                return visitVBinOp(N, ISD::OR  , ISD::OR);
  case ISD::VXOR:               return visitVBinOp(N, ISD::XOR , ISD::XOR);
  }
  return SDOperand();
}

/// getInputChainForNode - Given a node, return its input chain if it has one,
/// otherwise return a null sd operand.
static SDOperand getInputChainForNode(SDNode *N) {
  if (unsigned NumOps = N->getNumOperands()) {
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDOperand(0, 0);
}

SDOperand DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
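  // (e.g. TokenFactor(A, B), where A's own input chain is B, simplifies to A.)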
|
|
|
|
if (N->getNumOperands() == 2) {
|
|
|
|
if (getInputChainForNode(N->getOperand(0).Val) == N->getOperand(1))
|
|
|
|
return N->getOperand(0);
|
|
|
|
if (getInputChainForNode(N->getOperand(1).Val) == N->getOperand(0))
|
|
|
|
return N->getOperand(1);
|
|
|
|
}
|
|
|
|
|
2007-05-16 14:37:59 +08:00
|
|
|
SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
|
|
|
|
SmallVector<SDOperand, 8> Ops; // Ops for replacing token factor.
|
|
|
|
SmallPtrSet<SDNode*, 16> SeenOps;
|
|
|
|
bool Changed = false; // If we should replace this token factor.
|
2006-10-05 00:53:27 +08:00
|
|
|
|
|
|
|
// Start out with this token factor.
|
2006-09-26 00:29:54 +08:00
|
|
|
TFs.push_back(N);
|
|
|
|
|
2006-10-08 07:37:56 +08:00
|
|
|
// Iterate through token factors. The TFs list grows when new token factors are
|
2006-10-05 23:07:25 +08:00
|
|
|
// encountered.
|
|
|
|
for (unsigned i = 0; i < TFs.size(); ++i) {
|
|
|
|
SDNode *TF = TFs[i];
|
|
|
|
|
2006-10-05 00:53:27 +08:00
|
|
|
// Check each of the operands.
|
|
|
|
for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
|
|
|
|
SDOperand Op = TF->getOperand(i);
|
2006-09-26 00:29:54 +08:00
|
|
|
|
2006-10-05 00:53:27 +08:00
|
|
|
switch (Op.getOpcode()) {
|
|
|
|
case ISD::EntryToken:
|
2006-10-05 23:07:25 +08:00
|
|
|
// Entry tokens don't need to be added to the list. They are
|
|
|
|
// redundant.
|
|
|
|
Changed = true;
|
2006-10-05 00:53:27 +08:00
|
|
|
break;
|
2006-09-26 00:29:54 +08:00
|
|
|
|
2006-10-05 00:53:27 +08:00
|
|
|
case ISD::TokenFactor:
|
2006-10-05 23:07:25 +08:00
|
|
|
if ((CombinerAA || Op.hasOneUse()) &&
|
|
|
|
std::find(TFs.begin(), TFs.end(), Op.Val) == TFs.end()) {
|
2006-10-05 00:53:27 +08:00
|
|
|
// Queue up for processing.
|
|
|
|
TFs.push_back(Op.Val);
|
|
|
|
// Clean up in case the token factor is removed.
|
|
|
|
AddToWorkList(Op.Val);
|
|
|
|
Changed = true;
|
|
|
|
break;
|
2006-09-26 00:29:54 +08:00
|
|
|
}
|
2006-10-05 00:53:27 +08:00
|
|
|
// Fall thru
|
|
|
|
|
|
|
|
default:
|
2007-05-16 14:37:59 +08:00
|
|
|
// Only add if it isn't already in the list.
|
|
|
|
if (SeenOps.insert(Op.Val))
|
2006-10-05 23:07:25 +08:00
|
|
|
Ops.push_back(Op);
|
2007-05-16 14:37:59 +08:00
|
|
|
else
|
|
|
|
Changed = true;
|
2006-10-05 00:53:27 +08:00
|
|
|
break;
|
2006-09-26 00:29:54 +08:00
|
|
|
}
|
|
|
|
}
|
2006-10-05 00:53:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand Result;
|
|
|
|
|
|
|
|
// If we've changed things around, then replace the token factor.
|
|
|
|
if (Changed) {
|
|
|
|
if (Ops.size() == 0) {
|
|
|
|
// The entry token is the only possible outcome.
|
|
|
|
Result = DAG.getEntryNode();
|
|
|
|
} else {
|
|
|
|
// New and improved token factor.
|
|
|
|
Result = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0], Ops.size());
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
2006-10-14 07:32:28 +08:00
|
|
|
|
|
|
|
// Don't add users to work list.
|
|
|
|
return CombineTo(N, Result, false);
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
2006-09-26 00:29:54 +08:00
|
|
|
|
2006-10-05 00:53:27 +08:00
|
|
|
return Result;
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
Remove this xform:
(shl (add x, c1), c2) -> (add (shl x, c2), c1<<c2)
Replace it with:
(add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
This fixes test/CodeGen/ARM/smul.ll
llvm-svn: 33361
2007-01-20 01:51:44 +08:00
|
|
|
static
|
|
|
|
SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) {
|
|
|
|
MVT::ValueType VT = N0.getValueType();
|
|
|
|
SDOperand N00 = N0.getOperand(0);
|
|
|
|
SDOperand N01 = N0.getOperand(1);
|
|
|
|
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
|
|
|
|
if (N01C && N00.getOpcode() == ISD::ADD && N00.Val->hasOneUse() &&
|
|
|
|
isa<ConstantSDNode>(N00.getOperand(1))) {
|
|
|
|
N0 = DAG.getNode(ISD::ADD, VT,
|
|
|
|
DAG.getNode(ISD::SHL, VT, N00.getOperand(0), N01),
|
|
|
|
DAG.getNode(ISD::SHL, VT, N00.getOperand(1), N01));
|
|
|
|
return DAG.getNode(ISD::ADD, VT, N0, N1);
|
|
|
|
}
|
|
|
|
return SDOperand();
|
|
|
|
}
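The algebra behind combineShlAddConstant is just left-distributivity of shl over add: ((x + c1) << c2) + y equals ((x << c2) + (c1 << c2)) + y modulo 2^32. A minimal standalone C++ sketch of that scalar identity (illustrative only, not combiner code; the helper names are made up):

#include <cassert>
#include <cstdint>

// Scalar model of the fold:
//   (add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
static uint32_t foldBefore(uint32_t x, uint32_t c1, uint32_t c2, uint32_t y) {
  return ((x + c1) << c2) + y;
}
static uint32_t foldAfter(uint32_t x, uint32_t c1, uint32_t c2, uint32_t y) {
  return ((x << c2) + (c1 << c2)) + y;
}

int main() {
  for (uint32_t x = 0; x < 100000; x += 7)
    for (uint32_t c2 = 0; c2 < 32; ++c2)
      assert(foldBefore(x, 0x1234u, c2, 42u) == foldAfter(x, 0x1234u, c2, 42u));
  return 0;
}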
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitADD(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-08 00:09:19 +08:00
|
|
|
MVT::ValueType VT = N0.getValueType();
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (add c1, c2) -> c1+c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::ADD, VT, N0, N1);
|
2005-09-08 07:25:52 +08:00
|
|
|
// canonicalize constant to RHS
|
2005-10-18 08:28:13 +08:00
|
|
|
if (N0C && !N1C)
|
|
|
|
return DAG.getNode(ISD::ADD, VT, N1, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (add x, 0) -> x
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isNullValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0;
|
2006-01-13 04:22:43 +08:00
|
|
|
// fold ((c1-A)+c2) -> (c1+c2)-A
|
|
|
|
if (N1C && N0.getOpcode() == ISD::SUB)
|
|
|
|
if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
|
|
|
|
return DAG.getNode(ISD::SUB, VT,
|
|
|
|
DAG.getConstant(N1C->getValue()+N0C->getValue(), VT),
|
|
|
|
N0.getOperand(1));
|
2006-02-03 14:46:56 +08:00
|
|
|
// reassociate add
|
|
|
|
SDOperand RADD = ReassociateOps(ISD::ADD, N0, N1);
|
|
|
|
if (RADD.Val != 0)
|
|
|
|
return RADD;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold ((0-A) + B) -> B-A
|
|
|
|
if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
|
|
|
|
cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
|
2005-09-08 00:09:19 +08:00
|
|
|
return DAG.getNode(ISD::SUB, VT, N1, N0.getOperand(1));
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (A + (0-B)) -> A-B
|
|
|
|
if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
|
|
|
|
cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
|
2005-09-08 00:09:19 +08:00
|
|
|
return DAG.getNode(ISD::SUB, VT, N0, N1.getOperand(1));
|
2005-09-29 06:28:18 +08:00
|
|
|
// fold (A+(B-A)) -> B
|
|
|
|
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1.getOperand(0);
|
2006-03-13 14:51:27 +08:00
|
|
|
|
2006-03-01 09:09:54 +08:00
|
|
|
if (!MVT::isVector(VT) && SimplifyDemandedBits(SDOperand(N, 0)))
|
2006-04-21 23:32:26 +08:00
|
|
|
return SDOperand(N, 0);
|
2006-03-13 14:51:27 +08:00
|
|
|
|
|
|
|
// fold (a+b) -> (a|b) iff a and b share no bits.
|
|
|
|
if (MVT::isInteger(VT) && !MVT::isVector(VT)) {
|
|
|
|
uint64_t LHSZero, LHSOne;
|
|
|
|
uint64_t RHSZero, RHSOne;
|
|
|
|
uint64_t Mask = MVT::getIntVTBitMask(VT);
|
|
|
|
TLI.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
|
|
|
|
if (LHSZero) {
|
|
|
|
TLI.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
|
|
|
|
|
|
|
|
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
|
|
|
|
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
|
|
|
|
if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
|
|
|
|
(LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
|
|
|
|
return DAG.getNode(ISD::OR, VT, N0, N1);
|
|
|
|
}
|
|
|
|
}
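The "share no bits" guard above works because a bit set in only one operand can never generate a carry, so the addition degenerates to a bitwise OR. A small standalone check of that scalar identity (illustrative only, not combiner code):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t a = 0; a < 4096; ++a)
    for (uint32_t b = 0; b < 4096; ++b)
      if ((a & b) == 0)           // no bit is set in both operands...
        assert(a + b == (a | b)); // ...so the add cannot carry and equals the OR
  return 0;
}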
|
2006-11-06 16:14:30 +08:00
|
|
|
|
2007-01-20 01:51:44 +08:00
|
|
|
// fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
|
|
|
|
if (N0.getOpcode() == ISD::SHL && N0.Val->hasOneUse()) {
|
|
|
|
SDOperand Result = combineShlAddConstant(N0, N1, DAG);
|
|
|
|
if (Result.Val) return Result;
|
|
|
|
}
|
|
|
|
if (N1.getOpcode() == ISD::SHL && N1.Val->hasOneUse()) {
|
|
|
|
SDOperand Result = combineShlAddConstant(N1, N0, DAG);
|
|
|
|
if (Result.Val) return Result;
|
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2007-03-05 04:03:15 +08:00
|
|
|
SDOperand DAGCombiner::visitADDC(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
|
|
|
MVT::ValueType VT = N0.getValueType();
|
|
|
|
|
|
|
|
// If the flag result is dead, turn this into an ADD.
|
|
|
|
if (N->hasNUsesOfValue(0, 1))
|
|
|
|
return CombineTo(N, DAG.getNode(ISD::ADD, VT, N1, N0),
|
2007-03-05 04:40:38 +08:00
|
|
|
DAG.getNode(ISD::CARRY_FALSE, MVT::Flag));
|
2007-03-05 04:03:15 +08:00
|
|
|
|
|
|
|
// canonicalize constant to RHS.
|
2007-03-05 04:08:45 +08:00
|
|
|
if (N0C && !N1C) {
|
|
|
|
SDOperand Ops[] = { N1, N0 };
|
|
|
|
return DAG.getNode(ISD::ADDC, N->getVTList(), Ops, 2);
|
|
|
|
}
|
2007-03-05 04:03:15 +08:00
|
|
|
|
2007-03-05 04:40:38 +08:00
|
|
|
// fold (addc x, 0) -> x + no carry out
|
|
|
|
if (N1C && N1C->isNullValue())
|
|
|
|
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, MVT::Flag));
|
|
|
|
|
|
|
|
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
|
|
|
|
uint64_t LHSZero, LHSOne;
|
|
|
|
uint64_t RHSZero, RHSOne;
|
|
|
|
uint64_t Mask = MVT::getIntVTBitMask(VT);
|
|
|
|
TLI.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
|
|
|
|
if (LHSZero) {
|
|
|
|
TLI.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
|
|
|
|
|
|
|
|
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
|
|
|
|
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
|
|
|
|
if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
|
|
|
|
(LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
|
|
|
|
return CombineTo(N, DAG.getNode(ISD::OR, VT, N0, N1),
|
|
|
|
DAG.getNode(ISD::CARRY_FALSE, MVT::Flag));
|
|
|
|
}
|
2007-03-05 04:03:15 +08:00
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitADDE(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2007-03-05 04:40:38 +08:00
|
|
|
SDOperand CarryIn = N->getOperand(2);
|
2007-03-05 04:03:15 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2007-03-05 04:08:45 +08:00
|
|
|
//MVT::ValueType VT = N0.getValueType();
|
2007-03-05 04:03:15 +08:00
|
|
|
|
|
|
|
// canonicalize constant to RHS
|
2007-03-05 04:08:45 +08:00
|
|
|
if (N0C && !N1C) {
|
2007-03-05 04:40:38 +08:00
|
|
|
SDOperand Ops[] = { N1, N0, CarryIn };
|
2007-03-05 04:08:45 +08:00
|
|
|
return DAG.getNode(ISD::ADDE, N->getVTList(), Ops, 3);
|
|
|
|
}
|
2007-03-05 04:03:15 +08:00
|
|
|
|
2007-03-05 04:40:38 +08:00
|
|
|
// fold (adde x, y, false) -> (addc x, y)
|
|
|
|
if (CarryIn.getOpcode() == ISD::CARRY_FALSE) {
|
|
|
|
SDOperand Ops[] = { N1, N0 };
|
|
|
|
return DAG.getNode(ISD::ADDC, N->getVTList(), Ops, 2);
|
|
|
|
}
|
2007-03-05 04:03:15 +08:00
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitSUB(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N0.getValueType();
|
2005-09-01 08:19:25 +08:00
|
|
|
|
2005-10-17 09:07:11 +08:00
|
|
|
// fold (sub x, x) -> 0
|
|
|
|
if (N0 == N1)
|
|
|
|
return DAG.getConstant(0, N->getValueType(0));
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (sub c1, c2) -> c1-c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::SUB, VT, N0, N1);
|
2005-10-11 14:07:15 +08:00
|
|
|
// fold (sub x, c) -> (add x, -c)
|
|
|
|
if (N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::ADD, VT, N0, DAG.getConstant(-N1C->getValue(), VT));
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (A+B)-A -> B
|
2005-09-29 06:28:18 +08:00
|
|
|
if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0.getOperand(1);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (A+B)-B -> A
|
2005-09-29 06:28:18 +08:00
|
|
|
if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0.getOperand(0);
|
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitMUL(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-09 04:18:10 +08:00
|
|
|
MVT::ValueType VT = N0.getValueType();
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (mul c1, c2) -> c1*c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::MUL, VT, N0, N1);
|
2005-09-08 07:25:52 +08:00
|
|
|
// canonicalize constant to RHS
|
2005-10-18 08:28:13 +08:00
|
|
|
if (N0C && !N1C)
|
|
|
|
return DAG.getNode(ISD::MUL, VT, N1, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (mul x, 0) -> 0
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isNullValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (mul x, -1) -> 0-x
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isAllOnesValue())
|
2005-10-21 08:02:42 +08:00
|
|
|
return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT), N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (mul x, (1 << c)) -> x << c
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && isPowerOf2_64(N1C->getValue()))
|
2005-10-30 14:41:49 +08:00
|
|
|
return DAG.getNode(ISD::SHL, VT, N0,
|
2005-09-03 05:18:40 +08:00
|
|
|
DAG.getConstant(Log2_64(N1C->getValue()),
|
2005-09-06 12:43:02 +08:00
|
|
|
TLI.getShiftAmountTy()));
|
2005-10-30 14:41:49 +08:00
|
|
|
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
|
|
|
|
if (N1C && isPowerOf2_64(-N1C->getSignExtended())) {
|
|
|
|
// FIXME: If the input is something that is easily negated (e.g. a
|
|
|
|
// single-use add), we should put the negate there.
|
|
|
|
return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT),
|
|
|
|
DAG.getNode(ISD::SHL, VT, N0,
|
|
|
|
DAG.getConstant(Log2_64(-N1C->getSignExtended()),
|
|
|
|
TLI.getShiftAmountTy())));
|
|
|
|
}
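The negative-power-of-two case rests on the two's-complement identity x * -(1 << c) == 0 - (x << c). A standalone scalar check (illustrative only; unsigned wrapping arithmetic models the DAG node semantics):

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t x = -1000; x <= 1000; ++x)
    for (unsigned c = 1; c < 16; ++c) {
      uint32_t v = (uint32_t)(int32_t)x;
      uint32_t mul = v * (uint32_t)-(1 << c); // mul x, -(1 << c), wrapping like the DAG node
      uint32_t sub = (uint32_t)0 - (v << c);  // sub 0, (shl x, c)
      assert(mul == sub);
    }
  return 0;
}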
|
2006-04-03 05:42:45 +08:00
|
|
|
|
2006-03-01 11:44:24 +08:00
|
|
|
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
|
|
|
|
if (N1C && N0.getOpcode() == ISD::SHL &&
|
|
|
|
isa<ConstantSDNode>(N0.getOperand(1))) {
|
|
|
|
SDOperand C3 = DAG.getNode(ISD::SHL, VT, N1, N0.getOperand(1));
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(C3.Val);
|
2006-03-01 11:44:24 +08:00
|
|
|
return DAG.getNode(ISD::MUL, VT, N0.getOperand(0), C3);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
|
|
|
|
// use.
|
|
|
|
{
|
|
|
|
SDOperand Sh(0,0), Y(0,0);
|
|
|
|
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
|
|
|
|
if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
|
|
|
|
N0.Val->hasOneUse()) {
|
|
|
|
Sh = N0; Y = N1;
|
|
|
|
} else if (N1.getOpcode() == ISD::SHL &&
|
|
|
|
isa<ConstantSDNode>(N1.getOperand(1)) && N1.Val->hasOneUse()) {
|
|
|
|
Sh = N1; Y = N0;
|
|
|
|
}
|
|
|
|
if (Sh.Val) {
|
|
|
|
SDOperand Mul = DAG.getNode(ISD::MUL, VT, Sh.getOperand(0), Y);
|
|
|
|
return DAG.getNode(ISD::SHL, VT, Mul, Sh.getOperand(1));
|
|
|
|
}
|
|
|
|
}
|
fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
fold (shl (add x, c1), c2) -> (add (shl x, c2), c1<<c2)
This allows us to compile CodeGen/PowerPC/addi-reassoc.ll into:
_test1:
slwi r2, r4, 4
add r2, r2, r3
lwz r3, 36(r2)
blr
_test2:
mulli r2, r4, 5
add r2, r2, r3
lbz r2, 11(r2)
extsb r3, r2
blr
instead of:
_test1:
addi r2, r4, 2
slwi r2, r2, 4
add r2, r3, r2
lwz r3, 4(r2)
blr
_test2:
addi r2, r4, 2
mulli r2, r2, 5
add r2, r3, r2
lbz r2, 1(r2)
extsb r3, r2
blr
llvm-svn: 26535
2006-03-05 07:33:26 +08:00
|
|
|
// fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
|
|
|
|
if (N1C && N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse() &&
|
|
|
|
isa<ConstantSDNode>(N0.getOperand(1))) {
|
|
|
|
return DAG.getNode(ISD::ADD, VT,
|
|
|
|
DAG.getNode(ISD::MUL, VT, N0.getOperand(0), N1),
|
|
|
|
DAG.getNode(ISD::MUL, VT, N0.getOperand(1), N1));
|
|
|
|
}
|
2006-03-01 11:44:24 +08:00
|
|
|
|
2006-02-03 14:46:56 +08:00
|
|
|
// reassociate mul
|
|
|
|
SDOperand RMUL = ReassociateOps(ISD::MUL, N0, N1);
|
|
|
|
if (RMUL.Val != 0)
|
|
|
|
return RMUL;
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitSDIV(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (sdiv c1, c2) -> c1/c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C && !N1C->isNullValue())
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::SDIV, VT, N0, N1);
|
2005-10-21 08:02:42 +08:00
|
|
|
// fold (sdiv X, 1) -> X
|
|
|
|
if (N1C && N1C->getSignExtended() == 1LL)
|
|
|
|
return N0;
|
|
|
|
// fold (sdiv X, -1) -> 0-X
|
|
|
|
if (N1C && N1C->isAllOnesValue())
|
|
|
|
return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT), N0);
|
2005-10-07 14:10:46 +08:00
|
|
|
// If we know the sign bits of both operands are zero, strength reduce to a
|
|
|
|
// udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
|
|
|
|
uint64_t SignBit = 1ULL << (MVT::getSizeInBits(VT)-1);
|
2006-01-30 12:09:27 +08:00
|
|
|
if (TLI.MaskedValueIsZero(N1, SignBit) &&
|
|
|
|
TLI.MaskedValueIsZero(N0, SignBit))
|
2005-10-07 14:10:46 +08:00
|
|
|
return DAG.getNode(ISD::UDIV, N1.getValueType(), N0, N1);
|
2006-02-17 15:26:20 +08:00
|
|
|
// fold (sdiv X, pow2) -> simple ops after legalize
|
2006-02-18 03:54:08 +08:00
|
|
|
if (N1C && N1C->getValue() && !TLI.isIntDivCheap() &&
|
2005-10-21 08:02:42 +08:00
|
|
|
(isPowerOf2_64(N1C->getSignExtended()) ||
|
|
|
|
isPowerOf2_64(-N1C->getSignExtended()))) {
|
|
|
|
// If dividing by powers of two is cheap, then don't perform the following
|
|
|
|
// fold.
|
|
|
|
if (TLI.isPow2DivCheap())
|
|
|
|
return SDOperand();
|
|
|
|
int64_t pow2 = N1C->getSignExtended();
|
|
|
|
int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
|
2006-02-16 16:02:36 +08:00
|
|
|
unsigned lg2 = Log2_64(abs2);
|
|
|
|
// Splat the sign bit into the register
|
|
|
|
SDOperand SGN = DAG.getNode(ISD::SRA, VT, N0,
|
2005-10-21 08:02:42 +08:00
|
|
|
DAG.getConstant(MVT::getSizeInBits(VT)-1,
|
|
|
|
TLI.getShiftAmountTy()));
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(SGN.Val);
|
2006-02-16 16:02:36 +08:00
|
|
|
// Add (N0 < 0) ? abs2 - 1 : 0;
|
|
|
|
SDOperand SRL = DAG.getNode(ISD::SRL, VT, SGN,
|
|
|
|
DAG.getConstant(MVT::getSizeInBits(VT)-lg2,
|
2005-10-21 08:02:42 +08:00
|
|
|
TLI.getShiftAmountTy()));
|
2006-02-16 16:02:36 +08:00
|
|
|
SDOperand ADD = DAG.getNode(ISD::ADD, VT, N0, SRL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(SRL.Val);
|
|
|
|
AddToWorkList(ADD.Val); // Divide by pow2
|
2006-02-16 16:02:36 +08:00
|
|
|
SDOperand SRA = DAG.getNode(ISD::SRA, VT, ADD,
|
|
|
|
DAG.getConstant(lg2, TLI.getShiftAmountTy()));
|
2005-10-21 08:02:42 +08:00
|
|
|
// If we're dividing by a positive value, we're done. Otherwise, we must
|
|
|
|
// negate the result.
|
|
|
|
if (pow2 > 0)
|
|
|
|
return SRA;
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(SRA.Val);
|
2005-10-21 08:02:42 +08:00
|
|
|
return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT), SRA);
|
|
|
|
}
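The shift sequence built above implements round-toward-zero signed division by a power of two: splat the sign bit, logically shift it to produce abs2-1 for negative inputs, add that bias, arithmetic-shift by lg2, and negate if the divisor was negative. A standalone scalar model checked against C++'s / operator, which also truncates toward zero (illustrative only; assumes arithmetic >> on signed values, as on the targets LLVM supports):

#include <cassert>
#include <cstdint>

// Scalar model of: SGN = sra(x, 31); SRL = srl(SGN, 32 - lg2);
// ADD = add(x, SRL); SRA = sra(ADD, lg2); negate the result if pow2 < 0.
static int32_t sdivByPow2(int32_t x, int64_t pow2) {
  int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
  unsigned lg2 = 0;
  while ((int64_t(1) << lg2) != abs2) ++lg2;     // Log2_64(abs2)
  int32_t sgn = x >> 31;                         // splat the sign bit
  uint32_t srl = (uint32_t)sgn >> (32 - lg2);    // (x < 0) ? abs2 - 1 : 0
  int32_t add = (int32_t)((uint32_t)x + srl);    // add the bias
  int32_t sra = add >> lg2;                      // divide by abs2
  return pow2 > 0 ? sra : -sra;
}

int main() {
  const int64_t divisors[] = {2, 4, 8, 256, -2, -16};
  for (int32_t x = -5000; x <= 5000; ++x)
    for (int64_t d : divisors)
      assert(sdivByPow2(x, d) == x / (int32_t)d); // C++ '/' also truncates toward zero
  return 0;
}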
|
2005-10-20 10:15:44 +08:00
|
|
|
// if integer divide is expensive and we satisfy the requirements, emit an
|
|
|
|
// alternate sequence.
|
2005-10-21 08:02:42 +08:00
|
|
|
if (N1C && (N1C->getSignExtended() < -1 || N1C->getSignExtended() > 1) &&
|
2005-10-23 02:50:15 +08:00
|
|
|
!TLI.isIntDivCheap()) {
|
|
|
|
SDOperand Op = BuildSDIV(N);
|
|
|
|
if (Op.Val) return Op;
|
2005-10-20 10:15:44 +08:00
|
|
|
}
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitUDIV(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (udiv c1, c2) -> c1/c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C && !N1C->isNullValue())
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::UDIV, VT, N0, N1);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (udiv x, (1 << c)) -> x >>u c
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && isPowerOf2_64(N1C->getValue()))
|
2006-02-05 15:20:23 +08:00
|
|
|
return DAG.getNode(ISD::SRL, VT, N0,
|
2005-09-03 05:18:40 +08:00
|
|
|
DAG.getConstant(Log2_64(N1C->getValue()),
|
2005-09-06 12:43:02 +08:00
|
|
|
TLI.getShiftAmountTy()));
|
2006-02-05 15:20:23 +08:00
|
|
|
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
|
|
|
|
if (N1.getOpcode() == ISD::SHL) {
|
|
|
|
if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
|
|
|
|
if (isPowerOf2_64(SHC->getValue())) {
|
|
|
|
MVT::ValueType ADDVT = N1.getOperand(1).getValueType();
|
2006-02-05 15:36:48 +08:00
|
|
|
SDOperand Add = DAG.getNode(ISD::ADD, ADDVT, N1.getOperand(1),
|
|
|
|
DAG.getConstant(Log2_64(SHC->getValue()),
|
|
|
|
ADDVT));
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(Add.Val);
|
2006-02-05 15:36:48 +08:00
|
|
|
return DAG.getNode(ISD::SRL, VT, N0, Add);
|
2006-02-05 15:20:23 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
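The fold above relies on the divisor (c << y) being 2^(log2(c)+y) when c is a power of two, so the unsigned divide is a single logical shift right by log2(c)+y. A standalone scalar check (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x < 100000; x += 13)
    for (unsigned cLog2 = 0; cLog2 < 8; ++cLog2)
      for (unsigned y = 0; y + cLog2 < 32; ++y) {
        uint32_t c = 1u << cLog2;                  // power-of-two constant c
        assert(x / (c << y) == x >> (cLog2 + y));  // udiv x, (shl c, y) -> srl x, (log2(c)+y)
      }
  return 0;
}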
|
2005-10-20 10:15:44 +08:00
|
|
|
// fold (udiv x, c) -> alternate
|
2005-10-23 02:50:15 +08:00
|
|
|
if (N1C && N1C->getValue() && !TLI.isIntDivCheap()) {
|
|
|
|
SDOperand Op = BuildUDIV(N);
|
|
|
|
if (Op.Val) return Op;
|
|
|
|
}
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitSREM(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (srem c1, c2) -> c1%c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C && !N1C->isNullValue())
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::SREM, VT, N0, N1);
|
2005-10-11 05:26:48 +08:00
|
|
|
// If we know the sign bits of both operands are zero, strength reduce to a
|
|
|
|
// urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
|
|
|
|
uint64_t SignBit = 1ULL << (MVT::getSizeInBits(VT)-1);
|
2006-01-30 12:09:27 +08:00
|
|
|
if (TLI.MaskedValueIsZero(N1, SignBit) &&
|
|
|
|
TLI.MaskedValueIsZero(N0, SignBit))
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::UREM, VT, N0, N1);
|
Lower X%C into X/C+stuff. This allows the 'division by a constant' logic to
apply to rems as well as divs. This fixes PR945 and speeds up ReedSolomon
from 14.57s to 10.90s (which is now faster than gcc).
It compiles CodeGen/X86/rem.ll into:
_test1:
subl $4, %esp
movl %esi, (%esp)
movl $2155905153, %ecx
movl 8(%esp), %esi
movl %esi, %eax
imull %ecx
addl %esi, %edx
movl %edx, %eax
shrl $31, %eax
sarl $7, %edx
addl %eax, %edx
imull $255, %edx, %eax
subl %eax, %esi
movl %esi, %eax
movl (%esp), %esi
addl $4, %esp
ret
_test2:
movl 4(%esp), %eax
movl %eax, %ecx
sarl $31, %ecx
shrl $24, %ecx
addl %eax, %ecx
andl $4294967040, %ecx
subl %ecx, %eax
ret
_test3:
subl $4, %esp
movl %esi, (%esp)
movl $2155905153, %ecx
movl 8(%esp), %esi
movl %esi, %eax
mull %ecx
shrl $7, %edx
imull $255, %edx, %eax
subl %eax, %esi
movl %esi, %eax
movl (%esp), %esi
addl $4, %esp
ret
instead of div/idiv instructions.
llvm-svn: 30920
2006-10-13 04:58:32 +08:00
|
|
|
|
|
|
|
// Unconditionally lower X%C -> X-X/C*C. This allows the X/C logic to hack on
|
|
|
|
// the remainder operation.
|
|
|
|
if (N1C && !N1C->isNullValue()) {
|
|
|
|
SDOperand Div = DAG.getNode(ISD::SDIV, VT, N0, N1);
|
|
|
|
SDOperand Mul = DAG.getNode(ISD::MUL, VT, Div, N1);
|
|
|
|
SDOperand Sub = DAG.getNode(ISD::SUB, VT, N0, Mul);
|
|
|
|
AddToWorkList(Div.Val);
|
|
|
|
AddToWorkList(Mul.Val);
|
|
|
|
return Sub;
|
|
|
|
}
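The unconditional X % C -> X - (X / C) * C rewrite is just the definition of a truncating remainder, which is why any later strength reduction of the SDIV automatically benefits the SREM as well. A standalone scalar check of the identity (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t divisors[] = {3, 7, 255, -5, 4096};
  for (int32_t x = -10000; x <= 10000; x += 17)
    for (int32_t c : divisors) {
      int32_t div = x / c;          // the SDIV the combiner can strength-reduce
      int32_t rem = x - div * c;    // SUB (x, MUL (div, c))
      assert(rem == x % c);         // matches the original SREM
    }
  return 0;
}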
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitUREM(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (urem c1, c2) -> c1%c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C && !N1C->isNullValue())
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::UREM, VT, N0, N1);
|
2005-10-11 05:26:48 +08:00
|
|
|
// fold (urem x, pow2) -> (and x, pow2-1)
|
|
|
|
if (N1C && !N1C->isNullValue() && isPowerOf2_64(N1C->getValue()))
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::AND, VT, N0, DAG.getConstant(N1C->getValue()-1,VT));
|
2006-02-05 15:36:48 +08:00
|
|
|
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
|
|
|
|
if (N1.getOpcode() == ISD::SHL) {
|
|
|
|
if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
|
|
|
|
if (isPowerOf2_64(SHC->getValue())) {
|
2006-02-05 16:07:24 +08:00
|
|
|
SDOperand Add = DAG.getNode(ISD::ADD, VT, N1,DAG.getConstant(~0ULL,VT));
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(Add.Val);
|
2006-02-05 15:36:48 +08:00
|
|
|
return DAG.getNode(ISD::AND, VT, N0, Add);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
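Both urem folds above reduce to masking: x % 2^k == x & (2^k - 1), and the same holds when the power of two is itself shifted, with the mask formed by adding -1 as done above. A standalone scalar check (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x < 200000; x += 19)
    for (unsigned k = 0; k < 28; ++k) {
      uint32_t pow2 = 1u << k;
      assert(x % pow2 == (x & (pow2 - 1)));       // urem x, pow2 -> and x, pow2-1
      uint32_t shifted = pow2 << 4;               // (shl pow2, y) with y = 4
      assert(x % shifted == (x & (shifted - 1))); // same mask trick, mask = shifted + (-1)
    }
  return 0;
}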
|
2006-10-13 04:58:32 +08:00
|
|
|
|
|
|
|
// Unconditionally lower X%C -> X-X/C*C. This allows the X/C logic to hack on
|
|
|
|
// the remainder operation.
|
|
|
|
if (N1C && !N1C->isNullValue()) {
|
|
|
|
SDOperand Div = DAG.getNode(ISD::UDIV, VT, N0, N1);
|
|
|
|
SDOperand Mul = DAG.getNode(ISD::MUL, VT, Div, N1);
|
|
|
|
SDOperand Sub = DAG.getNode(ISD::SUB, VT, N0, Mul);
|
|
|
|
AddToWorkList(Div.Val);
|
|
|
|
AddToWorkList(Mul.Val);
|
|
|
|
return Sub;
|
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitMULHS(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (mulhs x, 0) -> 0
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isNullValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (mulhs x, 1) -> (sra x, size(x)-1)
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->getValue() == 1)
|
2005-09-01 08:19:25 +08:00
|
|
|
return DAG.getNode(ISD::SRA, N0.getValueType(), N0,
|
|
|
|
DAG.getConstant(MVT::getSizeInBits(N0.getValueType())-1,
|
2005-09-06 12:43:02 +08:00
|
|
|
TLI.getShiftAmountTy()));
|
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
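The mulhs fold holds because the high half of the widening signed product x * 1 is just the sign extension of x, i.e. every bit equals the sign bit, which is exactly what sra x, size(x)-1 produces. A standalone scalar check (illustrative only; assumes arithmetic >> on signed values):

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t v = -100000; v <= 100000; v += 37) {
    int32_t x = (int32_t)v;
    int32_t hi = (int32_t)(((int64_t)x * 1) >> 32); // high half of the widening signed multiply
    assert(hi == (x >> 31));                        // equals sra x, size(x)-1 (the sign splat)
  }
  return 0;
}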
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitMULHU(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (mulhu x, 0) -> 0
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isNullValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (mulhu x, 1) -> 0
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->getValue() == 1)
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getConstant(0, N0.getValueType());
|
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2006-05-05 13:51:50 +08:00
|
|
|
/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with
|
|
|
|
/// two operands of the same opcode, try to simplify it.
|
|
|
|
SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0), N1 = N->getOperand(1);
|
|
|
|
MVT::ValueType VT = N0.getValueType();
|
|
|
|
assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
|
|
|
|
|
Implement:
// fold (and (sext x), (sext y)) -> (sext (and x, y))
// fold (or (sext x), (sext y)) -> (sext (or x, y))
// fold (xor (sext x), (sext y)) -> (sext (xor x, y))
// fold (and (aext x), (aext y)) -> (aext (and x, y))
// fold (or (aext x), (aext y)) -> (aext (or x, y))
// fold (xor (aext x), (aext y)) -> (aext (xor x, y))
llvm-svn: 28123
2006-05-05 14:31:05 +08:00
|
|
|
// For each of OP in AND/OR/XOR:
|
|
|
|
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
|
|
|
|
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
|
|
|
|
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
|
2006-05-05 14:32:04 +08:00
|
|
|
// fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y))
|
2006-05-05 14:31:05 +08:00
|
|
|
if ((N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND||
|
2006-05-05 14:32:04 +08:00
|
|
|
N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
|
2006-05-05 13:51:50 +08:00
|
|
|
N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
|
|
|
|
SDOperand ORNode = DAG.getNode(N->getOpcode(),
|
|
|
|
N0.getOperand(0).getValueType(),
|
|
|
|
N0.getOperand(0), N1.getOperand(0));
|
|
|
|
AddToWorkList(ORNode.Val);
|
2006-05-05 14:31:05 +08:00
|
|
|
return DAG.getNode(N0.getOpcode(), VT, ORNode);
|
2006-05-05 13:51:50 +08:00
|
|
|
}
|
|
|
|
|
Pull and through and/or/xor. This compiles some bitfield code to:
mov EAX, DWORD PTR [ESP + 4]
mov ECX, DWORD PTR [EAX]
mov EDX, ECX
add EDX, EDX
or EDX, ECX
and EDX, -2147483648
and ECX, 2147483647
or EDX, ECX
mov DWORD PTR [EAX], EDX
ret
instead of:
sub ESP, 4
mov DWORD PTR [ESP], ESI
mov EAX, DWORD PTR [ESP + 8]
mov ECX, DWORD PTR [EAX]
mov EDX, ECX
add EDX, EDX
mov ESI, ECX
and ESI, -2147483648
and EDX, -2147483648
or EDX, ESI
and ECX, 2147483647
or EDX, ECX
mov DWORD PTR [EAX], EDX
mov ESI, DWORD PTR [ESP]
add ESP, 4
ret
llvm-svn: 28122
2006-05-05 14:10:43 +08:00
|
|
|
// For each of OP in SHL/SRL/SRA/AND...
|
|
|
|
// fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
|
|
|
|
// fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z)
|
|
|
|
// fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
|
2006-05-05 13:51:50 +08:00
|
|
|
if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
|
2006-05-05 14:10:43 +08:00
|
|
|
N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
|
2006-05-05 13:51:50 +08:00
|
|
|
N0.getOperand(1) == N1.getOperand(1)) {
|
|
|
|
SDOperand ORNode = DAG.getNode(N->getOpcode(),
|
|
|
|
N0.getOperand(0).getValueType(),
|
|
|
|
N0.getOperand(0), N1.getOperand(0));
|
|
|
|
AddToWorkList(ORNode.Val);
|
|
|
|
return DAG.getNode(N0.getOpcode(), VT, ORNode, N0.getOperand(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
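Both fold families in SimplifyBinOpWithSameOpcodeHands are instances of a bitwise op commuting with an operation applied to both hands: with extends/truncates because corresponding bits stay in the same positions, and with shifts by a common amount because every bit moves the same distance. A standalone scalar illustration of the two shapes (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x < 4096; x += 5)
    for (uint32_t y = 0; y < 4096; y += 7) {
      // (OP (zext x), (zext y)) -> (zext (OP x, y)), modelled with an i8 -> i32 zext.
      uint8_t x8 = (uint8_t)x, y8 = (uint8_t)y;
      assert(((uint32_t)x8 & (uint32_t)y8) == (uint32_t)(uint8_t)(x8 & y8));
      assert(((uint32_t)x8 | (uint32_t)y8) == (uint32_t)(uint8_t)(x8 | y8));

      // (OP (srl x, z), (srl y, z)) -> (srl (OP x, y), z), same shift amount z.
      const unsigned z = 3;
      assert(((x >> z) ^ (y >> z)) == ((x ^ y) >> z));
    }
  return 0;
}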
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitAND(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2006-02-18 03:54:08 +08:00
|
|
|
SDOperand LL, LR, RL, RR, CC0, CC1;
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-01 08:19:25 +08:00
|
|
|
MVT::ValueType VT = N1.getValueType();
|
|
|
|
|
|
|
|
// fold (and c1, c2) -> c1&c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::AND, VT, N0, N1);
|
2005-09-08 07:25:52 +08:00
|
|
|
// canonicalize constant to RHS
|
2005-10-18 08:28:13 +08:00
|
|
|
if (N0C && !N1C)
|
|
|
|
return DAG.getNode(ISD::AND, VT, N1, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (and x, -1) -> x
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isAllOnesValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0;
|
|
|
|
// if (and x, c) is known to be zero, return 0
|
2006-02-17 05:11:51 +08:00
|
|
|
if (N1C && TLI.MaskedValueIsZero(SDOperand(N, 0), MVT::getIntVTBitMask(VT)))
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getConstant(0, VT);
|
2006-02-03 14:46:56 +08:00
|
|
|
// reassociate and
|
|
|
|
SDOperand RAND = ReassociateOps(ISD::AND, N0, N1);
|
|
|
|
if (RAND.Val != 0)
|
|
|
|
return RAND;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (and (or x, 0xFFFF), 0xFF) -> 0xFF
|
2005-11-03 02:42:59 +08:00
|
|
|
if (N1C && N0.getOpcode() == ISD::OR)
|
2005-09-01 08:19:25 +08:00
|
|
|
if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
|
2005-09-03 05:18:40 +08:00
|
|
|
if ((ORI->getValue() & N1C->getValue()) == N1C->getValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
Turn any_extend nodes into zero_extend nodes when it allows us to remove an
and instruction. This allows us to compile stuff like this:
bool %X(int %X) {
%Y = add int %X, 14
%Z = setne int %Y, 12345
ret bool %Z
}
to this:
_X:
cmpl $12331, 4(%esp)
setne %al
movzbl %al, %eax
ret
instead of this:
_X:
cmpl $12331, 4(%esp)
setne %al
movzbl %al, %eax
andl $1, %eax
ret
This occurs quite a bit with the X86 backend. For example, 25 times in
lambda, 30 times in 177.mesa, 14 times in galgel, 70 times in fma3d,
25 times in vpr, several hundred times in gcc, ~45 times in crafty,
~60 times in parser, ~140 times in eon, 110 times in perlbmk, 55 on gap,
16 times on bzip2, 14 times on twolf, and 1-2 times in many other SPEC2K
programs.
llvm-svn: 25901
2006-02-02 15:17:31 +08:00
|
|
|
// fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
|
|
|
|
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
|
2006-03-02 05:47:21 +08:00
|
|
|
unsigned InMask = MVT::getIntVTBitMask(N0.getOperand(0).getValueType());
|
2006-02-02 15:17:31 +08:00
|
|
|
if (TLI.MaskedValueIsZero(N0.getOperand(0),
|
2006-03-02 05:47:21 +08:00
|
|
|
~N1C->getValue() & InMask)) {
|
|
|
|
SDOperand Zext = DAG.getNode(ISD::ZERO_EXTEND, N0.getValueType(),
|
|
|
|
N0.getOperand(0));
|
|
|
|
|
|
|
|
// Replace uses of the AND with uses of the Zero extend node.
|
|
|
|
CombineTo(N, Zext);
|
|
|
|
|
2006-02-02 15:17:31 +08:00
|
|
|
// We actually want to replace all uses of the any_extend with the
|
|
|
|
// zero_extend, to avoid duplicating things. This will later cause this
|
|
|
|
// AND to be folded.
|
2006-03-02 05:47:21 +08:00
|
|
|
CombineTo(N0.Val, Zext);
|
2006-04-21 07:55:59 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
2006-02-02 15:17:31 +08:00
|
|
|
}
|
|
|
|
}
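The MaskedValueIsZero test above guarantees that every low bit the AND could clear is already zero in the narrow value, and the zero extend supplies zero high bits, so the AND is a no-op and only the zero_extend remains. A standalone scalar model of the guarded equivalence (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t c = 0; c < (1u << 12); c += 3)     // sample AND masks
    for (uint32_t v = 0; v < 256; ++v)             // the narrow (i8) source value
      if ((v & (~c & 0xFFu)) == 0)                 // MaskedValueIsZero(v, ~c & InMask)
        assert(((uint32_t)v & c) == (uint32_t)v);  // the AND of the zext is a no-op
  return 0;
}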
|
2005-09-10 03:49:52 +08:00
|
|
|
// fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
|
|
|
|
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
|
|
|
|
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
|
|
|
|
ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
|
|
|
|
|
|
|
|
if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
|
|
|
|
MVT::isInteger(LL.getValueType())) {
|
|
|
|
// fold (X == 0) & (Y == 0) -> (X|Y == 0)
|
|
|
|
if (cast<ConstantSDNode>(LR)->getValue() == 0 && Op1 == ISD::SETEQ) {
|
|
|
|
SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(ORNode.Val);
|
2005-09-10 03:49:52 +08:00
|
|
|
return DAG.getSetCC(VT, ORNode, LR, Op1);
|
|
|
|
}
|
|
|
|
// fold (X == -1) & (Y == -1) -> (X&Y == -1)
|
|
|
|
if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
|
|
|
|
SDOperand ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(ANDNode.Val);
|
2005-09-10 03:49:52 +08:00
|
|
|
return DAG.getSetCC(VT, ANDNode, LR, Op1);
|
|
|
|
}
|
|
|
|
// fold (X > -1) & (Y > -1) -> (X|Y > -1)
|
|
|
|
if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
|
|
|
|
SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(ORNode.Val);
|
2005-09-10 03:49:52 +08:00
|
|
|
return DAG.getSetCC(VT, ORNode, LR, Op1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// canonicalize equivalent to ll == rl
|
|
|
|
if (LL == RR && LR == RL) {
|
|
|
|
Op1 = ISD::getSetCCSwappedOperands(Op1);
|
|
|
|
std::swap(RL, RR);
|
|
|
|
}
|
|
|
|
if (LL == RL && LR == RR) {
|
|
|
|
bool isInteger = MVT::isInteger(LL.getValueType());
|
|
|
|
ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
|
|
|
|
if (Result != ISD::SETCC_INVALID)
|
|
|
|
return DAG.getSetCC(N0.getValueType(), LL, LR, Result);
|
|
|
|
}
|
|
|
|
}
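The setcc combines above are plain boolean identities over the zero and sign patterns of the operands: two equal-to-zero tests share one OR, two equal-to-minus-one tests share one AND, and two sign-bit-clear tests share one OR. A standalone scalar spot check (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x = -40; x <= 40; ++x)
    for (int32_t y = -40; y <= 40; ++y) {
      assert(((x == 0) && (y == 0))   == ((x | y) == 0));  // (X==0) & (Y==0)   -> (X|Y)==0
      assert(((x == -1) && (y == -1)) == ((x & y) == -1)); // (X==-1) & (Y==-1) -> (X&Y)==-1
      assert(((x > -1) && (y > -1))   == ((x | y) > -1));  // (X>-1) & (Y>-1)   -> (X|Y)>-1
    }
  return 0;
}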
|
2006-05-05 13:51:50 +08:00
|
|
|
|
|
|
|
// Simplify: and (op x...), (op y...) -> (op (and x, y))
|
|
|
|
if (N0.getOpcode() == N1.getOpcode()) {
|
|
|
|
SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N);
|
|
|
|
if (Tmp.Val) return Tmp;
|
2005-09-16 08:54:12 +08:00
|
|
|
}
|
2006-05-05 13:51:50 +08:00
|
|
|
|
2006-02-04 06:24:05 +08:00
|
|
|
// fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
|
|
|
|
// fold (and (sra)) -> (and (srl)) when possible.
|
2006-03-26 06:19:00 +08:00
|
|
|
if (!MVT::isVector(VT) &&
|
|
|
|
SimplifyDemandedBits(SDOperand(N, 0)))
|
2006-04-21 23:32:26 +08:00
|
|
|
return SDOperand(N, 0);
|
2005-10-13 11:11:28 +08:00
|
|
|
// fold (zext_inreg (extload x)) -> (zextload x)
|
2007-03-07 16:07:03 +08:00
|
|
|
if (ISD::isEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val)) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
2006-10-11 15:10:22 +08:00
|
|
|
MVT::ValueType EVT = LN0->getLoadedVT();
|
2005-10-14 02:34:58 +08:00
|
|
|
// If we zero all the possible extended bits, then we can turn this into
|
|
|
|
// a zextload if we are running before legalize or the operation is legal.
|
2006-01-30 12:09:27 +08:00
|
|
|
if (TLI.MaskedValueIsZero(N1, ~0ULL << MVT::getSizeInBits(EVT)) &&
|
2006-10-04 08:56:09 +08:00
|
|
|
(!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
|
2006-10-10 04:57:25 +08:00
|
|
|
SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
|
|
|
|
LN0->getBasePtr(), LN0->getSrcValue(),
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(),
|
|
|
|
LN0->getAlignment());
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(N);
|
2005-10-14 02:16:34 +08:00
|
|
|
CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1));
|
2006-04-21 07:55:59 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
|
2007-03-07 16:07:03 +08:00
|
|
|
if (ISD::isSEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
|
|
|
|
N0.hasOneUse()) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
2006-10-11 15:10:22 +08:00
|
|
|
MVT::ValueType EVT = LN0->getLoadedVT();
|
2005-10-14 02:34:58 +08:00
|
|
|
// If we zero all the possible extended bits, then we can turn this into
|
|
|
|
// a zextload if we are running before legalize or the operation is legal.
|
2006-01-30 12:09:27 +08:00
|
|
|
if (TLI.MaskedValueIsZero(N1, ~0ULL << MVT::getSizeInBits(EVT)) &&
|
2006-10-04 08:56:09 +08:00
|
|
|
(!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
|
2006-10-10 04:57:25 +08:00
|
|
|
SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
|
|
|
|
LN0->getBasePtr(), LN0->getSrcValue(),
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(),
|
|
|
|
LN0->getAlignment());
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(N);
|
2005-10-14 02:16:34 +08:00
|
|
|
CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1));
|
2006-04-21 07:55:59 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
|
|
|
}
|
Fold "and (LOAD P), 255" -> zextload. This allows us to compile:
unsigned foo3(unsigned *P) { return *P & 255; }
as:
_foo3:
lbz r3, 3(r3)
blr
instead of:
_foo3:
lwz r2, 0(r3)
rlwinm r3, r2, 0, 24, 31
blr
and:
unsigned short foo2(float a) { return a; }
as:
_foo2:
fctiwz f0, f1
stfd f0, -8(r1)
lhz r3, -2(r1)
blr
instead of:
_foo2:
fctiwz f0, f1
stfd f0, -8(r1)
lwz r2, -4(r1)
rlwinm r3, r2, 0, 16, 31
blr
llvm-svn: 26417
2006-02-28 14:35:35 +08:00
|
|
|
|
Compile:
unsigned foo4(unsigned short *P) { return *P & 255; }
unsigned foo5(short *P) { return *P & 255; }
to:
_foo4:
lbz r3,1(r3)
blr
_foo5:
lbz r3,1(r3)
blr
not:
_foo4:
lhz r2, 0(r3)
rlwinm r3, r2, 0, 24, 31
blr
_foo5:
lhz r2, 0(r3)
rlwinm r3, r2, 0, 24, 31
blr
llvm-svn: 26419
2006-02-28 14:49:37 +08:00
|
|
|
// fold (and (load x), 255) -> (zextload x, i8)
|
|
|
|
// fold (and (extload x, i16), 255) -> (zextload x, i8)
|
2006-10-10 04:57:25 +08:00
|
|
|
if (N1C && N0.getOpcode() == ISD::LOAD) {
|
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
|
|
|
if (LN0->getExtensionType() != ISD::SEXTLOAD &&
|
2007-03-07 16:07:03 +08:00
|
|
|
LN0->getAddressingMode() == ISD::UNINDEXED &&
|
2006-10-10 04:57:25 +08:00
|
|
|
N0.hasOneUse()) {
|
|
|
|
MVT::ValueType EVT, LoadedVT;
|
|
|
|
if (N1C->getValue() == 255)
|
|
|
|
EVT = MVT::i8;
|
|
|
|
else if (N1C->getValue() == 65535)
|
|
|
|
EVT = MVT::i16;
|
|
|
|
else if (N1C->getValue() == ~0U)
|
|
|
|
EVT = MVT::i32;
|
|
|
|
else
|
|
|
|
EVT = MVT::Other;
|
2006-02-28 14:49:37 +08:00
|
|
|
|
2006-10-11 15:10:22 +08:00
|
|
|
LoadedVT = LN0->getLoadedVT();
|
2006-10-10 04:57:25 +08:00
|
|
|
if (EVT != MVT::Other && LoadedVT > EVT &&
|
|
|
|
(!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
|
|
|
|
MVT::ValueType PtrType = N0.getOperand(1).getValueType();
|
|
|
|
// For big endian targets, we need to add an offset to the pointer to
|
|
|
|
// load the correct bytes. For little endian systems, we merely need to
|
|
|
|
// read fewer bytes from the same pointer.
|
|
|
|
unsigned PtrOff =
|
|
|
|
(MVT::getSizeInBits(LoadedVT) - MVT::getSizeInBits(EVT)) / 8;
|
|
|
|
SDOperand NewPtr = LN0->getBasePtr();
|
|
|
|
if (!TLI.isLittleEndian())
|
|
|
|
NewPtr = DAG.getNode(ISD::ADD, PtrType, NewPtr,
|
|
|
|
DAG.getConstant(PtrOff, PtrType));
|
|
|
|
AddToWorkList(NewPtr.Val);
|
|
|
|
SDOperand Load =
|
|
|
|
DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), NewPtr,
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(), LN0->getAlignment());
|
2006-10-10 04:57:25 +08:00
|
|
|
AddToWorkList(N);
|
|
|
|
CombineTo(N0.Val, Load, Load.getValue(1));
|
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
|
|
|
}
|
Fold "and (LOAD P), 255" -> zextload. This allows us to compile:
unsigned foo3(unsigned *P) { return *P & 255; }
as:
_foo3:
lbz r3, 3(r3)
blr
instead of:
_foo3:
lwz r2, 0(r3)
rlwinm r3, r2, 0, 24, 31
blr
and:
unsigned short foo2(float a) { return a; }
as:
_foo2:
fctiwz f0, f1
stfd f0, -8(r1)
lhz r3, -2(r1)
blr
instead of:
_foo2:
fctiwz f0, f1
stfd f0, -8(r1)
lwz r2, -4(r1)
rlwinm r3, r2, 0, 16, 31
blr
llvm-svn: 26417
2006-02-28 14:35:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitOR(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-10 03:49:52 +08:00
|
|
|
SDOperand LL, LR, RL, RR, CC0, CC1;
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
2005-09-06 12:43:02 +08:00
|
|
|
MVT::ValueType VT = N1.getValueType();
|
|
|
|
unsigned OpSizeInBits = MVT::getSizeInBits(VT);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (or c1, c2) -> c1|c2
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C && N1C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::OR, VT, N0, N1);
|
2005-09-08 07:25:52 +08:00
|
|
|
// canonicalize constant to RHS
|
2005-10-18 08:28:13 +08:00
|
|
|
if (N0C && !N1C)
|
|
|
|
return DAG.getNode(ISD::OR, VT, N1, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (or x, 0) -> x
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isNullValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (or x, -1) -> -1
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N1C && N1C->isAllOnesValue())
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
|
|
|
// fold (or x, c) -> c iff (x & ~c) == 0
|
2006-01-30 12:09:27 +08:00
|
|
|
if (N1C &&
|
|
|
|
TLI.MaskedValueIsZero(N0,~N1C->getValue() & (~0ULL>>(64-OpSizeInBits))))
|
2005-09-06 12:43:02 +08:00
|
|
|
return N1;
|
2006-02-03 14:46:56 +08:00
|
|
|
// reassociate or
|
|
|
|
SDOperand ROR = ReassociateOps(ISD::OR, N0, N1);
|
|
|
|
if (ROR.Val != 0)
|
|
|
|
return ROR;
|
|
|
|
// Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
|
|
|
|
if (N1C && N0.getOpcode() == ISD::AND && N0.Val->hasOneUse() &&
|
2005-10-27 13:06:38 +08:00
|
|
|
isa<ConstantSDNode>(N0.getOperand(1))) {
|
|
|
|
ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
|
|
|
|
return DAG.getNode(ISD::AND, VT, DAG.getNode(ISD::OR, VT, N0.getOperand(0),
|
|
|
|
N1),
|
|
|
|
DAG.getConstant(N1C->getValue() | C1->getValue(), VT));
|
2005-09-09 04:18:10 +08:00
|
|
|
}
|
2005-09-10 03:49:52 +08:00
|
|
|
// fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
|
|
|
|
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
|
|
|
|
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
|
|
|
|
ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
|
|
|
|
|
|
|
|
if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
|
|
|
|
MVT::isInteger(LL.getValueType())) {
|
|
|
|
// fold (X != 0) | (Y != 0) -> (X|Y != 0)
|
|
|
|
// fold (X < 0) | (Y < 0) -> (X|Y < 0)
|
|
|
|
if (cast<ConstantSDNode>(LR)->getValue() == 0 &&
|
|
|
|
(Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
|
|
|
|
SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(ORNode.Val);
|
2005-09-10 03:49:52 +08:00
|
|
|
return DAG.getSetCC(VT, ORNode, LR, Op1);
|
|
|
|
}
|
|
|
|
// fold (X != -1) | (Y != -1) -> (X&Y != -1)
|
|
|
|
// fold (X > -1) | (Y > -1) -> (X&Y > -1)
|
|
|
|
if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
|
|
|
|
(Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
|
|
|
|
SDOperand ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL);
|
2006-03-01 12:03:14 +08:00
|
|
|
AddToWorkList(ANDNode.Val);
|
2005-09-10 03:49:52 +08:00
|
|
|
return DAG.getSetCC(VT, ANDNode, LR, Op1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// canonicalize equivalent to ll == rl
|
|
|
|
if (LL == RR && LR == RL) {
|
|
|
|
Op1 = ISD::getSetCCSwappedOperands(Op1);
|
|
|
|
std::swap(RL, RR);
|
|
|
|
}
|
|
|
|
if (LL == RL && LR == RR) {
|
|
|
|
bool isInteger = MVT::isInteger(LL.getValueType());
|
|
|
|
ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
|
|
|
|
if (Result != ISD::SETCC_INVALID)
|
|
|
|
return DAG.getSetCC(N0.getValueType(), LL, LR, Result);
|
|
|
|
}
|
|
|
|
}
|
2006-05-05 13:51:50 +08:00
|
|
|
|
|
|
|
// Simplify: or (op x...), (op y...) -> (op (or x, y))
|
|
|
|
if (N0.getOpcode() == N1.getOpcode()) {
|
|
|
|
SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N);
|
|
|
|
if (Tmp.Val) return Tmp;
|
2006-02-01 15:19:44 +08:00
|
|
|
}
|
2006-09-15 04:50:57 +08:00
|
|
|
|
2006-09-15 05:11:37 +08:00
|
|
|
// (X & C1) | (Y & C2) -> (X|Y) & C3 if possible.
|
|
|
|
if (N0.getOpcode() == ISD::AND &&
|
|
|
|
N1.getOpcode() == ISD::AND &&
|
|
|
|
N0.getOperand(1).getOpcode() == ISD::Constant &&
|
|
|
|
N1.getOperand(1).getOpcode() == ISD::Constant &&
|
|
|
|
// Don't increase # computations.
|
|
|
|
(N0.Val->hasOneUse() || N1.Val->hasOneUse())) {
|
|
|
|
// We can only do this xform if we know that bits from X that are set in C2
|
|
|
|
// but not in C1 are already zero. Likewise for Y.
|
|
|
|
uint64_t LHSMask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
|
|
|
|
uint64_t RHSMask = cast<ConstantSDNode>(N1.getOperand(1))->getValue();
|
|
|
|
|
|
|
|
if (TLI.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
|
|
|
|
TLI.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
|
|
|
|
SDOperand X =DAG.getNode(ISD::OR, VT, N0.getOperand(0), N1.getOperand(0));
|
|
|
|
return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(LHSMask|RHSMask, VT));
|
|
|
|
}
|
|
|
|
}
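The MaskedValueIsZero guards are what make the fold sound: when neither operand can contribute a bit that only the other mask keeps, (X & C1) | (Y & C2) equals (X | Y) & (C1 | C2). A standalone scalar check with the precondition enforced by construction (illustrative only; the sample masks are made up):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C1 = 0x00FF00FFu, C2 = 0xFFFF0000u;  // sample AND masks
  for (uint32_t i = 0; i < (1u << 16); i += 97)
    for (uint32_t j = 0; j < (1u << 16); j += 89) {
      // Enforce the MaskedValueIsZero preconditions by construction:
      uint32_t X = (i * 0x01010101u) & ~(C2 & ~C1);   // bits of X set in C2 but not C1 are zero
      uint32_t Y = (j * 0x00010001u) & ~(C1 & ~C2);   // bits of Y set in C1 but not C2 are zero
      assert(((X & C1) | (Y & C2)) == ((X | Y) & (C1 | C2)));
    }
  return 0;
}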
|
|
|
|
|
|
|
|
|
2006-09-15 04:50:57 +08:00
|
|
|
// See if this is some rotate idiom.
|
|
|
|
if (SDNode *Rot = MatchRotate(N0, N1))
|
|
|
|
return SDOperand(Rot, 0);
|
2006-05-05 13:51:50 +08:00
|
|
|
|
2006-09-15 04:50:57 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
|
|
|
|
static bool MatchRotateHalf(SDOperand Op, SDOperand &Shift, SDOperand &Mask) {
|
|
|
|
if (Op.getOpcode() == ISD::AND) {
|
2006-11-03 04:25:50 +08:00
|
|
|
if (isa<ConstantSDNode>(Op.getOperand(1))) {
|
2006-09-15 04:50:57 +08:00
|
|
|
Mask = Op.getOperand(1);
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
|
|
|
|
Shift = Op;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
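MatchRotate exists because (x << c) | (x >> (w - c)) on a w-bit value is exactly a rotate left by c (equivalently a rotate right by w - c), so targets with rotl/rotr can replace the whole shl/srl/or idiom with one instruction. A standalone scalar check against a bit-by-bit reference rotate (illustrative only; the helper name is made up):

#include <cassert>
#include <cstdint>

static uint32_t rotlRef(uint32_t x, unsigned c) {  // bit-by-bit reference rotate-left
  uint32_t r = 0;
  for (unsigned i = 0; i < 32; ++i)
    if (x & (1u << i))
      r |= 1u << ((i + c) % 32);
  return r;
}

int main() {
  for (uint32_t x = 0; x < 100000; x += 11)
    for (unsigned c = 1; c < 32; ++c) {
      uint32_t idiom = (x << c) | (x >> (32 - c)); // the shl/srl/or pair MatchRotate looks for
      assert(idiom == rotlRef(x, c));              // equals rotl x, c (and rotr x, 32-c)
    }
  return 0;
}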
|
|
|
|
|
|
|
|
|
|
|
|
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
|
|
|
|
// idioms for rotate, and if the target supports rotation instructions, generate
|
|
|
|
// a rot[lr].
|
|
|
|
SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) {
|
|
|
|
// Must be a legal type. Expanded and promoted types won't work with rotates.
|
|
|
|
MVT::ValueType VT = LHS.getValueType();
|
|
|
|
if (!TLI.isTypeLegal(VT)) return 0;
|
|
|
|
|
|
|
|
// The target must have at least one rotate flavor.
|
|
|
|
bool HasROTL = TLI.isOperationLegal(ISD::ROTL, VT);
|
|
|
|
bool HasROTR = TLI.isOperationLegal(ISD::ROTR, VT);
|
|
|
|
if (!HasROTL && !HasROTR) return 0;
|
|
|
|
|
|
|
|
// Match "(X shl/srl V1) & V2" where V2 may not be present.
|
|
|
|
SDOperand LHSShift; // The shift.
|
|
|
|
SDOperand LHSMask; // AND value if any.
|
|
|
|
if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
|
|
|
|
return 0; // Not part of a rotate.
|
|
|
|
|
|
|
|
SDOperand RHSShift; // The shift.
|
|
|
|
SDOperand RHSMask; // AND value if any.
|
|
|
|
if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
|
|
|
|
return 0; // Not part of a rotate.
|
|
|
|
|
|
|
|
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
|
|
|
|
return 0; // Not shifting the same value.
|
|
|
|
|
|
|
|
if (LHSShift.getOpcode() == RHSShift.getOpcode())
|
|
|
|
return 0; // Shifts must disagree.
|
|
|
|
|
|
|
|
// Canonicalize shl to left side in a shl/srl pair.
|
|
|
|
if (RHSShift.getOpcode() == ISD::SHL) {
|
|
|
|
std::swap(LHS, RHS);
|
|
|
|
std::swap(LHSShift, RHSShift);
|
|
|
|
std::swap(LHSMask , RHSMask );
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned OpSizeInBits = MVT::getSizeInBits(VT);
|
2007-04-03 05:36:32 +08:00
|
|
|
SDOperand LHSShiftArg = LHSShift.getOperand(0);
|
|
|
|
SDOperand LHSShiftAmt = LHSShift.getOperand(1);
|
|
|
|
SDOperand RHSShiftAmt = RHSShift.getOperand(1);
|
2006-09-15 04:50:57 +08:00
|
|
|
|
|
|
|
// fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
|
|
|
|
// fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
  if (LHSShiftAmt.getOpcode() == ISD::Constant &&
      RHSShiftAmt.getOpcode() == ISD::Constant) {
    uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getValue();
    uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getValue();
    if ((LShVal + RShVal) != OpSizeInBits)
      return 0;

    SDOperand Rot;
    if (HasROTL)
      Rot = DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt);
    else
      Rot = DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt);

    // If there is an AND of either shifted operand, apply it to the result.
    if (LHSMask.Val || RHSMask.Val) {
      uint64_t Mask = MVT::getIntVTBitMask(VT);

      if (LHSMask.Val) {
        uint64_t RHSBits = (1ULL << LShVal)-1;
        Mask &= cast<ConstantSDNode>(LHSMask)->getValue() | RHSBits;
      }
      if (RHSMask.Val) {
        uint64_t LHSBits = ~((1ULL << (OpSizeInBits-RShVal))-1);
        Mask &= cast<ConstantSDNode>(RHSMask)->getValue() | LHSBits;
      }

      Rot = DAG.getNode(ISD::AND, VT, Rot, DAG.getConstant(Mask, VT));
    }

    return Rot.Val;
  }

  // If there is a mask here, and we have a variable shift, we can't be sure
  // that we're masking out the right stuff.
  if (LHSMask.Val || RHSMask.Val)
    return 0;

  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y)
  // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y))
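  // The variable-amount forms are only matched when the constant being
  // subtracted from equals the operand size, e.g. (sub 32, y) for an i32
  // rotate; otherwise the shift pair is not a rotate.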
  if (RHSShiftAmt.getOpcode() == ISD::SUB &&
      LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
    if (ConstantSDNode *SUBC =
          dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
      if (SUBC->getValue() == OpSizeInBits)
        if (HasROTL)
          return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
        else
          return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
    }
  }

  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotr x, y)
  // fold (or (shl x, (sub 32, y)), (srl x, y)) -> (rotl x, (sub 32, y))
  if (LHSShiftAmt.getOpcode() == ISD::SUB &&
      RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
    if (ConstantSDNode *SUBC =
          dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
      if (SUBC->getValue() == OpSizeInBits)
        if (HasROTL)
          return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
        else
          return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
    }
  }

  // Look for sign/zext/any-extended cases:
  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
       || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
       || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND) &&
      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND
       || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND
       || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND)) {
    SDOperand LExtOp0 = LHSShiftAmt.getOperand(0);
    SDOperand RExtOp0 = RHSShiftAmt.getOperand(0);
    if (RExtOp0.getOpcode() == ISD::SUB &&
        RExtOp0.getOperand(1) == LExtOp0) {
      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
      //   (rotl x, y)
      // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
      //   (rotr x, (sub 32, y))
      if (ConstantSDNode *SUBC =
            dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) {
        if (SUBC->getValue() == OpSizeInBits) {
          if (HasROTL)
            return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
          else
            return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
        }
      }
    } else if (LExtOp0.getOpcode() == ISD::SUB &&
               RExtOp0 == LExtOp0.getOperand(1)) {
      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
      //   (rotr x, y)
      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
      //   (rotl x, (sub 32, y))
      if (ConstantSDNode *SUBC =
            dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
        if (SUBC->getValue() == OpSizeInBits) {
          if (HasROTL)
            return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
          else
            return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
        }
      }
    }
  }

  return 0;
}

SDOperand DAGCombiner::visitXOR(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  SDOperand LHS, RHS, CC;
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  MVT::ValueType VT = N0.getValueType();

  // fold (xor c1, c2) -> c1^c2
  if (N0C && N1C)
    return DAG.getNode(ISD::XOR, VT, N0, N1);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::XOR, VT, N1, N0);
  // fold (xor x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // reassociate xor
  SDOperand RXOR = ReassociateOps(ISD::XOR, N0, N1);
  if (RXOR.Val != 0)
    return RXOR;
  // fold !(x cc y) -> (x !cc y)
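  // For example, (xor (setcc x, y, setlt), 1) becomes (setcc x, y, setge) for
  // integer operands.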
  if (N1C && N1C->getValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
    bool isInt = MVT::isInteger(LHS.getValueType());
    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                               isInt);
    if (N0.getOpcode() == ISD::SETCC)
      return DAG.getSetCC(VT, LHS, RHS, NotCC);
    if (N0.getOpcode() == ISD::SELECT_CC)
      return DAG.getSelectCC(LHS, RHS, N0.getOperand(2),N0.getOperand(3),NotCC);
    assert(0 && "Unhandled SetCC Equivalent!");
    abort();
  }
  // fold !(x or y) -> (!x and !y) iff x or y are setcc
  if (N1C && N1C->getValue() == 1 && VT == MVT::i1 &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDOperand LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, VT, LHS, N1);  // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, VT, RHS, N1);  // RHS = ~RHS
      AddToWorkList(LHS.Val); AddToWorkList(RHS.Val);
      return DAG.getNode(NewOpcode, VT, LHS, RHS);
    }
  }
  // fold !(x or y) -> (!x and !y) iff x or y are constants
  if (N1C && N1C->isAllOnesValue() &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDOperand LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, VT, LHS, N1);  // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, VT, RHS, N1);  // RHS = ~RHS
      AddToWorkList(LHS.Val); AddToWorkList(RHS.Val);
      return DAG.getNode(NewOpcode, VT, LHS, RHS);
    }
  }
  // fold (xor (xor x, c1), c2) -> (xor x, c1^c2)
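  // e.g. (xor (xor x, 3), 5) -> (xor x, 6), since 3^5 == 6.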
  if (N1C && N0.getOpcode() == ISD::XOR) {
    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (N00C)
      return DAG.getNode(ISD::XOR, VT, N0.getOperand(1),
                         DAG.getConstant(N1C->getValue()^N00C->getValue(), VT));
    if (N01C)
      return DAG.getNode(ISD::XOR, VT, N0.getOperand(0),
                         DAG.getConstant(N1C->getValue()^N01C->getValue(), VT));
  }
  // fold (xor x, x) -> 0
  if (N0 == N1) {
    if (!MVT::isVector(VT)) {
      return DAG.getConstant(0, VT);
    } else if (!AfterLegalize || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
      // Produce a vector of zeros.
      SDOperand El = DAG.getConstant(0, MVT::getVectorElementType(VT));
      std::vector<SDOperand> Ops(MVT::getVectorNumElements(VT), El);
      return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
    }
  }

  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
  if (N0.getOpcode() == N1.getOpcode()) {
    SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N);
    if (Tmp.Val) return Tmp;
  }

  // Simplify the expression using non-local knowledge.
  if (!MVT::isVector(VT) &&
      SimplifyDemandedBits(SDOperand(N, 0)))
    return SDOperand(N, 0);

  return SDOperand();
}

SDOperand DAGCombiner::visitSHL(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  MVT::ValueType VT = N0.getValueType();
  unsigned OpSizeInBits = MVT::getSizeInBits(VT);

  // fold (shl c1, c2) -> c1<<c2
  if (N0C && N1C)
    return DAG.getNode(ISD::SHL, VT, N0, N1);
  // fold (shl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (shl x, c >= size(x)) -> undef
  if (N1C && N1C->getValue() >= OpSizeInBits)
    return DAG.getNode(ISD::UNDEF, VT);
  // fold (shl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // if (shl x, c) is known to be zero, return 0
  if (TLI.MaskedValueIsZero(SDOperand(N, 0), MVT::getIntVTBitMask(VT)))
    return DAG.getConstant(0, VT);
  if (N1C && SimplifyDemandedBits(SDOperand(N, 0)))
    return SDOperand(N, 0);
  // fold (shl (shl x, c1), c2) -> 0 or (shl x, c1+c2)
  if (N1C && N0.getOpcode() == ISD::SHL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
    uint64_t c2 = N1C->getValue();
    if (c1 + c2 > OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SHL, VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }
  // fold (shl (srl x, c1), c2) -> (shl (and x, -1 << c1), c2-c1) or
  //                               (srl (and x, -1 << c1), c1-c2)
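  // e.g. on i32 with c1=4, c2=2: (shl (srl x, 4), 2) becomes
  // (srl (and x, 0xFFFFFFF0), 2).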
  if (N1C && N0.getOpcode() == ISD::SRL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
    uint64_t c2 = N1C->getValue();
    SDOperand Mask = DAG.getNode(ISD::AND, VT, N0.getOperand(0),
                                 DAG.getConstant(~0ULL << c1, VT));
    if (c2 > c1)
      return DAG.getNode(ISD::SHL, VT, Mask,
                         DAG.getConstant(c2-c1, N1.getValueType()));
    else
      return DAG.getNode(ISD::SRL, VT, Mask,
                         DAG.getConstant(c1-c2, N1.getValueType()));
  }
  // fold (shl (sra x, c1), c1) -> (and x, -1 << c1)
  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1))
    return DAG.getNode(ISD::AND, VT, N0.getOperand(0),
                       DAG.getConstant(~0ULL << N1C->getValue(), VT));
  return SDOperand();
}

SDOperand DAGCombiner::visitSRA(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  MVT::ValueType VT = N0.getValueType();

  // fold (sra c1, c2) -> c1>>c2
  if (N0C && N1C)
    return DAG.getNode(ISD::SRA, VT, N0, N1);
  // fold (sra 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (sra -1, x) -> -1
  if (N0C && N0C->isAllOnesValue())
    return N0;
  // fold (sra x, c >= size(x)) -> undef
  if (N1C && N1C->getValue() >= MVT::getSizeInBits(VT))
    return DAG.getNode(ISD::UNDEF, VT);
  // fold (sra x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
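  // e.g. on i32, (sra (shl x, 24), 24) becomes (sext_inreg x, i8) when the
  // target supports SIGN_EXTEND_INREG of i8.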
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = MVT::getSizeInBits(VT) - (unsigned)N1C->getValue();
    MVT::ValueType EVT;
    switch (LowBits) {
    default: EVT = MVT::Other; break;
    case  1: EVT = MVT::i1;    break;
    case  8: EVT = MVT::i8;    break;
    case 16: EVT = MVT::i16;   break;
    case 32: EVT = MVT::i32;   break;
    }
    if (EVT > MVT::Other && TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0),
                         DAG.getValueType(EVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, c1+c2)
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      unsigned Sum = N1C->getValue() + C1->getValue();
      if (Sum >= MVT::getSizeInBits(VT)) Sum = MVT::getSizeInBits(VT)-1;
      return DAG.getNode(ISD::SRA, VT, N0.getOperand(0),
                         DAG.getConstant(Sum, N1C->getValueType(0)));
    }
  }

  // Simplify, based on bits shifted out of the LHS.
  if (N1C && SimplifyDemandedBits(SDOperand(N, 0)))
    return SDOperand(N, 0);

  // If the sign bit is known to be zero, switch this to a SRL.
  if (TLI.MaskedValueIsZero(N0, MVT::getIntVTSignBit(VT)))
    return DAG.getNode(ISD::SRL, VT, N0, N1);
  return SDOperand();
}

SDOperand DAGCombiner::visitSRL(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  MVT::ValueType VT = N0.getValueType();
  unsigned OpSizeInBits = MVT::getSizeInBits(VT);

  // fold (srl c1, c2) -> c1 >>u c2
  if (N0C && N1C)
    return DAG.getNode(ISD::SRL, VT, N0, N1);
  // fold (srl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (srl x, c >= size(x)) -> undef
  if (N1C && N1C->getValue() >= OpSizeInBits)
    return DAG.getNode(ISD::UNDEF, VT);
  // fold (srl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // if (srl x, c) is known to be zero, return 0
  if (N1C && TLI.MaskedValueIsZero(SDOperand(N, 0), ~0ULL >> (64-OpSizeInBits)))
    return DAG.getConstant(0, VT);

  // fold (srl (srl x, c1), c2) -> 0 or (srl x, c1+c2)
  if (N1C && N0.getOpcode() == ISD::SRL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
    uint64_t c2 = N1C->getValue();
    if (c1 + c2 > OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SRL, VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }

  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    MVT::ValueType SmallVT = N0.getOperand(0).getValueType();
    if (N1C->getValue() >= MVT::getSizeInBits(SmallVT))
      return DAG.getNode(ISD::UNDEF, VT);

    SDOperand SmallShift = DAG.getNode(ISD::SRL, SmallVT, N0.getOperand(0), N1);
    AddToWorkList(SmallShift.Val);
    return DAG.getNode(ISD::ANY_EXTEND, VT, SmallShift);
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getValue()+1 == MVT::getSizeInBits(VT)) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getValue() == Log2_32(MVT::getSizeInBits(VT))) {
    uint64_t KnownZero, KnownOne, Mask = MVT::getIntVTBitMask(VT);
    TLI.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (KnownOne) return DAG.getConstant(0, VT);

    // If all of the bits input to the ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
    uint64_t UnknownBits = ~KnownZero & Mask;
    if (UnknownBits == 0) return DAG.getConstant(1, VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if ((UnknownBits & (UnknownBits-1)) == 0) {
      // Okay, we know that only the single bit specified by UnknownBits could
      // be set on input to the CTLZ node.  If this bit is set, the SRL will
      // return 0; if it is clear, it returns 1.  Change the CTLZ/SRL pair
      // to an SRL,XOR pair, which is likely to simplify more.
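      // e.g. if only bit 3 of the ctlz input can be nonzero, the ctlz/srl
      // pair becomes (xor (srl x, 3), 1).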
      unsigned ShAmt = CountTrailingZeros_64(UnknownBits);
      SDOperand Op = N0.getOperand(0);
      if (ShAmt) {
        Op = DAG.getNode(ISD::SRL, VT, Op,
                         DAG.getConstant(ShAmt, TLI.getShiftAmountTy()));
        AddToWorkList(Op.Val);
      }
      return DAG.getNode(ISD::XOR, VT, Op, DAG.getConstant(1, VT));
    }
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDOperand(N, 0)))
    return SDOperand(N, 0);

  return SDOperand();
}

SDOperand DAGCombiner::visitCTLZ(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (ctlz c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ, VT, N0);
  return SDOperand();
}

SDOperand DAGCombiner::visitCTTZ(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (cttz c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ, VT, N0);
  return SDOperand();
}

SDOperand DAGCombiner::visitCTPOP(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (ctpop c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTPOP, VT, N0);
  return SDOperand();
}

SDOperand DAGCombiner::visitSELECT(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  SDOperand N2 = N->getOperand(2);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  MVT::ValueType VT = N->getValueType(0);

  // fold select C, X, X -> X
  if (N1 == N2)
    return N1;
  // fold select true, X, Y -> X
  if (N0C && !N0C->isNullValue())
    return N1;
  // fold select false, X, Y -> Y
  if (N0C && N0C->isNullValue())
    return N2;
  // fold select C, 1, X -> C | X
  if (MVT::i1 == VT && N1C && N1C->getValue() == 1)
    return DAG.getNode(ISD::OR, VT, N0, N2);
  // fold select C, 0, X -> ~C & X
  // FIXME: this should check for C type == X type, not i1?
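  // On i1, ~C is built as (xor C, 1), so select C, 0, X becomes
  // (and (xor C, 1), X).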
  if (MVT::i1 == VT && N1C && N1C->isNullValue()) {
    SDOperand XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT));
    AddToWorkList(XORNode.Val);
    return DAG.getNode(ISD::AND, VT, XORNode, N2);
  }
  // fold select C, X, 1 -> ~C | X
  if (MVT::i1 == VT && N2C && N2C->getValue() == 1) {
    SDOperand XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT));
    AddToWorkList(XORNode.Val);
    return DAG.getNode(ISD::OR, VT, XORNode, N1);
  }
  // fold select C, X, 0 -> C & X
  // FIXME: this should check for C type == X type, not i1?
  if (MVT::i1 == VT && N2C && N2C->isNullValue())
    return DAG.getNode(ISD::AND, VT, N0, N1);
  // fold  X ? X : Y --> X ? 1 : Y --> X | Y
  if (MVT::i1 == VT && N0 == N1)
    return DAG.getNode(ISD::OR, VT, N0, N2);
  // fold X ? Y : X --> X ? Y : 0 --> X & Y
  if (MVT::i1 == VT && N0 == N2)
    return DAG.getNode(ISD::AND, VT, N0, N1);

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N1, N2))
    return SDOperand(N, 0);  // Don't revisit N.

  // fold selects based on a setcc into other things, such as min/max/abs
  if (N0.getOpcode() == ISD::SETCC)
    // FIXME:
    // Check against MVT::Other for SELECT_CC, which is a workaround for targets
    // having to say they don't support SELECT_CC on every type the DAG knows
    // about, since there is no way to mark an opcode illegal at all value types
    if (TLI.isOperationLegal(ISD::SELECT_CC, MVT::Other))
      return DAG.getNode(ISD::SELECT_CC, VT, N0.getOperand(0), N0.getOperand(1),
                         N1, N2, N0.getOperand(2));
    else
      return SimplifySelect(N0, N1, N2);
  return SDOperand();
}

SDOperand DAGCombiner::visitSELECT_CC(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  SDOperand N2 = N->getOperand(2);
  SDOperand N3 = N->getOperand(3);
  SDOperand N4 = N->getOperand(4);
  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();

  // fold select_cc lhs, rhs, x, x, cc -> x
  if (N2 == N3)
    return N2;

  // Determine if the condition we're dealing with is constant
  SDOperand SCC = SimplifySetCC(TLI.getSetCCResultTy(), N0, N1, CC, false);
  if (SCC.Val) AddToWorkList(SCC.Val);

  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.Val)) {
    if (SCCC->getValue())
      return N2;    // cond always true -> true val
    else
      return N3;    // cond always false -> false val
  }

  // Fold to a simpler select_cc
  if (SCC.Val && SCC.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::SELECT_CC, N2.getValueType(),
                       SCC.getOperand(0), SCC.getOperand(1), N2, N3,
                       SCC.getOperand(2));

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N2, N3))
    return SDOperand(N, 0);  // Don't revisit N.

  // fold select_cc into other things, such as min/max/abs
  return SimplifySelectCC(N0, N1, N2, N3, CC);
}

SDOperand DAGCombiner::visitSETCC(SDNode *N) {
  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
                       cast<CondCodeSDNode>(N->getOperand(2))->get());
}

SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (sext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::SIGN_EXTEND, VT, N0);

  // fold (sext (sext x)) -> (sext x)
  // fold (sext (aext x)) -> (sext x)
  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::SIGN_EXTEND, VT, N0.getOperand(0));

  // fold (sext (truncate (load x))) -> (sext (smaller load x))
  // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDOperand NarrowLoad = ReduceLoadWidth(N0.Val);
    if (NarrowLoad.Val) {
      if (NarrowLoad.Val != N0.Val)
        CombineTo(N0.Val, NarrowLoad);
      return DAG.getNode(ISD::SIGN_EXTEND, VT, NarrowLoad);
    }
  }

  // See if the value being truncated is already sign extended.  If so, just
  // eliminate the trunc/sext pair.
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDOperand Op = N0.getOperand(0);
    unsigned OpBits   = MVT::getSizeInBits(Op.getValueType());
    unsigned MidBits  = MVT::getSizeInBits(N0.getValueType());
    unsigned DestBits = MVT::getSizeInBits(VT);
    unsigned NumSignBits = TLI.ComputeNumSignBits(Op);

    if (OpBits == DestBits) {
      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
      // bits, it is already sign extended and can be returned directly.
      if (NumSignBits > DestBits-MidBits)
        return Op;
    } else if (OpBits < DestBits) {
      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
      // bits, just sext from i32.
      if (NumSignBits > OpBits-MidBits)
        return DAG.getNode(ISD::SIGN_EXTEND, VT, Op);
    } else {
      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
      // bits, just truncate to i32.
      if (NumSignBits > OpBits-MidBits)
        return DAG.getNode(ISD::TRUNCATE, VT, Op);
    }

    // fold (sext (truncate x)) -> (sextinreg x).
    if (!AfterLegalize || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
                                               N0.getValueType())) {
      if (Op.getValueType() < VT)
        Op = DAG.getNode(ISD::ANY_EXTEND, VT, Op);
      else if (Op.getValueType() > VT)
        Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, Op,
                         DAG.getValueType(N0.getValueType()));
    }
  }

  // fold (sext (load x)) -> (sext (truncate (sextload x)))
  if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
      (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, N0.getValueType()))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getSrcValue(),
                                       LN0->getSrcValueOffset(),
                                       N0.getValueType(),
                                       LN0->isVolatile());
    CombineTo(N, ExtLoad);
    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
  }

  // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
  // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
  if ((ISD::isSEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) &&
      ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    MVT::ValueType EVT = LN0->getLoadedVT();
    if (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) {
      SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                         LN0->getBasePtr(), LN0->getSrcValue(),
                                         LN0->getSrcValueOffset(), EVT,
                                         LN0->isVolatile(),
                                         LN0->getAlignment());
      CombineTo(N, ExtLoad);
      CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
                ExtLoad.getValue(1));
      return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // sext(setcc x,y,cc) -> select_cc x, y, -1, 0, cc
  if (N0.getOpcode() == ISD::SETCC) {
    SDOperand SCC =
      SimplifySelectCC(N0.getOperand(0), N0.getOperand(1),
                       DAG.getConstant(~0ULL, VT), DAG.getConstant(0, VT),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
    if (SCC.Val) return SCC;
  }

  return SDOperand();
}

SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (zext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
  // fold (zext (zext x)) -> (zext x)
  // fold (zext (aext x)) -> (zext x)
  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::ZERO_EXTEND, VT, N0.getOperand(0));

  // fold (zext (truncate (load x))) -> (zext (smaller load x))
  // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDOperand NarrowLoad = ReduceLoadWidth(N0.Val);
    if (NarrowLoad.Val) {
      if (NarrowLoad.Val != N0.Val)
        CombineTo(N0.Val, NarrowLoad);
      return DAG.getNode(ISD::ZERO_EXTEND, VT, NarrowLoad);
    }
  }

  // fold (zext (truncate x)) -> (and x, mask)
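  // e.g. zero extending (trunc i32 x to i16) back to i32 becomes
  // (and x, 0xFFFF).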
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (!AfterLegalize || TLI.isOperationLegal(ISD::AND, VT))) {
    SDOperand Op = N0.getOperand(0);
    if (Op.getValueType() < VT) {
      Op = DAG.getNode(ISD::ANY_EXTEND, VT, Op);
    } else if (Op.getValueType() > VT) {
      Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
    }
    return DAG.getZeroExtendInReg(Op, N0.getValueType());
  }

  // fold (zext (and (trunc x), cst)) -> (and x, cst).
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDOperand X = N0.getOperand(0).getOperand(0);
    if (X.getValueType() < VT) {
      X = DAG.getNode(ISD::ANY_EXTEND, VT, X);
    } else if (X.getValueType() > VT) {
      X = DAG.getNode(ISD::TRUNCATE, VT, X);
    }
    uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
    return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
  }

  // fold (zext (load x)) -> (zext (truncate (zextload x)))
  if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
      (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getSrcValue(),
                                       LN0->getSrcValueOffset(),
                                       N0.getValueType(),
                                       LN0->isVolatile(),
                                       LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
  }

  // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
  // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
  if ((ISD::isZEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) &&
      ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    MVT::ValueType EVT = LN0->getLoadedVT();
    SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getSrcValue(),
                                       LN0->getSrcValueOffset(), EVT,
                                       LN0->isVolatile(),
                                       LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
  }

  // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
  if (N0.getOpcode() == ISD::SETCC) {
    SDOperand SCC =
      SimplifySelectCC(N0.getOperand(0), N0.getOperand(1),
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
    if (SCC.Val) return SCC;
  }

  return SDOperand();
}

SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) {
  SDOperand N0 = N->getOperand(0);
  MVT::ValueType VT = N->getValueType(0);

  // fold (aext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::ANY_EXTEND, VT, N0);
  // fold (aext (aext x)) -> (aext x)
  // fold (aext (zext x)) -> (zext x)
  // fold (aext (sext x)) -> (sext x)
  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
      N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND)
    return DAG.getNode(N0.getOpcode(), VT, N0.getOperand(0));

  // fold (aext (truncate (load x))) -> (aext (smaller load x))
  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDOperand NarrowLoad = ReduceLoadWidth(N0.Val);
    if (NarrowLoad.Val) {
      if (NarrowLoad.Val != N0.Val)
        CombineTo(N0.Val, NarrowLoad);
      return DAG.getNode(ISD::ANY_EXTEND, VT, NarrowLoad);
    }
  }

  // fold (aext (truncate x))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDOperand TruncOp = N0.getOperand(0);
    if (TruncOp.getValueType() == VT)
      return TruncOp; // x iff x size == zext size.
    if (TruncOp.getValueType() > VT)
      return DAG.getNode(ISD::TRUNCATE, VT, TruncOp);
    return DAG.getNode(ISD::ANY_EXTEND, VT, TruncOp);
  }

  // fold (aext (and (trunc x), cst)) -> (and x, cst).
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDOperand X = N0.getOperand(0).getOperand(0);
    if (X.getValueType() < VT) {
      X = DAG.getNode(ISD::ANY_EXTEND, VT, X);
    } else if (X.getValueType() > VT) {
      X = DAG.getNode(ISD::TRUNCATE, VT, X);
    }
    uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
    return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
  }

  // fold (aext (load x)) -> (aext (truncate (extload x)))
  if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
      (!AfterLegalize || TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getSrcValue(),
                                       LN0->getSrcValueOffset(),
                                       N0.getValueType(),
                                       LN0->isVolatile(),
                                       LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
  }

  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
  // fold (aext ( extload x)) -> (aext (truncate (extload x)))
  if (N0.getOpcode() == ISD::LOAD &&
      !ISD::isNON_EXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
      N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    MVT::ValueType EVT = LN0->getLoadedVT();
    SDOperand ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
                                       LN0->getChain(), LN0->getBasePtr(),
                                       LN0->getSrcValue(),
                                       LN0->getSrcValueOffset(), EVT,
                                       LN0->isVolatile(),
                                       LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
  }

  // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
  if (N0.getOpcode() == ISD::SETCC) {
    SDOperand SCC =
      SimplifySelectCC(N0.getOperand(0), N0.getOperand(1),
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
    if (SCC.Val)
      return SCC;
  }

  return SDOperand();
}

/// ReduceLoadWidth - If the result of a wider load is shifted to right of N
/// bits and then truncated to a narrower type and where N is a multiple
/// of number of bits of the narrower type, transform it to a narrower load
/// from address + N / num of bits of new type. If the result is to be
/// extended, also fold the extension to form an extending load.
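/// For example, on a little-endian target, (i32 (trunc (srl (load i64 p), 32)))
/// can be turned into an i32 load from p+4.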
|
|
|
|
SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) {
|
|
|
|
unsigned Opc = N->getOpcode();
|
|
|
|
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
MVT::ValueType EVT = N->getValueType(0);
|
|
|
|
|
2007-03-24 06:13:36 +08:00
|
|
|
// Special case: SIGN_EXTEND_INREG is basically truncating to EVT then
|
|
|
|
// extended to VT.
|
2007-03-22 09:54:19 +08:00
|
|
|
if (Opc == ISD::SIGN_EXTEND_INREG) {
|
|
|
|
ExtType = ISD::SEXTLOAD;
|
|
|
|
EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
|
2007-03-24 06:13:36 +08:00
|
|
|
if (AfterLegalize && !TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))
|
|
|
|
return SDOperand();
|
2007-03-22 09:54:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned EVTBits = MVT::getSizeInBits(EVT);
|
|
|
|
unsigned ShAmt = 0;
|
2007-03-24 04:55:21 +08:00
|
|
|
bool CombineSRL = false;
|
2007-03-22 09:54:19 +08:00
|
|
|
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
|
|
|
|
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
|
|
|
|
ShAmt = N01->getValue();
|
|
|
|
// Is the shift amount a multiple of the size of the narrower type (EVT)?
|
|
|
|
if ((ShAmt & (EVTBits-1)) == 0) {
|
|
|
|
N0 = N0.getOperand(0);
|
|
|
|
if (MVT::getSizeInBits(N0.getValueType()) <= EVTBits)
|
|
|
|
return SDOperand();
|
2007-03-24 04:55:21 +08:00
|
|
|
CombineSRL = true;
|
2007-03-22 09:54:19 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
|
|
|
|
// Do not allow folding to i1 here. i1 is implicitly stored in memory in
|
|
|
|
// zero extended form: by shrinking the load, we lose track of the fact
|
|
|
|
// that it is already zero extended.
|
|
|
|
// FIXME: This should be reevaluated.
|
|
|
|
VT != MVT::i1) {
|
|
|
|
assert(MVT::getSizeInBits(N0.getValueType()) > EVTBits &&
|
|
|
|
"Cannot truncate to larger type!");
|
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
|
|
|
MVT::ValueType PtrType = N0.getOperand(1).getValueType();
|
2007-03-24 08:02:43 +08:00
|
|
|
// For big endian targets, we need to adjust the offset to the pointer to
|
|
|
|
// load the correct bytes.
|
|
|
|
if (!TLI.isLittleEndian())
|
|
|
|
ShAmt = MVT::getSizeInBits(N0.getValueType()) - ShAmt - EVTBits;
|
|
|
|
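// E.g. when narrowing an i32 load to i16 with ShAmt == 16: little-endian
// reads the halfword at offset 16/8 == 2, while big-endian remaps ShAmt to
// 32-16-16 == 0 and reads the halfword at offset 0.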
uint64_t PtrOff = ShAmt / 8;
|
2007-03-22 09:54:19 +08:00
|
|
|
SDOperand NewPtr = DAG.getNode(ISD::ADD, PtrType, LN0->getBasePtr(),
|
|
|
|
DAG.getConstant(PtrOff, PtrType));
|
|
|
|
AddToWorkList(NewPtr.Val);
|
|
|
|
SDOperand Load = (ExtType == ISD::NON_EXTLOAD)
|
|
|
|
? DAG.getLoad(VT, LN0->getChain(), NewPtr,
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValue(), LN0->getSrcValueOffset(),
|
|
|
|
LN0->isVolatile(), LN0->getAlignment())
|
2007-03-22 09:54:19 +08:00
|
|
|
: DAG.getExtLoad(ExtType, VT, LN0->getChain(), NewPtr,
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(), LN0->getAlignment());
|
2007-03-22 09:54:19 +08:00
|
|
|
AddToWorkList(N);
|
2007-03-24 04:55:21 +08:00
|
|
|
if (CombineSRL) {
|
|
|
|
std::vector<SDNode*> NowDead;
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1), NowDead);
|
|
|
|
CombineTo(N->getOperand(0).Val, Load);
|
|
|
|
} else
|
|
|
|
CombineTo(N0.Val, Load, Load.getValue(1));
|
2007-03-26 15:12:51 +08:00
|
|
|
if (ShAmt) {
|
|
|
|
if (Opc == ISD::SIGN_EXTEND_INREG)
|
|
|
|
return DAG.getNode(Opc, VT, Load, N->getOperand(1));
|
|
|
|
else
|
|
|
|
return DAG.getNode(Opc, VT, Load);
|
|
|
|
}
|
2007-03-22 09:54:19 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
2006-05-05 13:58:59 +08:00
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
2005-09-03 05:18:40 +08:00
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-09-01 08:19:25 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-03 05:18:40 +08:00
|
|
|
MVT::ValueType EVT = cast<VTSDNode>(N1)->getVT();
|
2005-10-11 05:26:48 +08:00
|
|
|
unsigned EVTBits = MVT::getSizeInBits(EVT);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (sext_in_reg c1) -> c1
|
2006-05-09 04:59:41 +08:00
|
|
|
if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
|
2006-05-07 07:06:26 +08:00
|
|
|
return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0, N1);
|
|
|
|
|
2006-05-07 06:43:44 +08:00
|
|
|
// If the input is already sign extended, just drop the extension.
|
|
|
|
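// (For example, with VT == i32 and EVT == i16 this requires at least
// 32-16+1 == 17 known sign bits, i.e. the value is already a sign-extended
// 16-bit quantity and the sext_in_reg is a no-op.)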
if (TLI.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1)
|
|
|
|
return N0;
|
|
|
|
|
2005-09-03 05:18:40 +08:00
|
|
|
// fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
|
|
|
|
if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
|
|
|
|
EVT < cast<VTSDNode>(N0.getOperand(1))->getVT()) {
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0), N1);
|
2005-09-03 05:18:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
|
|
|
|
if (TLI.MaskedValueIsZero(N0, 1ULL << (EVTBits-1)))
|
|
|
|
return DAG.getZeroExtendInReg(N0, EVT);
|
2006-05-07 06:43:44 +08:00
|
|
|
|
|
|
|
// fold operands of sext_in_reg based on knowledge that the top bits are not
|
|
|
|
// demanded.
|
|
|
|
if (SimplifyDemandedBits(SDOperand(N, 0)))
|
|
|
|
return SDOperand(N, 0);
|
|
|
|
|
2007-03-22 09:54:19 +08:00
|
|
|
// fold (sext_in_reg (load x)) -> (smaller sextload x)
|
|
|
|
// fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
|
|
|
|
SDOperand NarrowLoad = ReduceLoadWidth(N);
|
|
|
|
if (NarrowLoad.Val)
|
|
|
|
return NarrowLoad;
|
|
|
|
|
2006-05-09 04:59:41 +08:00
|
|
|
// fold (sext_in_reg (srl X, 24), i8) -> sra X, 24
|
|
|
|
// fold (sext_in_reg (srl X, 23), i8) -> sra X, 23 iff possible.
|
|
|
|
// We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
|
2006-05-09 04:59:41 +08:00
|
|
|
if (N0.getOpcode() == ISD::SRL) {
|
|
|
|
if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
|
|
|
|
if (ShAmt->getValue()+EVTBits <= MVT::getSizeInBits(VT)) {
|
|
|
|
// We can turn this into an SRA iff the input to the SRL is already sign
|
|
|
|
// extended enough.
|
|
|
|
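// Roughly: if X has more than VTBits-(ShAmt+EVTBits) known sign bits, then
// bit ShAmt+EVTBits-1 of X and every bit above it equal X's sign bit, so
// (sra X, ShAmt) yields the same value as sign-extending the srl result
// from EVT.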
unsigned InSignBits = TLI.ComputeNumSignBits(N0.getOperand(0));
|
|
|
|
if (MVT::getSizeInBits(VT)-(ShAmt->getValue()+EVTBits) < InSignBits)
|
|
|
|
return DAG.getNode(ISD::SRA, VT, N0.getOperand(0), N0.getOperand(1));
|
|
|
|
}
|
2006-05-09 04:59:41 +08:00
|
|
|
}
|
2007-03-22 09:54:19 +08:00
|
|
|
|
2005-10-13 11:11:28 +08:00
|
|
|
// fold (sext_inreg (extload x)) -> (sextload x)
|
2006-10-04 08:56:09 +08:00
|
|
|
if (ISD::isEXTLoad(N0.Val) &&
|
2007-03-07 16:07:03 +08:00
|
|
|
ISD::isUNINDEXEDLoad(N0.Val) &&
|
2006-10-11 15:10:22 +08:00
|
|
|
EVT == cast<LoadSDNode>(N0)->getLoadedVT() &&
|
2006-10-04 08:56:09 +08:00
|
|
|
(!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
|
|
|
SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
|
|
|
|
LN0->getBasePtr(), LN0->getSrcValue(),
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(),
|
|
|
|
LN0->getAlignment());
|
2005-12-15 03:25:30 +08:00
|
|
|
CombineTo(N, ExtLoad);
|
2005-10-14 02:34:58 +08:00
|
|
|
CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1));
|
2006-04-21 07:55:59 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
|
|
|
// fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
|
2007-03-07 16:07:03 +08:00
|
|
|
if (ISD::isZEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
|
|
|
|
N0.hasOneUse() &&
|
2006-10-11 15:10:22 +08:00
|
|
|
EVT == cast<LoadSDNode>(N0)->getLoadedVT() &&
|
2006-10-04 08:56:09 +08:00
|
|
|
(!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
|
|
|
SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
|
|
|
|
LN0->getBasePtr(), LN0->getSrcValue(),
|
2007-04-23 07:15:30 +08:00
|
|
|
LN0->getSrcValueOffset(), EVT,
|
|
|
|
LN0->isVolatile(),
|
|
|
|
LN0->getAlignment());
|
2005-12-15 03:25:30 +08:00
|
|
|
CombineTo(N, ExtLoad);
|
2005-10-14 02:34:58 +08:00
|
|
|
CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1));
|
2006-04-21 07:55:59 +08:00
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
2005-10-13 11:11:28 +08:00
|
|
|
}
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
|
|
|
// noop truncate
|
|
|
|
if (N0.getValueType() == N->getValueType(0))
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0;
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (truncate c1) -> c1
|
2006-05-07 07:06:26 +08:00
|
|
|
if (isa<ConstantSDNode>(N0))
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (truncate (truncate x)) -> (truncate x)
|
|
|
|
if (N0.getOpcode() == ISD::TRUNCATE)
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, N0.getOperand(0));
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (truncate (ext x)) -> (ext x) or (truncate x) or x
|
2006-05-06 06:56:26 +08:00
|
|
|
if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::SIGN_EXTEND||
|
|
|
|
N0.getOpcode() == ISD::ANY_EXTEND) {
|
2006-11-21 02:05:46 +08:00
|
|
|
if (N0.getOperand(0).getValueType() < VT)
|
2005-09-01 08:19:25 +08:00
|
|
|
// if the source is smaller than the dest, we still need an extend
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getNode(N0.getOpcode(), VT, N0.getOperand(0));
|
2006-11-21 02:05:46 +08:00
|
|
|
else if (N0.getOperand(0).getValueType() > VT)
|
2005-09-01 08:19:25 +08:00
|
|
|
// if the source is larger than the dest, then we just need the truncate
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, N0.getOperand(0));
|
2005-09-01 08:19:25 +08:00
|
|
|
else
|
|
|
|
// if the source and dest are the same type, we can drop both the extend
|
|
|
|
// and the truncate
|
2005-09-06 12:43:02 +08:00
|
|
|
return N0.getOperand(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
2007-03-22 04:14:05 +08:00
|
|
|
|
|
|
|
// fold (truncate (load x)) -> (smaller load x)
|
2007-03-22 04:14:05 +08:00
|
|
|
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
|
2007-03-22 09:54:19 +08:00
|
|
|
return ReduceLoadWidth(N);
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
|
|
|
// If the input is a constant, let getNode() fold it.
|
|
|
|
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
|
|
|
|
SDOperand Res = DAG.getNode(ISD::BIT_CONVERT, VT, N0);
|
|
|
|
if (Res.Val != N) return Res;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (N0.getOpcode() == ISD::BIT_CONVERT) // conv(conv(x,t1),t2) -> conv(x,t2)
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0));
|
2006-04-02 10:53:43 +08:00
|
|
|
|
|
|
|
// fold (conv (load x)) -> (load (conv*)x)
|
2007-05-08 05:27:48 +08:00
|
|
|
// If the resultant load doesn't need a higher alignment than the original.
|
|
|
|
if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
|
2007-05-17 06:45:30 +08:00
|
|
|
ISD::isUNINDEXEDLoad(N0.Val) &&
|
2007-05-08 05:27:48 +08:00
|
|
|
TLI.isOperationLegal(ISD::LOAD, VT)) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
2007-05-08 05:27:48 +08:00
|
|
|
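// The new load reinterprets the same bytes at type VT, so this is only safe
// when VT's ABI alignment requirement does not exceed the alignment the
// original load already provides.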
unsigned Align = TLI.getTargetMachine().getTargetData()->
|
2007-05-19 02:41:29 +08:00
|
|
|
getABITypeAlignment(MVT::getTypeForValueType(VT));
|
2007-05-08 05:27:48 +08:00
|
|
|
unsigned OrigAlign = LN0->getAlignment();
|
|
|
|
if (Align <= OrigAlign) {
|
|
|
|
SDOperand Load = DAG.getLoad(VT, LN0->getChain(), LN0->getBasePtr(),
|
|
|
|
LN0->getSrcValue(), LN0->getSrcValueOffset(),
|
|
|
|
LN0->isVolatile(), LN0->getAlignment());
|
|
|
|
AddToWorkList(N);
|
|
|
|
CombineTo(N0.Val, DAG.getNode(ISD::BIT_CONVERT, N0.getValueType(), Load),
|
|
|
|
Load.getValue(1));
|
|
|
|
return Load;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
2006-04-02 10:53:43 +08:00
|
|
|
SDOperand DAGCombiner::visitVBIT_CONVERT(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
|
|
|
// If the input is a VBUILD_VECTOR with all constant elements, fold this now.
|
|
|
|
// First check to see if this is all constant.
|
|
|
|
if (N0.getOpcode() == ISD::VBUILD_VECTOR && N0.Val->hasOneUse() &&
|
|
|
|
VT == MVT::Vector) {
|
|
|
|
bool isSimple = true;
|
|
|
|
for (unsigned i = 0, e = N0.getNumOperands()-2; i != e; ++i)
|
|
|
|
if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
|
|
|
|
N0.getOperand(i).getOpcode() != ISD::Constant &&
|
|
|
|
N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
|
|
|
|
isSimple = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2006-04-04 01:29:28 +08:00
|
|
|
MVT::ValueType DestEltVT = cast<VTSDNode>(N->getOperand(2))->getVT();
|
|
|
|
if (isSimple && !MVT::isVector(DestEltVT)) {
|
2006-04-02 10:53:43 +08:00
|
|
|
return ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(N0.Val, DestEltVT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// ConstantFoldVBIT_CONVERTofVBUILD_VECTOR - We know that BV is a vbuild_vector
|
|
|
|
/// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
|
|
|
|
/// destination element value type.
|
|
|
|
SDOperand DAGCombiner::
|
|
|
|
ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) {
|
|
|
|
MVT::ValueType SrcEltVT = BV->getOperand(0).getValueType();
|
|
|
|
|
|
|
|
// If this is already the right type, we're done.
|
|
|
|
if (SrcEltVT == DstEltVT) return SDOperand(BV, 0);
|
|
|
|
|
|
|
|
unsigned SrcBitSize = MVT::getSizeInBits(SrcEltVT);
|
|
|
|
unsigned DstBitSize = MVT::getSizeInBits(DstEltVT);
|
|
|
|
|
|
|
|
// If this is a conversion of N elements of one type to N elements of another
|
|
|
|
// type, convert each element. This handles FP<->INT cases.
|
|
|
|
if (SrcBitSize == DstBitSize) {
|
2006-08-08 10:23:42 +08:00
|
|
|
SmallVector<SDOperand, 8> Ops;
|
2006-04-08 12:15:24 +08:00
|
|
|
for (unsigned i = 0, e = BV->getNumOperands()-2; i != e; ++i) {
|
2006-04-02 10:53:43 +08:00
|
|
|
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, DstEltVT, BV->getOperand(i)));
|
2006-04-08 12:15:24 +08:00
|
|
|
AddToWorkList(Ops.back().Val);
|
|
|
|
}
|
2006-04-02 10:53:43 +08:00
|
|
|
Ops.push_back(*(BV->op_end()-2)); // Add num elements.
|
|
|
|
Ops.push_back(DAG.getValueType(DstEltVT));
|
2006-08-08 10:23:42 +08:00
|
|
|
return DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
|
2006-04-02 10:53:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we're growing or shrinking the elements. To avoid having to
|
|
|
|
// handle annoying details of growing/shrinking FP values, we convert them to
|
|
|
|
// int first.
|
|
|
|
if (MVT::isFloatingPoint(SrcEltVT)) {
|
|
|
|
// Convert the input float vector to an int vector where the elements are
// the same size.
|
|
|
|
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
|
|
|
|
MVT::ValueType IntVT = SrcEltVT == MVT::f32 ? MVT::i32 : MVT::i64;
|
|
|
|
BV = ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(BV, IntVT).Val;
|
|
|
|
SrcEltVT = IntVT;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now we know the input is an integer vector. If the output is a FP type,
|
|
|
|
// convert to integer first, then to FP of the right size.
|
|
|
|
if (MVT::isFloatingPoint(DstEltVT)) {
|
|
|
|
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
|
|
|
|
MVT::ValueType TmpVT = DstEltVT == MVT::f32 ? MVT::i32 : MVT::i64;
|
|
|
|
SDNode *Tmp = ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(BV, TmpVT).Val;
|
|
|
|
|
|
|
|
// Next, convert to FP elements of the same size.
|
|
|
|
return ConstantFoldVBIT_CONVERTofVBUILD_VECTOR(Tmp, DstEltVT);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, we know the src/dst types are both integers of differing types.
|
|
|
|
// Handling growing first.
|
|
|
|
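// Purely illustrative example: growing two i32 elements {0x1, 0x2} into one
// i64 element yields 0x0000000200000001 on a little-endian target and
// 0x0000000100000002 on a big-endian one.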
assert(MVT::isInteger(SrcEltVT) && MVT::isInteger(DstEltVT));
|
|
|
|
if (SrcBitSize < DstBitSize) {
|
|
|
|
unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
|
|
|
|
|
2006-08-08 10:23:42 +08:00
|
|
|
SmallVector<SDOperand, 8> Ops;
|
2006-04-02 10:53:43 +08:00
|
|
|
for (unsigned i = 0, e = BV->getNumOperands()-2; i != e;
|
|
|
|
i += NumInputsPerOutput) {
|
|
|
|
bool isLE = TLI.isLittleEndian();
|
|
|
|
uint64_t NewBits = 0;
|
|
|
|
bool EltIsUndef = true;
|
|
|
|
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
|
|
|
|
// Shift the previously computed bits over.
|
|
|
|
NewBits <<= SrcBitSize;
|
|
|
|
SDOperand Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
|
|
|
|
if (Op.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
EltIsUndef = false;
|
|
|
|
|
|
|
|
NewBits |= cast<ConstantSDNode>(Op)->getValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (EltIsUndef)
|
|
|
|
Ops.push_back(DAG.getNode(ISD::UNDEF, DstEltVT));
|
|
|
|
else
|
|
|
|
Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ops.push_back(DAG.getConstant(Ops.size(), MVT::i32)); // Add num elements.
|
|
|
|
Ops.push_back(DAG.getValueType(DstEltVT)); // Add element size.
|
2006-08-08 10:23:42 +08:00
|
|
|
return DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
|
2006-04-02 10:53:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, this must be the case where we are shrinking elements: each input
|
|
|
|
// turns into multiple outputs.
|
|
|
|
unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
|
2006-08-08 10:23:42 +08:00
|
|
|
SmallVector<SDOperand, 8> Ops;
|
2006-04-02 10:53:43 +08:00
|
|
|
for (unsigned i = 0, e = BV->getNumOperands()-2; i != e; ++i) {
|
|
|
|
if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
|
|
|
|
for (unsigned j = 0; j != NumOutputsPerInput; ++j)
|
|
|
|
Ops.push_back(DAG.getNode(ISD::UNDEF, DstEltVT));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
uint64_t OpVal = cast<ConstantSDNode>(BV->getOperand(i))->getValue();
|
|
|
|
|
|
|
|
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
|
|
|
|
unsigned ThisVal = OpVal & ((1ULL << DstBitSize)-1);
|
|
|
|
OpVal >>= DstBitSize;
|
|
|
|
Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
|
|
|
|
}
|
|
|
|
|
|
|
|
// For big endian targets, swap the order of the pieces of each element.
|
|
|
|
if (!TLI.isLittleEndian())
|
|
|
|
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
|
|
|
|
}
|
|
|
|
Ops.push_back(DAG.getConstant(Ops.size(), MVT::i32)); // Add num elements.
|
|
|
|
Ops.push_back(DAG.getValueType(DstEltVT)); // Add element size.
|
2006-08-08 10:23:42 +08:00
|
|
|
return DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
|
2006-04-02 10:53:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
SDOperand DAGCombiner::visitFADD(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-10-18 08:28:13 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-10-18 08:28:13 +08:00
|
|
|
|
|
|
|
// fold (fadd c1, c2) -> c1+c2
|
|
|
|
if (N0CFP && N1CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FADD, VT, N0, N1);
|
2005-10-18 08:28:13 +08:00
|
|
|
// canonicalize constant to RHS
|
|
|
|
if (N0CFP && !N1CFP)
|
|
|
|
return DAG.getNode(ISD::FADD, VT, N1, N0);
|
2005-09-29 06:28:18 +08:00
|
|
|
// fold (A + (-B)) -> A-B
|
|
|
|
if (isNegatibleForFree(N1) == 2)
|
|
|
|
return DAG.getNode(ISD::FSUB, VT, N0, GetNegatedExpression(N1, DAG));
|
2005-09-29 06:28:18 +08:00
|
|
|
// fold ((-A) + B) -> B-A
|
|
|
|
if (isNegatibleForFree(N0) == 2)
|
|
|
|
return DAG.getNode(ISD::FSUB, VT, N1, GetNegatedExpression(N0, DAG));
|
2007-01-09 07:04:05 +08:00
|
|
|
|
|
|
|
// If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
|
|
|
|
if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
|
|
|
|
N0.Val->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
|
|
|
|
return DAG.getNode(ISD::FADD, VT, N0.getOperand(0),
|
|
|
|
DAG.getNode(ISD::FADD, VT, N0.getOperand(1), N1));
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitFSUB(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-10-18 08:28:13 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-10-18 08:28:13 +08:00
|
|
|
|
|
|
|
// fold (fsub c1, c2) -> c1-c2
|
|
|
|
if (N0CFP && N1CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FSUB, VT, N0, N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
// fold (A-(-B)) -> A+B
|
|
|
|
if (isNegatibleForFree(N1))
|
|
|
|
return DAG.getNode(ISD::FADD, VT, N0, GetNegatedExpression(N1, DAG));
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitFMUL(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2005-10-18 04:40:11 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
2005-10-18 04:40:11 +08:00
|
|
|
// fold (fmul c1, c2) -> c1*c2
|
|
|
|
if (N0CFP && N1CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FMUL, VT, N0, N1);
|
2005-10-18 04:40:11 +08:00
|
|
|
// canonicalize constant to RHS
|
2005-10-18 08:28:13 +08:00
|
|
|
if (N0CFP && !N1CFP)
|
|
|
|
return DAG.getNode(ISD::FMUL, VT, N1, N0);
|
2005-10-18 04:40:11 +08:00
|
|
|
// fold (fmul X, 2.0) -> (fadd X, X)
|
|
|
|
if (N1CFP && N1CFP->isExactlyValue(+2.0))
|
|
|
|
return DAG.getNode(ISD::FADD, VT, N0, N0);
|
|
|
|
// fold (fmul X, -1.0) -> (fneg X)
|
|
|
|
if (N1CFP && N1CFP->isExactlyValue(-1.0))
|
|
|
|
return DAG.getNode(ISD::FNEG, VT, N0);
|
|
|
|
|
|
|
|
// -X * -Y -> X*Y
|
|
|
|
if (char LHSNeg = isNegatibleForFree(N0)) {
|
|
|
|
if (char RHSNeg = isNegatibleForFree(N1)) {
|
|
|
|
// Both can be negated for free, check to see if at least one is cheaper
|
|
|
|
// negated.
|
|
|
|
if (LHSNeg == 2 || RHSNeg == 2)
|
|
|
|
return DAG.getNode(ISD::FMUL, VT, GetNegatedExpression(N0, DAG),
|
|
|
|
GetNegatedExpression(N1, DAG));
|
|
|
|
}
|
|
|
|
}
|
2007-01-09 07:04:05 +08:00
|
|
|
|
|
|
|
// If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
|
|
|
|
if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL &&
|
|
|
|
N0.Val->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
|
|
|
|
return DAG.getNode(ISD::FMUL, VT, N0.getOperand(0),
|
|
|
|
DAG.getNode(ISD::FMUL, VT, N0.getOperand(1), N1));
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitFDIV(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2006-01-19 06:35:16 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
2006-01-19 06:35:16 +08:00
|
|
|
// fold (fdiv c1, c2) -> c1/c2
|
|
|
|
if (N0CFP && N1CFP)
|
|
|
|
return DAG.getNode(ISD::FDIV, VT, N0, N1);
|
|
|
|
|
|
|
|
|
|
|
|
// -X / -Y -> X/Y
|
|
|
|
if (char LHSNeg = isNegatibleForFree(N0)) {
|
|
|
|
if (char RHSNeg = isNegatibleForFree(N1)) {
|
|
|
|
// Both can be negated for free, check to see if at least one is cheaper
|
|
|
|
// negated.
|
|
|
|
if (LHSNeg == 2 || RHSNeg == 2)
|
|
|
|
return DAG.getNode(ISD::FDIV, VT, GetNegatedExpression(N0, DAG),
|
|
|
|
GetNegatedExpression(N1, DAG));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitFREM(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
2006-01-19 06:35:16 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
2006-01-19 06:35:16 +08:00
|
|
|
// fold (frem c1, c2) -> fmod(c1,c2)
|
|
|
|
if (N0CFP && N1CFP)
|
|
|
|
return DAG.getNode(ISD::FREM, VT, N0, N1);
|
2005-09-29 06:28:18 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
2006-03-05 13:30:57 +08:00
|
|
|
SDOperand DAGCombiner::visitFCOPYSIGN(SDNode *N) {
|
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
|
|
|
if (N0CFP && N1CFP) // Constant fold
|
|
|
|
return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1);
|
|
|
|
|
|
|
|
if (N1CFP) {
|
|
|
|
// copysign(x, c1) -> fabs(x) iff ispos(c1)
|
|
|
|
// copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
|
|
|
|
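// Reinterpret the constant's bits as an integer: for an IEEE double the sign
// is the most significant bit, so u.i >= 0 exactly when the sign bit is
// clear (i.e. the constant is positive or +0.0).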
union {
|
|
|
|
double d;
|
|
|
|
int64_t i;
|
|
|
|
} u;
|
|
|
|
u.d = N1CFP->getValue();
|
|
|
|
if (u.i >= 0)
|
|
|
|
return DAG.getNode(ISD::FABS, VT, N0);
|
|
|
|
else
|
|
|
|
return DAG.getNode(ISD::FNEG, VT, DAG.getNode(ISD::FABS, VT, N0));
|
|
|
|
}
|
|
|
|
|
|
|
|
// copysign(fabs(x), y) -> copysign(x, y)
|
|
|
|
// copysign(fneg(x), y) -> copysign(x, y)
|
|
|
|
// copysign(copysign(x,z), y) -> copysign(x, y)
|
|
|
|
if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
|
|
|
|
N0.getOpcode() == ISD::FCOPYSIGN)
|
|
|
|
return DAG.getNode(ISD::FCOPYSIGN, VT, N0.getOperand(0), N1);
|
|
|
|
|
|
|
|
// copysign(x, fabs(y)) -> fabs(x)
|
|
|
|
if (N1.getOpcode() == ISD::FABS)
|
|
|
|
return DAG.getNode(ISD::FABS, VT, N0);
|
|
|
|
|
|
|
|
// copysign(x, copysign(y,z)) -> copysign(x, z)
|
|
|
|
if (N1.getOpcode() == ISD::FCOPYSIGN)
|
|
|
|
return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1.getOperand(1));
|
|
|
|
|
|
|
|
// copysign(x, fp_extend(y)) -> copysign(x, y)
|
|
|
|
// copysign(x, fp_round(y)) -> copysign(x, y)
|
|
|
|
if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
|
|
|
|
return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1.getOperand(0));
|
|
|
|
|
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-29 06:28:18 +08:00
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (sint_to_fp c1) -> c1fp
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::SINT_TO_FP, VT, N0);
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
|
2006-01-19 06:35:16 +08:00
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (uint_to_fp c1) -> c1fp
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0C)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::UINT_TO_FP, VT, N0);
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFP_TO_SINT(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (fp_to_sint c1fp) -> c1
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FP_TO_SINT, VT, N0);
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFP_TO_UINT(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (fp_to_uint c1fp) -> c1
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FP_TO_UINT, VT, N0);
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFP_ROUND(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (fp_round c1fp) -> c1fp
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FP_ROUND, VT, N0);
|
2006-03-13 14:26:26 +08:00
|
|
|
|
|
|
|
// fold (fp_round (fp_extend x)) -> x
|
|
|
|
if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
|
|
|
|
return N0.getOperand(0);
|
|
|
|
|
|
|
|
// fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
|
|
|
|
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.Val->hasOneUse()) {
|
|
|
|
SDOperand Tmp = DAG.getNode(ISD::FP_ROUND, VT, N0.getOperand(0));
|
|
|
|
AddToWorkList(Tmp.Val);
|
|
|
|
return DAG.getNode(ISD::FCOPYSIGN, VT, Tmp, N0.getOperand(1));
|
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
|
2005-09-01 08:19:25 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
|
2005-09-03 05:18:40 +08:00
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (fp_round_inreg c1fp) -> c1fp
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP) {
|
|
|
|
SDOperand Round = DAG.getConstantFP(N0CFP->getValue(), EVT);
|
2005-09-06 12:43:02 +08:00
|
|
|
return DAG.getNode(ISD::FP_EXTEND, VT, Round);
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
|
|
|
|
// fold (fp_extend c1fp) -> c1fp
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FP_EXTEND, VT, N0);
|
2006-05-06 05:34:35 +08:00
|
|
|
|
|
|
|
// fold (fpext (load x)) -> (fpext (fpround (extload x)))
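// (Other users of the original narrower load get the old value back via an
// FP_ROUND of the extended result; see the CombineTo calls below.)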
|
2006-10-10 04:57:25 +08:00
|
|
|
if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
|
2006-10-04 08:56:09 +08:00
|
|
|
(!AfterLegalize||TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
|
2006-10-10 04:57:25 +08:00
|
|
|
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
|
|
|
|
SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
|
|
|
|
LN0->getBasePtr(), LN0->getSrcValue(),
|
|
|
|
LN0->getSrcValueOffset(),
|
2007-04-23 07:15:30 +08:00
|
|
|
N0.getValueType(),
|
|
|
|
LN0->isVolatile(),
|
|
|
|
LN0->getAlignment());
|
2006-05-06 05:34:35 +08:00
|
|
|
CombineTo(N, ExtLoad);
|
|
|
|
CombineTo(N0.Val, DAG.getNode(ISD::FP_ROUND, N0.getValueType(), ExtLoad),
|
|
|
|
ExtLoad.getValue(1));
|
|
|
|
return SDOperand(N, 0); // Return N so it doesn't get rechecked!
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFNEG(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
|
|
|
// fold (fneg c1) -> -c1
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FNEG, VT, N0);
|
|
|
|
// fold (fneg (sub x, y)) -> (sub y, x)
|
2006-03-05 13:30:57 +08:00
|
|
|
if (N0.getOpcode() == ISD::SUB)
|
|
|
|
return DAG.getNode(ISD::SUB, VT, N0.getOperand(1), N0.getOperand(0));
|
2006-01-19 06:35:16 +08:00
|
|
|
// fold (fneg (fneg x)) -> x
|
2006-03-05 13:30:57 +08:00
|
|
|
if (N0.getOpcode() == ISD::FNEG)
|
|
|
|
return N0.getOperand(0);
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
SDOperand DAGCombiner::visitFABS(SDNode *N) {
|
2006-01-19 06:35:16 +08:00
|
|
|
SDOperand N0 = N->getOperand(0);
|
|
|
|
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
|
|
|
|
MVT::ValueType VT = N->getValueType(0);
|
|
|
|
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (fabs c1) -> fabs(c1)
|
2005-09-03 05:18:40 +08:00
|
|
|
if (N0CFP)
|
2006-01-19 06:35:16 +08:00
|
|
|
return DAG.getNode(ISD::FABS, VT, N0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (fabs (fabs x)) -> (fabs x)
|
2006-03-05 13:30:57 +08:00
|
|
|
if (N0.getOpcode() == ISD::FABS)
|
2005-09-06 12:43:02 +08:00
|
|
|
return N->getOperand(0);
|
2005-09-01 08:19:25 +08:00
|
|
|
// fold (fabs (fneg x)) -> (fabs x)
|
2006-03-05 13:30:57 +08:00
|
|
|
// fold (fabs (fcopysign x, y)) -> (fabs x)
|
|
|
|
if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
|
|
|
|
return DAG.getNode(ISD::FABS, VT, N0.getOperand(0));
|
|
|
|
|
2005-09-06 12:43:02 +08:00
|
|
|
return SDOperand();
|
2005-09-01 08:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-09-20 06:34:01 +08:00
|
|
|
SDOperand DAGCombiner::visitBRCOND(SDNode *N) {
|
|
|
|
SDOperand Chain = N->getOperand(0);
|
|
|
|
SDOperand N1 = N->getOperand(1);
|
|
|
|
SDOperand N2 = N->getOperand(2);
|
|
|
|
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
|
|
|
|
|
|
|
|
// never taken branch, fold to chain
|
|
|
|
if (N1C && N1C->isNullValue())
|
|
|
|
return Chain;
|
|
|
|
// unconditional branch
|
2005-10-06 05:43:42 +08:00
|
|
|
if (N1C && N1C->getValue() == 1)
|
2005-09-20 06:34:01 +08:00
|
|
|
return DAG.getNode(ISD::BR, MVT::Other, Chain, N2);
|
2006-02-01 15:19:44 +08:00
|
|
|
// fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
|
|
|
|
// on the target.
|
|
|
|
if (N1.getOpcode() == ISD::SETCC &&
|
|
|
|
TLI.isOperationLegal(ISD::BR_CC, MVT::Other)) {
|
|
|
|
return DAG.getNode(ISD::BR_CC, MVT::Other, Chain, N1.getOperand(2),
|
|
|
|
N1.getOperand(0), N1.getOperand(1), N2);
|
|
|
|
}
|
2005-09-20 06:34:01 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
2005-10-05 14:47:48 +08:00
|
|
|
// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
|
|
|
|
//
|
2005-09-20 06:34:01 +08:00
|
|
|
SDOperand DAGCombiner::visitBR_CC(SDNode *N) {
|
2005-10-05 14:47:48 +08:00
|
|
|
CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
|
|
|
|
SDOperand CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
|
|
|
|
|
|
|
|
// Use SimplifySetCC to simplify SETCC's.
|
2005-10-06 05:43:42 +08:00
|
|
|
SDOperand Simp = SimplifySetCC(MVT::i1, CondLHS, CondRHS, CC->get(), false);
|
2006-10-14 11:52:46 +08:00
|
|
|
if (Simp.Val) AddToWorkList(Simp.Val);
|
|
|
|
|
2005-10-06 05:43:42 +08:00
|
|
|
ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(Simp.Val);
|
|
|
|
|
|
|
|
// fold br_cc true, dest -> br dest (unconditional branch)
|
|
|
|
if (SCCC && SCCC->getValue())
|
|
|
|
return DAG.getNode(ISD::BR, MVT::Other, N->getOperand(0),
|
|
|
|
N->getOperand(4));
|
|
|
|
// fold br_cc false, dest -> unconditional fall through
|
|
|
|
if (SCCC && SCCC->isNullValue())
|
|
|
|
return N->getOperand(0);
|
2006-10-14 11:52:46 +08:00
|
|
|
|
2005-10-06 05:43:42 +08:00
|
|
|
// fold to a simpler setcc
|
|
|
|
if (Simp.Val && Simp.getOpcode() == ISD::SETCC)
|
|
|
|
return DAG.getNode(ISD::BR_CC, MVT::Other, N->getOperand(0),
|
|
|
|
Simp.getOperand(2), Simp.getOperand(0),
|
|
|
|
Simp.getOperand(1), N->getOperand(4));
|
2005-09-20 06:34:01 +08:00
|
|
|
return SDOperand();
|
|
|
|
}
|
|
|
|
|
2006-11-11 08:39:41 +08:00
|
|
|
|
|
|
|
/// CombineToPreIndexedLoadStore - Try turning a load / store into a
/// pre-indexed load / store when the base pointer is an add or subtract
/// and it has other uses besides the load / store.  After the
/// transformation, the new indexed load / store has effectively folded
/// the add / subtract in, and all of its other uses are redirected to the
/// new load / store.
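///
/// For example, on a target with pre-increment addressing (e.g. ARM's
/// "ldr r0, [r1, #4]!"), a load whose address is (add ptr, 4) and whose add
/// has other users can become a single pre-indexed load that also produces
/// the updated pointer value.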
|
|
|
|
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
|
|
|
|
if (!AfterLegalize)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isLoad = true;
|
|
|
|
SDOperand Ptr;
|
|
|
|
MVT::ValueType VT;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
2006-12-16 14:25:23 +08:00
|
|
|
if (LD->getAddressingMode() != ISD::UNINDEXED)
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
VT = LD->getLoadedVT();
|
2007-03-07 16:07:03 +08:00
|
|
|
if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
|
2006-11-11 08:39:41 +08:00
|
|
|
!TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
|
|
|
|
return false;
|
|
|
|
Ptr = LD->getBasePtr();
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
2006-12-16 14:25:23 +08:00
|
|
|
if (ST->getAddressingMode() != ISD::UNINDEXED)
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
VT = ST->getStoredVT();
|
|
|
|
if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
|
|
|
|
!TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
|
|
|
|
return false;
|
|
|
|
Ptr = ST->getBasePtr();
|
|
|
|
isLoad = false;
|
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
// If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
|
|
|
|
// out. There is no reason to make this a preinc/predec.
|
|
|
|
if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
|
|
|
|
Ptr.Val->hasOneUse())
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
// Ask the target to do addressing mode selection.
|
|
|
|
SDOperand BasePtr;
|
|
|
|
SDOperand Offset;
|
|
|
|
ISD::MemIndexedMode AM = ISD::UNINDEXED;
|
|
|
|
if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
|
|
|
|
return false;
|
2007-05-04 07:52:19 +08:00
|
|
|
// Don't create an indexed load / store with zero offset.
|
|
|
|
if (isa<ConstantSDNode>(Offset) &&
|
|
|
|
cast<ConstantSDNode>(Offset)->getValue() == 0)
|
|
|
|
return false;
|
2006-11-11 08:56:29 +08:00
|
|
|
|
2006-11-11 09:00:15 +08:00
|
|
|
// Try turning it into a pre-indexed load / store except when:
|
2007-05-24 10:35:39 +08:00
|
|
|
// 1) The new base ptr is a frame index.
|
|
|
|
// 2) If N is a store and the new base ptr is either the same as or is a
|
2006-11-11 08:56:29 +08:00
|
|
|
// predecessor of the value being stored.
|
2007-05-24 10:35:39 +08:00
|
|
|
// 3) Another use of old base ptr is a predecessor of N. If ptr is folded
|
2006-11-11 08:56:29 +08:00
|
|
|
// that would create a cycle.
|
2007-05-24 10:35:39 +08:00
|
|
|
// 4) All uses are load / store ops that use it as old base ptr.
|
2006-11-11 08:56:29 +08:00
|
|
|
|
2006-11-11 09:00:15 +08:00
|
|
|
// Check #1. Preinc'ing a frame index would require copying the stack pointer
|
|
|
|
// (plus the implicit offset) to a register to preinc anyway.
|
|
|
|
if (isa<FrameIndexSDNode>(BasePtr))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check #2.
|
2006-11-11 08:56:29 +08:00
|
|
|
if (!isLoad) {
|
|
|
|
SDOperand Val = cast<StoreSDNode>(N)->getValue();
|
2007-05-24 10:35:39 +08:00
|
|
|
if (Val == BasePtr || BasePtr.Val->isPredecessor(Val.Val))
|
2006-11-11 08:56:29 +08:00
|
|
|
return false;
|
|
|
|
}
|
2006-11-11 08:39:41 +08:00
|
|
|
|
2007-05-24 10:35:39 +08:00
|
|
|
// Now check for #3 and #4.
|
2006-11-11 08:56:29 +08:00
|
|
|
bool RealUse = false;
|
|
|
|
for (SDNode::use_iterator I = Ptr.Val->use_begin(),
|
|
|
|
E = Ptr.Val->use_end(); I != E; ++I) {
|
|
|
|
SDNode *Use = *I;
|
|
|
|
if (Use == N)
|
|
|
|
continue;
|
|
|
|
if (Use->isPredecessor(N))
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
if (!((Use->getOpcode() == ISD::LOAD &&
|
|
|
|
cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
|
|
|
|
(Use->getOpcode() == ISD::STORE) &&
|
|
|
|
cast<StoreSDNode>(Use)->getBasePtr() == Ptr))
|
|
|
|
RealUse = true;
|
2006-11-11 08:39:41 +08:00
|
|
|
}
|
2006-11-11 08:56:29 +08:00
|
|
|
if (!RealUse)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SDOperand Result;
|
|
|
|
if (isLoad)
|
|
|
|
Result = DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM);
|
|
|
|
else
|
|
|
|
Result = DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM);
|
|
|
|
++PreIndexedNodes;
|
|
|
|
++NodesCombined;
|
2006-12-08 04:04:42 +08:00
|
|
|
DOUT << "\nReplacing.4 "; DEBUG(N->dump());
|
|
|
|
DOUT << "\nWith: "; DEBUG(Result.Val->dump(&DAG));
|
|
|
|
DOUT << '\n';
|
2006-11-11 08:56:29 +08:00
|
|
|
std::vector<SDNode*> NowDead;
|
|
|
|
if (isLoad) {
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(0),
|
|
|
|
NowDead);
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Result.getValue(2),
|
|
|
|
NowDead);
|
|
|
|
} else {
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(1),
|
|
|
|
NowDead);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nodes can end up on the worklist more than once. Make sure we do
|
|
|
|
// not process a node that has been replaced.
|
|
|
|
for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
|
|
|
|
removeFromWorkList(NowDead[i]);
|
|
|
|
// Finally, since the node is now dead, remove it from the graph.
|
|
|
|
DAG.DeleteNode(N);
|
|
|
|
|
|
|
|
// Replace the uses of Ptr with uses of the updated base value.
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0),
|
|
|
|
NowDead);
|
|
|
|
removeFromWorkList(Ptr.Val);
|
|
|
|
for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
|
|
|
|
removeFromWorkList(NowDead[i]);
|
|
|
|
DAG.DeleteNode(Ptr.Val);
|
|
|
|
|
|
|
|
return true;
|
2006-11-11 08:39:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// CombineToPostIndexedLoadStore - Try to combine a load / store with an
/// add / sub of the base pointer node into a post-indexed load / store.
/// The transformation effectively folds the add / subtract into the new
/// indexed load / store, and all of its uses are redirected to the
/// new load / store.
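///
/// For example, a load from ptr followed by an (add ptr, 4) used elsewhere
/// can become a post-indexed load (e.g. ARM's "ldr r0, [r1], #4") that
/// yields both the loaded value and the incremented pointer.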
|
|
|
|
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
|
|
|
|
if (!AfterLegalize)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool isLoad = true;
|
|
|
|
SDOperand Ptr;
|
|
|
|
MVT::ValueType VT;
|
|
|
|
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
|
2006-12-16 14:25:23 +08:00
|
|
|
if (LD->getAddressingMode() != ISD::UNINDEXED)
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
VT = LD->getLoadedVT();
|
|
|
|
if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
|
|
|
|
!TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
|
|
|
|
return false;
|
|
|
|
Ptr = LD->getBasePtr();
|
|
|
|
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
|
2006-12-16 14:25:23 +08:00
|
|
|
if (ST->getAddressingMode() != ISD::UNINDEXED)
|
|
|
|
return false;
|
2006-11-11 08:39:41 +08:00
|
|
|
VT = ST->getStoredVT();
|
|
|
|
if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
|
|
|
|
!TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
|
|
|
|
return false;
|
|
|
|
Ptr = ST->getBasePtr();
|
|
|
|
isLoad = false;
|
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
|
2006-11-16 08:08:20 +08:00
|
|
|
if (Ptr.Val->hasOneUse())
|
2006-11-11 08:56:29 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
for (SDNode::use_iterator I = Ptr.Val->use_begin(),
|
|
|
|
E = Ptr.Val->use_end(); I != E; ++I) {
|
|
|
|
SDNode *Op = *I;
|
|
|
|
if (Op == N ||
|
|
|
|
(Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
SDOperand BasePtr;
|
|
|
|
SDOperand Offset;
|
|
|
|
ISD::MemIndexedMode AM = ISD::UNINDEXED;
|
|
|
|
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
|
|
|
|
if (Ptr == Offset)
|
|
|
|
std::swap(BasePtr, Offset);
|
|
|
|
if (Ptr != BasePtr)
|
2006-11-11 08:39:41 +08:00
|
|
|
continue;
|
2007-05-04 07:52:19 +08:00
|
|
|
// Don't create an indexed load / store with zero offset.
|
|
|
|
if (isa<ConstantSDNode>(Offset) &&
|
|
|
|
cast<ConstantSDNode>(Offset)->getValue() == 0)
|
|
|
|
continue;
|
2006-11-11 08:39:41 +08:00
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
// Try turning it into a post-indexed load / store except when
|
|
|
|
// 1) All uses are load / store ops that use it as base ptr.
|
|
|
|
// 2) Op must be independent of N, i.e. Op is neither a predecessor
|
|
|
|
// nor a successor of N. Otherwise, if Op is folded that would
|
|
|
|
// create a cycle.
|
|
|
|
|
|
|
|
// Check for #1.
|
|
|
|
bool TryNext = false;
|
|
|
|
for (SDNode::use_iterator II = BasePtr.Val->use_begin(),
|
|
|
|
EE = BasePtr.Val->use_end(); II != EE; ++II) {
|
|
|
|
SDNode *Use = *II;
|
|
|
|
if (Use == Ptr.Val)
|
2006-11-11 08:39:41 +08:00
|
|
|
continue;
|
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
// If all the uses are load / store addresses, then don't do the
|
|
|
|
// transformation.
|
|
|
|
if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
|
|
|
|
bool RealUse = false;
|
|
|
|
for (SDNode::use_iterator III = Use->use_begin(),
|
|
|
|
EEE = Use->use_end(); III != EEE; ++III) {
|
|
|
|
SDNode *UseUse = *III;
|
|
|
|
if (!((UseUse->getOpcode() == ISD::LOAD &&
|
|
|
|
cast<LoadSDNode>(UseUse)->getBasePtr().Val == Use) ||
|
|
|
|
(UseUse->getOpcode() == ISD::STORE) &&
|
|
|
|
cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use))
|
|
|
|
RealUse = true;
|
2006-11-11 08:39:41 +08:00
|
|
|
}
|
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
if (!RealUse) {
|
|
|
|
TryNext = true;
|
|
|
|
break;
|
2006-11-11 08:39:41 +08:00
|
|
|
}
|
2006-11-11 08:56:29 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (TryNext)
|
|
|
|
continue;
|
2006-11-11 08:39:41 +08:00
|
|
|
|
2006-11-11 08:56:29 +08:00
|
|
|
// Check for #2
|
|
|
|
if (!Op->isPredecessor(N) && !N->isPredecessor(Op)) {
|
|
|
|
SDOperand Result = isLoad
|
|
|
|
? DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM)
|
|
|
|
: DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM);
|
|
|
|
++PostIndexedNodes;
|
|
|
|
++NodesCombined;
|
2006-12-08 04:04:42 +08:00
|
|
|
DOUT << "\nReplacing.5 "; DEBUG(N->dump());
|
|
|
|
DOUT << "\nWith: "; DEBUG(Result.Val->dump(&DAG));
|
|
|
|
DOUT << '\n';
|
2006-11-11 08:56:29 +08:00
|
|
|
std::vector<SDNode*> NowDead;
|
|
|
|
if (isLoad) {
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(0),
|
|
|
|
NowDead);
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Result.getValue(2),
|
|
|
|
NowDead);
|
|
|
|
} else {
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(1),
|
2006-11-11 08:39:41 +08:00
|
|
|
NowDead);
|
|
|
|
}
|
2006-11-11 08:56:29 +08:00
|
|
|
|
|
|
|
// Nodes can end up on the worklist more than once. Make sure we do
|
|
|
|
// not process a node that has been replaced.
|
|
|
|
for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
|
|
|
|
removeFromWorkList(NowDead[i]);
|
|
|
|
// Finally, since the node is now dead, remove it from the graph.
|
|
|
|
DAG.DeleteNode(N);
|
|
|
|
|
|
|
|
// Replace the uses of Use with uses of the updated base value.
|
|
|
|
DAG.ReplaceAllUsesOfValueWith(SDOperand(Op, 0),
|
|
|
|
Result.getValue(isLoad ? 1 : 0),
|
|
|
|
NowDead);
|
|
|
|
removeFromWorkList(Op);
|
|
|
|
for (unsigned i = 0, e = NowDead.size(); i != e; ++i)
|
|
|
|
removeFromWorkList(NowDead[i]);
|
|
|
|
DAG.DeleteNode(Op);
|
|
|
|
|
|
|
|
return true;
|
2006-11-11 08:39:41 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
SDOperand DAGCombiner::visitLOAD(SDNode *N) {
  LoadSDNode *LD  = cast<LoadSDNode>(N);
  SDOperand Chain = LD->getChain();
  SDOperand Ptr   = LD->getBasePtr();

  // If load is not volatile and there are no uses of the loaded value (and
  // the updated indexed value in case of indexed loads), change uses of the
  // chain value into uses of the chain input (i.e. delete the dead load).
  if (!LD->isVolatile()) {
    if (N->getValueType(1) == MVT::Other) {
      // Unindexed loads.
      if (N->hasNUsesOfValue(0, 0))
        return CombineTo(N, DAG.getNode(ISD::UNDEF, N->getValueType(0)), Chain);
    } else {
      // Indexed loads.
      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
      if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
        SDOperand Undef0 = DAG.getNode(ISD::UNDEF, N->getValueType(0));
        SDOperand Undef1 = DAG.getNode(ISD::UNDEF, N->getValueType(1));
        SDOperand To[] = { Undef0, Undef1, Chain };
        return CombineTo(N, To, 3);
      }
    }
  }

  // If this load is directly stored, replace the load value with the stored
  // value.
  // TODO: Handle store large -> read small portion.
  // TODO: Handle TRUNCSTORE/LOADEXT
  if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
    if (ISD::isNON_TRUNCStore(Chain.Val)) {
      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
      if (PrevST->getBasePtr() == Ptr &&
          PrevST->getValue().getValueType() == N->getValueType(0))
        return CombineTo(N, Chain.getOperand(1), Chain);
    }
  }

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDOperand BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDOperand ReplLoad;

      // Replace the chain to avoid dependency.
      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
        ReplLoad = DAG.getLoad(N->getValueType(0), BetterChain, Ptr,
                               LD->getSrcValue(), LD->getSrcValueOffset(),
                               LD->isVolatile(), LD->getAlignment());
      } else {
        ReplLoad = DAG.getExtLoad(LD->getExtensionType(),
                                  LD->getValueType(0),
                                  BetterChain, Ptr, LD->getSrcValue(),
                                  LD->getSrcValueOffset(),
                                  LD->getLoadedVT(),
                                  LD->isVolatile(),
                                  LD->getAlignment());
      }

      // Create token factor to keep old chain connected.
      SDOperand Token = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                    Chain, ReplLoad.getValue(1));

      // Replace uses with load result and token factor.  Don't add users
      // to work list.
      return CombineTo(N, ReplLoad.getValue(0), Token, false);
    }
  }

  // Try transforming N to an indexed load.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDOperand(N, 0);

  return SDOperand();
}

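// Illustrative examples of the combines done in visitSTORE below (types and
// values chosen arbitrarily):
//   store (bit_convert x), ptr  ->  store x, ptr
//     when the new store does not need a higher alignment, and
//   store f32 1.0, ptr          ->  store i32 0x3F800000, ptr
//     for stores of non-target FP constants.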
SDOperand DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDOperand Chain = ST->getChain();
  SDOperand Value = ST->getValue();
  SDOperand Ptr   = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
  if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
      ST->getAddressingMode() == ISD::UNINDEXED) {
    unsigned Align = ST->getAlignment();
    MVT::ValueType SVT = Value.getOperand(0).getValueType();
    unsigned OrigAlign = TLI.getTargetMachine().getTargetData()->
      getABITypeAlignment(MVT::getTypeForValueType(SVT));
    if (Align <= OrigAlign && TLI.isOperationLegal(ISD::STORE, SVT))
      return DAG.getStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(),
                          ST->getSrcValueOffset());
  }

  // Turn 'store float 1.0, Ptr' -> 'store int 0x3F800000, Ptr'
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
    if (Value.getOpcode() != ISD::TargetConstantFP) {
      SDOperand Tmp;
      switch (CFP->getValueType(0)) {
      default: assert(0 && "Unknown FP type");
      case MVT::f32:
        if (!AfterLegalize || TLI.isTypeLegal(MVT::i32)) {
          Tmp = DAG.getConstant(FloatToBits(CFP->getValue()), MVT::i32);
          return DAG.getStore(Chain, Tmp, Ptr, ST->getSrcValue(),
                              ST->getSrcValueOffset());
        }
        break;
      case MVT::f64:
        if (!AfterLegalize || TLI.isTypeLegal(MVT::i64)) {
          Tmp = DAG.getConstant(DoubleToBits(CFP->getValue()), MVT::i64);
          return DAG.getStore(Chain, Tmp, Ptr, ST->getSrcValue(),
                              ST->getSrcValueOffset());
        } else if (TLI.isTypeLegal(MVT::i32)) {
          // Many FP stores are not made apparent until after legalize, e.g.
          // for argument passing.  Since this is so common, custom legalize
          // the 64-bit integer store into two 32-bit stores.
          uint64_t Val = DoubleToBits(CFP->getValue());
          SDOperand Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
          SDOperand Hi = DAG.getConstant(Val >> 32, MVT::i32);
          if (!TLI.isLittleEndian()) std::swap(Lo, Hi);

          SDOperand St0 = DAG.getStore(Chain, Lo, Ptr, ST->getSrcValue(),
                                       ST->getSrcValueOffset());
          Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
                            DAG.getConstant(4, Ptr.getValueType()));
          SDOperand St1 = DAG.getStore(Chain, Hi, Ptr, ST->getSrcValue(),
                                       ST->getSrcValueOffset()+4);
          return DAG.getNode(ISD::TokenFactor, MVT::Other, St0, St1);
        }
        break;
      }
    }
  }

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDOperand BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      // Replace the chain to avoid dependency.
      SDOperand ReplStore;
      if (ST->isTruncatingStore()) {
        ReplStore = DAG.getTruncStore(BetterChain, Value, Ptr,
                                      ST->getSrcValue(),
                                      ST->getSrcValueOffset(),
                                      ST->getStoredVT());
      } else {
        ReplStore = DAG.getStore(BetterChain, Value, Ptr,
                                 ST->getSrcValue(), ST->getSrcValueOffset());
      }

      // Create token to keep both nodes around.
      SDOperand Token =
        DAG.getNode(ISD::TokenFactor, MVT::Other, Chain, ReplStore);

      // Don't add users to work list.
      return CombineTo(N, Token, false);
    }
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDOperand(N, 0);

  return SDOperand();
}

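// Illustrative example of the combine done in visitINSERT_VECTOR_ELT below
// (element names invented):
//   insert_vector_elt (build_vector a, b, c, d), x, 2
//     ->  build_vector a, b, x, d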
SDOperand DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDOperand InVec = N->getOperand(0);
  SDOperand InVal = N->getOperand(1);
  SDOperand EltNo = N->getOperand(2);

  // If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
  // vector with the inserted element.
  if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
    unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue();
    SmallVector<SDOperand, 8> Ops(InVec.Val->op_begin(), InVec.Val->op_end());
    if (Elt < Ops.size())
      Ops[Elt] = InVal;
    return DAG.getNode(ISD::BUILD_VECTOR, InVec.getValueType(),
                       &Ops[0], Ops.size());
  }

  return SDOperand();
}

SDOperand DAGCombiner::visitVINSERT_VECTOR_ELT(SDNode *N) {
  SDOperand InVec = N->getOperand(0);
  SDOperand InVal = N->getOperand(1);
  SDOperand EltNo = N->getOperand(2);
  SDOperand NumElts = N->getOperand(3);
  SDOperand EltType = N->getOperand(4);

  // If the invec is a VBUILD_VECTOR and if EltNo is a constant, build a new
  // vector with the inserted element.
  if (InVec.getOpcode() == ISD::VBUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
    unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue();
    SmallVector<SDOperand, 8> Ops(InVec.Val->op_begin(), InVec.Val->op_end());
    if (Elt < Ops.size()-2)
      Ops[Elt] = InVal;
    return DAG.getNode(ISD::VBUILD_VECTOR, InVec.getValueType(),
                       &Ops[0], Ops.size());
  }

  return SDOperand();
}

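// Illustrative example of the combine done in visitVBUILD_VECTOR below, for a
// four-element vbuild_vector whose inputs are constant-index extracts from two
// vectors A and B (the trailing count/type operands are omitted):
//   vbuild_vector (vextract_vector_elt A, 0), (vextract_vector_elt B, 0),
//                 (vextract_vector_elt A, 1), (vextract_vector_elt B, 1)
//     ->  vvector_shuffle A, B, <0, 4, 1, 5>
// Indices taken from the second vector are offset by the number of elements.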
SDOperand DAGCombiner::visitVBUILD_VECTOR(SDNode *N) {
  unsigned NumInScalars = N->getNumOperands()-2;
  SDOperand NumElts = N->getOperand(NumInScalars);
  SDOperand EltType = N->getOperand(NumInScalars+1);

  // Check to see if this is a VBUILD_VECTOR of a bunch of VEXTRACT_VECTOR_ELT
  // operations.  If so, and if the EXTRACT_ELT vector inputs come from at most
  // two distinct vectors, turn this into a shuffle node.
  SDOperand VecIn1, VecIn2;
  for (unsigned i = 0; i != NumInScalars; ++i) {
    // Ignore undef inputs.
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    // If this input is something other than a VEXTRACT_VECTOR_ELT with a
    // constant index, bail out.
    if (N->getOperand(i).getOpcode() != ISD::VEXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
      VecIn1 = VecIn2 = SDOperand(0, 0);
      break;
    }

    // If the input vector type disagrees with the result of the vbuild_vector,
    // we can't make a shuffle.
    SDOperand ExtractedFromVec = N->getOperand(i).getOperand(0);
    if (*(ExtractedFromVec.Val->op_end()-2) != NumElts ||
        *(ExtractedFromVec.Val->op_end()-1) != EltType) {
      VecIn1 = VecIn2 = SDOperand(0, 0);
      break;
    }

    // Otherwise, remember this.  We allow up to two distinct input vectors.
    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
      continue;

    if (VecIn1.Val == 0) {
      VecIn1 = ExtractedFromVec;
    } else if (VecIn2.Val == 0) {
      VecIn2 = ExtractedFromVec;
    } else {
      // Too many inputs.
      VecIn1 = VecIn2 = SDOperand(0, 0);
      break;
    }
  }

  // If everything is good, we can make a shuffle operation.
  if (VecIn1.Val) {
    SmallVector<SDOperand, 8> BuildVecIndices;
    for (unsigned i = 0; i != NumInScalars; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
        BuildVecIndices.push_back(DAG.getNode(ISD::UNDEF, TLI.getPointerTy()));
        continue;
      }

      SDOperand Extract = N->getOperand(i);

      // If extracting from the first vector, just use the index directly.
      if (Extract.getOperand(0) == VecIn1) {
        BuildVecIndices.push_back(Extract.getOperand(1));
        continue;
      }

      // Otherwise, use InIdx + VecSize
      unsigned Idx = cast<ConstantSDNode>(Extract.getOperand(1))->getValue();
      BuildVecIndices.push_back(DAG.getConstant(Idx+NumInScalars,
                                                TLI.getPointerTy()));
    }

    // Add count and size info.
    BuildVecIndices.push_back(NumElts);
    BuildVecIndices.push_back(DAG.getValueType(TLI.getPointerTy()));

    // Return the new VVECTOR_SHUFFLE node.
    SDOperand Ops[5];
    Ops[0] = VecIn1;
    if (VecIn2.Val) {
      Ops[1] = VecIn2;
    } else {
      // Use an undef vbuild_vector as input for the second operand.
      std::vector<SDOperand> UnOps(NumInScalars,
                                   DAG.getNode(ISD::UNDEF,
                                               cast<VTSDNode>(EltType)->getVT()));
      UnOps.push_back(NumElts);
      UnOps.push_back(EltType);
      Ops[1] = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                           &UnOps[0], UnOps.size());
      AddToWorkList(Ops[1].Val);
    }
    Ops[2] = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                         &BuildVecIndices[0], BuildVecIndices.size());
    Ops[3] = NumElts;
    Ops[4] = EltType;
    return DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector, Ops, 5);
  }

  return SDOperand();
}

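// Illustrative examples of the simplifications done in visitVECTOR_SHUFFLE
// below, using four-element masks chosen arbitrarily:
//   vector_shuffle A, B, <0, 1, 2, 3>  ->  A    (identity on the LHS)
//   vector_shuffle A, B, <4, 5, 6, 7>  ->  B    (identity on the RHS)
//   vector_shuffle A, B, <0, 0, 0, 0>  ->  A    if A is a build_vector whose
//                                               defined elements are all equal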
SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  SDOperand ShufMask = N->getOperand(2);
  unsigned NumElts = ShufMask.getNumOperands();

  // If the shuffle mask is an identity operation on the LHS, return the LHS.
  bool isIdentity = true;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
        cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() != i) {
      isIdentity = false;
      break;
    }
  }
  if (isIdentity) return N->getOperand(0);

  // If the shuffle mask is an identity operation on the RHS, return the RHS.
  isIdentity = true;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
        cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() != i+NumElts) {
      isIdentity = false;
      break;
    }
  }
  if (isIdentity) return N->getOperand(1);

  // Check if the shuffle is a unary shuffle, i.e. one of the vectors is not
  // needed at all.
  bool isUnary = true;
  bool isSplat = true;
  int VecNum = -1;
  unsigned BaseIdx = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF) {
      unsigned Idx = cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue();
      int V = (Idx < NumElts) ? 0 : 1;
      if (VecNum == -1) {
        VecNum = V;
        BaseIdx = Idx;
      } else {
        if (BaseIdx != Idx)
          isSplat = false;
        if (VecNum != V) {
          isUnary = false;
          break;
        }
      }
    }

  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  // Normalize unary shuffle so the RHS is undef.
  if (isUnary && VecNum == 1)
    std::swap(N0, N1);

  // If it is a splat, check if the argument vector is a build_vector with
  // all scalar elements the same.
  if (isSplat) {
    SDNode *V = N0.Val;
    if (V->getOpcode() == ISD::BIT_CONVERT)
      V = V->getOperand(0).Val;
    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      unsigned NumElems = V->getNumOperands()-2;
      if (NumElems > BaseIdx) {
        SDOperand Base;
        bool AllSame = true;
        for (unsigned i = 0; i != NumElems; ++i) {
          if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
            Base = V->getOperand(i);
            break;
          }
        }
        // Splat of <u, u, u, u>, return <u, u, u, u>
        if (!Base.Val)
          return N0;
        for (unsigned i = 0; i != NumElems; ++i) {
          if (V->getOperand(i).getOpcode() != ISD::UNDEF &&
              V->getOperand(i) != Base) {
            AllSame = false;
            break;
          }
        }
        // Splat of <x, x, x, x>, return <x, x, x, x>
        if (AllSame)
          return N0;
      }
    }
  }

  // If it is a unary shuffle or the LHS and the RHS are the same node, turn
  // the RHS into an undef.
  if (isUnary || N0 == N1) {
    if (N0.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISD::UNDEF, N->getValueType(0));
    // Check the SHUFFLE mask, mapping any inputs from the 2nd operand into the
    // first operand.
    SmallVector<SDOperand, 8> MappedOps;
    for (unsigned i = 0, e = ShufMask.getNumOperands(); i != e; ++i) {
      if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF ||
          cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() < NumElts) {
        MappedOps.push_back(ShufMask.getOperand(i));
      } else {
        unsigned NewIdx =
          cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() - NumElts;
        MappedOps.push_back(DAG.getConstant(NewIdx, MVT::i32));
      }
    }
    ShufMask = DAG.getNode(ISD::BUILD_VECTOR, ShufMask.getValueType(),
                           &MappedOps[0], MappedOps.size());
    AddToWorkList(ShufMask.Val);
    return DAG.getNode(ISD::VECTOR_SHUFFLE, N->getValueType(0),
                       N0,
                       DAG.getNode(ISD::UNDEF, N->getValueType(0)),
                       ShufMask);
  }

  return SDOperand();
}

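// visitVVECTOR_SHUFFLE mirrors visitVECTOR_SHUFFLE above (identity masks,
// splats of a uniform vbuild_vector, and unary shuffles), but operates on the
// generic VVECTOR_SHUFFLE form, whose trailing operands carry the element
// count and element type explicitly.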
SDOperand DAGCombiner::visitVVECTOR_SHUFFLE(SDNode *N) {
  SDOperand ShufMask = N->getOperand(2);
  unsigned NumElts = ShufMask.getNumOperands()-2;

  // If the shuffle mask is an identity operation on the LHS, return the LHS.
  bool isIdentity = true;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
        cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() != i) {
      isIdentity = false;
      break;
    }
  }
  if (isIdentity) return N->getOperand(0);

  // If the shuffle mask is an identity operation on the RHS, return the RHS.
  isIdentity = true;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF &&
        cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() != i+NumElts) {
      isIdentity = false;
      break;
    }
  }
  if (isIdentity) return N->getOperand(1);

  // Check if the shuffle is a unary shuffle, i.e. one of the vectors is not
  // needed at all.
  bool isUnary = true;
  bool isSplat = true;
  int VecNum = -1;
  unsigned BaseIdx = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    if (ShufMask.getOperand(i).getOpcode() != ISD::UNDEF) {
      unsigned Idx = cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue();
      int V = (Idx < NumElts) ? 0 : 1;
      if (VecNum == -1) {
        VecNum = V;
        BaseIdx = Idx;
      } else {
        if (BaseIdx != Idx)
          isSplat = false;
        if (VecNum != V) {
          isUnary = false;
          break;
        }
      }
    }

  SDOperand N0 = N->getOperand(0);
  SDOperand N1 = N->getOperand(1);
  // Normalize unary shuffle so the RHS is undef.
  if (isUnary && VecNum == 1)
    std::swap(N0, N1);

  // If it is a splat, check if the argument vector is a build_vector with
  // all scalar elements the same.
  if (isSplat) {
    SDNode *V = N0.Val;

    // If this is a vbit convert that changes the element type of the vector
    // but not the number of vector elements, look through it.  Be careful not
    // to look through conversions that change things like v4f32 to v2f64.
    if (V->getOpcode() == ISD::VBIT_CONVERT) {
      SDOperand ConvInput = V->getOperand(0);
      if (ConvInput.getValueType() == MVT::Vector &&
          NumElts ==
          ConvInput.getConstantOperandVal(ConvInput.getNumOperands()-2))
        V = ConvInput.Val;
    }

    if (V->getOpcode() == ISD::VBUILD_VECTOR) {
      unsigned NumElems = V->getNumOperands()-2;
      if (NumElems > BaseIdx) {
        SDOperand Base;
        bool AllSame = true;
        for (unsigned i = 0; i != NumElems; ++i) {
          if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
            Base = V->getOperand(i);
            break;
          }
        }
        // Splat of <u, u, u, u>, return <u, u, u, u>
        if (!Base.Val)
          return N0;
        for (unsigned i = 0; i != NumElems; ++i) {
          if (V->getOperand(i).getOpcode() != ISD::UNDEF &&
              V->getOperand(i) != Base) {
            AllSame = false;
            break;
          }
        }
        // Splat of <x, x, x, x>, return <x, x, x, x>
        if (AllSame)
          return N0;
      }
    }
  }

  // If it is a unary shuffle or the LHS and the RHS are the same node, turn
  // the RHS into an undef.
  if (isUnary || N0 == N1) {
    // Check the SHUFFLE mask, mapping any inputs from the 2nd operand into the
    // first operand.
    SmallVector<SDOperand, 8> MappedOps;
    for (unsigned i = 0; i != NumElts; ++i) {
      if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF ||
          cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() < NumElts) {
        MappedOps.push_back(ShufMask.getOperand(i));
      } else {
        unsigned NewIdx =
          cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() - NumElts;
        MappedOps.push_back(DAG.getConstant(NewIdx, MVT::i32));
      }
    }
    // Add the type/#elts values.
    MappedOps.push_back(ShufMask.getOperand(NumElts));
    MappedOps.push_back(ShufMask.getOperand(NumElts+1));

    ShufMask = DAG.getNode(ISD::VBUILD_VECTOR, ShufMask.getValueType(),
                           &MappedOps[0], MappedOps.size());
    AddToWorkList(ShufMask.Val);

    // Build the undef vector.
    SDOperand UDVal = DAG.getNode(ISD::UNDEF, MappedOps[0].getValueType());
    for (unsigned i = 0; i != NumElts; ++i)
      MappedOps[i] = UDVal;
    MappedOps[NumElts  ] = *(N0.Val->op_end()-2);
    MappedOps[NumElts+1] = *(N0.Val->op_end()-1);
    UDVal = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                        &MappedOps[0], MappedOps.size());

    return DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                       N0, UDVal, ShufMask,
                       MappedOps[NumElts], MappedOps[NumElts+1]);
  }

  return SDOperand();
}

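// In the shuffle mask built by XformToShuffleWithZero below, an index smaller
// than the element count selects the corresponding element of the original
// vector, while an index equal to the element count selects an element of the
// all-zero vector; that is how the <0xffffffff, 0, 0xffffffff, 0> mask in the
// example becomes <0, 4, 2, 4> for a four-element vector.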
/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to
/// transform a VAND to a vector_shuffle with the destination vector and a
/// zero vector.
/// e.g. VAND V, <0xffffffff, 0, 0xffffffff, 0> ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDOperand DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  SDOperand LHS = N->getOperand(0);
  SDOperand RHS = N->getOperand(1);
  if (N->getOpcode() == ISD::VAND) {
    SDOperand DstVecSize = *(LHS.Val->op_end()-2);
    SDOperand DstVecEVT  = *(LHS.Val->op_end()-1);
    if (RHS.getOpcode() == ISD::VBIT_CONVERT)
      RHS = RHS.getOperand(0);
    if (RHS.getOpcode() == ISD::VBUILD_VECTOR) {
      std::vector<SDOperand> IdxOps;
      unsigned NumOps = RHS.getNumOperands();
      unsigned NumElts = NumOps-2;
      MVT::ValueType EVT = cast<VTSDNode>(RHS.getOperand(NumOps-1))->getVT();
      for (unsigned i = 0; i != NumElts; ++i) {
        SDOperand Elt = RHS.getOperand(i);
        if (!isa<ConstantSDNode>(Elt))
          return SDOperand();
        else if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
          IdxOps.push_back(DAG.getConstant(i, EVT));
        else if (cast<ConstantSDNode>(Elt)->isNullValue())
          IdxOps.push_back(DAG.getConstant(NumElts, EVT));
        else
          return SDOperand();
      }

      // Let's see if the target supports this vector_shuffle.
      if (!TLI.isVectorClearMaskLegal(IdxOps, EVT, DAG))
        return SDOperand();

      // Return the new VVECTOR_SHUFFLE node.
      SDOperand NumEltsNode = DAG.getConstant(NumElts, MVT::i32);
      SDOperand EVTNode = DAG.getValueType(EVT);
      std::vector<SDOperand> Ops;
      LHS = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, LHS, NumEltsNode,
                        EVTNode);
      Ops.push_back(LHS);
      AddToWorkList(LHS.Val);
      std::vector<SDOperand> ZeroOps(NumElts, DAG.getConstant(0, EVT));
      ZeroOps.push_back(NumEltsNode);
      ZeroOps.push_back(EVTNode);
      Ops.push_back(DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                                &ZeroOps[0], ZeroOps.size()));
      IdxOps.push_back(NumEltsNode);
      IdxOps.push_back(EVTNode);
      Ops.push_back(DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                                &IdxOps[0], IdxOps.size()));
      Ops.push_back(NumEltsNode);
      Ops.push_back(EVTNode);
      SDOperand Result = DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                                     &Ops[0], Ops.size());
      if (NumEltsNode != DstVecSize || EVTNode != DstVecEVT) {
        Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
                             DstVecSize, DstVecEVT);
      }
      return Result;
    }
  }
  return SDOperand();
}

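// Illustrative example of the constant folding done by visitVBinOp below
// (element values chosen arbitrarily; trailing count/type operands omitted):
//   vadd (vbuild_vector 1, 2, 3, 4), (vbuild_vector 10, 20, 30, 40)
//     ->  vbuild_vector 11, 22, 33, 44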
/// visitVBinOp - Visit a binary vector operation, like VADD.  IntOp indicates
/// the scalar operation of the vop if it is operating on an integer vector
/// (e.g. ADD) and FPOp indicates the FP version (e.g. FADD).
SDOperand DAGCombiner::visitVBinOp(SDNode *N, ISD::NodeType IntOp,
                                   ISD::NodeType FPOp) {
  MVT::ValueType EltType = cast<VTSDNode>(*(N->op_end()-1))->getVT();
  ISD::NodeType ScalarOp = MVT::isInteger(EltType) ? IntOp : FPOp;
  SDOperand LHS = N->getOperand(0);
  SDOperand RHS = N->getOperand(1);
  SDOperand Shuffle = XformToShuffleWithZero(N);
  if (Shuffle.Val) return Shuffle;

  // If the LHS and RHS are VBUILD_VECTOR nodes, see if we can constant fold
  // this operation.
  if (LHS.getOpcode() == ISD::VBUILD_VECTOR &&
      RHS.getOpcode() == ISD::VBUILD_VECTOR) {
    SmallVector<SDOperand, 8> Ops;
    for (unsigned i = 0, e = LHS.getNumOperands()-2; i != e; ++i) {
      SDOperand LHSOp = LHS.getOperand(i);
      SDOperand RHSOp = RHS.getOperand(i);
      // If these two elements can't be folded, bail out.
      if ((LHSOp.getOpcode() != ISD::UNDEF &&
           LHSOp.getOpcode() != ISD::Constant &&
           LHSOp.getOpcode() != ISD::ConstantFP) ||
          (RHSOp.getOpcode() != ISD::UNDEF &&
           RHSOp.getOpcode() != ISD::Constant &&
           RHSOp.getOpcode() != ISD::ConstantFP))
        break;
      // Can't fold divide by zero.
      if (N->getOpcode() == ISD::VSDIV || N->getOpcode() == ISD::VUDIV) {
        if ((RHSOp.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHSOp.Val)->isNullValue()) ||
            (RHSOp.getOpcode() == ISD::ConstantFP &&
             !cast<ConstantFPSDNode>(RHSOp.Val)->getValue()))
          break;
      }
      Ops.push_back(DAG.getNode(ScalarOp, EltType, LHSOp, RHSOp));
      AddToWorkList(Ops.back().Val);
      assert((Ops.back().getOpcode() == ISD::UNDEF ||
              Ops.back().getOpcode() == ISD::Constant ||
              Ops.back().getOpcode() == ISD::ConstantFP) &&
             "Scalar binop didn't fold!");
    }

    if (Ops.size() == LHS.getNumOperands()-2) {
      Ops.push_back(*(LHS.Val->op_end()-2));
      Ops.push_back(*(LHS.Val->op_end()-1));
      return DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
    }
  }

  return SDOperand();
}

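// Illustrative sketch of what SimplifySelect below does (operands invented):
//   select (setlt a, b), x, y
// is handed to SimplifySelectCC as the equivalent "select_cc setlt a, b, x, y";
// if a simplified select_cc comes back, it is split into a fresh setcc feeding
// a fresh select, since the caller started from a SELECT node.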
SDOperand DAGCombiner::SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2){
  assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");

  SDOperand SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());
  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.Val) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      SDOperand SETCC = DAG.getNode(ISD::SETCC, N0.getValueType(),
                                    SCC.getOperand(0), SCC.getOperand(1),
                                    SCC.getOperand(4));
      AddToWorkList(SETCC.Val);
      return DAG.getNode(ISD::SELECT, SCC.getValueType(), SCC.getOperand(2),
                         SCC.getOperand(3), SETCC);
    }
    return SCC;
  }
  return SDOperand();
}

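// Illustrative example of the load folding done by SimplifySelectOps below
// (operands invented):
//   select C, (load ch, p1), (load ch, p2)
//     ->  load ch, (select C, p1, p2)
// provided both loads use the same chain and loaded type, and the condition
// does not depend on either load.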
/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
/// are the two values being selected between, see if we can simplify the
/// select.  Callers of this should assume that TheSelect is deleted if this
/// returns true.  As such, they should return the appropriate thing (e.g. the
/// node) back to the top-level of the DAG combiner loop to avoid it being
/// looked at.
///
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDOperand LHS,
                                    SDOperand RHS) {

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() == RHS.getOpcode() && LHS.hasOneUse() && RHS.hasOneUse()){
    // If this is a load and the token chain is identical, replace the select
    // of two loads with a load through a select of the address to load from.
    // This triggers in things like "select bool X, 10.0, 123.0" after the FP
    // constants have been dropped into the constant pool.
    if (LHS.getOpcode() == ISD::LOAD &&
        // Token chains must be identical.
        LHS.getOperand(0) == RHS.getOperand(0)) {
      LoadSDNode *LLD = cast<LoadSDNode>(LHS);
      LoadSDNode *RLD = cast<LoadSDNode>(RHS);

      // If this is an EXTLOAD, the VT's must match.
      if (LLD->getLoadedVT() == RLD->getLoadedVT()) {
        // FIXME: this conflates two src values, discarding one.  This is not
        // the right thing to do, but nothing uses srcvalues now.  When they
        // do, turn SrcValue into a list of locations.
        SDOperand Addr;
        if (TheSelect->getOpcode() == ISD::SELECT) {
          // Check that the condition doesn't reach either load.  If so,
          // folding this will induce a cycle into the DAG.
          if (!LLD->isPredecessor(TheSelect->getOperand(0).Val) &&
              !RLD->isPredecessor(TheSelect->getOperand(0).Val)) {
            Addr = DAG.getNode(ISD::SELECT, LLD->getBasePtr().getValueType(),
                               TheSelect->getOperand(0), LLD->getBasePtr(),
                               RLD->getBasePtr());
          }
        } else {
          // Check that the condition doesn't reach either load.  If so,
          // folding this will induce a cycle into the DAG.
          if (!LLD->isPredecessor(TheSelect->getOperand(0).Val) &&
              !RLD->isPredecessor(TheSelect->getOperand(0).Val) &&
              !LLD->isPredecessor(TheSelect->getOperand(1).Val) &&
              !RLD->isPredecessor(TheSelect->getOperand(1).Val)) {
            Addr = DAG.getNode(ISD::SELECT_CC, LLD->getBasePtr().getValueType(),
                               TheSelect->getOperand(0),
                               TheSelect->getOperand(1),
                               LLD->getBasePtr(), RLD->getBasePtr(),
                               TheSelect->getOperand(4));
          }
        }

        if (Addr.Val) {
          SDOperand Load;
          if (LLD->getExtensionType() == ISD::NON_EXTLOAD)
            Load = DAG.getLoad(TheSelect->getValueType(0), LLD->getChain(),
                               Addr, LLD->getSrcValue(),
                               LLD->getSrcValueOffset(),
                               LLD->isVolatile(),
                               LLD->getAlignment());
          else {
            Load = DAG.getExtLoad(LLD->getExtensionType(),
                                  TheSelect->getValueType(0),
                                  LLD->getChain(), Addr, LLD->getSrcValue(),
                                  LLD->getSrcValueOffset(),
                                  LLD->getLoadedVT(),
                                  LLD->isVolatile(),
                                  LLD->getAlignment());
          }
          // Users of the select now use the result of the load.
          CombineTo(TheSelect, Load);

          // Users of the old loads now use the new load's chain.  We know the
          // old-load value is dead now.
          CombineTo(LHS.Val, Load.getValue(0), Load.getValue(1));
          CombineTo(RHS.Val, Load.getValue(0), Load.getValue(1));
          return true;
        }
      }
    }
  }

  return false;
}

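// Illustrative example of the "gzip trick" performed in SimplifySelectCC
// below, assuming 32-bit integer operands:
//   select_cc setlt a, 0, b, 0        ; (a < 0) ? b : 0
//     ->  and (sra a, 31), b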
SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1,
                                        SDOperand N2, SDOperand N3,
                                        ISD::CondCode CC, bool NotExtCompare) {
  MVT::ValueType VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val);
  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.Val);

  // Determine if the condition we're dealing with is constant
  SDOperand SCC = SimplifySetCC(TLI.getSetCCResultTy(), N0, N1, CC, false);
  if (SCC.Val) AddToWorkList(SCC.Val);
  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.Val);

  // fold select_cc true, x, y -> x
  if (SCCC && SCCC->getValue())
    return N2;
  // fold select_cc false, x, y -> y
  if (SCCC && SCCC->getValue() == 0)
    return N3;

  // Check to see if we can simplify the select into an fabs node
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->getValue() == 0.0) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
          N2.getOperand(0) == N3)
        return DAG.getNode(ISD::FABS, VT, N3);
    }
  }
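  // (These match the common C idioms "x >= 0.0 ? x : -x" and
  // "x <= 0.0 ? -x : x".)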

  // Check to see if we can perform the "gzip trick", transforming
  // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
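  // (For i32, for example, "(a < 0) ? b : 0" becomes and(sra(a, 31), b):
  // sra(a, 31) is all ones when a is negative and zero otherwise, so the AND
  // yields b or 0 without a branch.  The "(a < 1) ? a : 0" form is handled
  // too, since when a == 0 both sides of the select are already zero.)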
  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
      MVT::isInteger(N0.getValueType()) &&
      MVT::isInteger(N2.getValueType()) &&
      (N1C->isNullValue() ||                    // (a < 0) ? b : 0
       (N1C->getValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
    MVT::ValueType XType = N0.getValueType();
    MVT::ValueType AType = N2.getValueType();
    if (XType >= AType) {
      // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
      // single-bit constant.
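      // (When A == 1 << k, only bit k of the sign mask matters, so shifting
      // the sign bit of X straight down to position k with SRL and masking
      // gives the same result as the full SRA-based mask.)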
      if (N2C && ((N2C->getValue() & (N2C->getValue()-1)) == 0)) {
        unsigned ShCtV = Log2_64(N2C->getValue());
        ShCtV = MVT::getSizeInBits(XType)-ShCtV-1;
        SDOperand ShCt = DAG.getConstant(ShCtV, TLI.getShiftAmountTy());
        SDOperand Shift = DAG.getNode(ISD::SRL, XType, N0, ShCt);
        AddToWorkList(Shift.Val);
        if (XType > AType) {
          Shift = DAG.getNode(ISD::TRUNCATE, AType, Shift);
          AddToWorkList(Shift.Val);
        }
        return DAG.getNode(ISD::AND, AType, Shift, N2);
      }
      SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
                                    DAG.getConstant(MVT::getSizeInBits(XType)-1,
                                                    TLI.getShiftAmountTy()));
      AddToWorkList(Shift.Val);
      if (XType > AType) {
        Shift = DAG.getNode(ISD::TRUNCATE, AType, Shift);
        AddToWorkList(Shift.Val);
      }
      return DAG.getNode(ISD::AND, AType, Shift, N2);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
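  // (The compare result is known to be 0 or 1 here (ZeroOrOneSetCCResult), so
  // zero-extending it and shifting left by log2 of the constant reproduces
  // "cond ? 2^k : 0" without a select.)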
  if (N2C && N3C && N3C->isNullValue() && isPowerOf2_64(N2C->getValue()) &&
      TLI.getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult) {

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->getValue() == 1)
      return SDOperand();

    // Get a SetCC of the condition
    // FIXME: Should probably make sure that setcc is legal if we ever have a
    // target where it isn't.
    SDOperand Temp, SCC;
    // cast from setcc result type to select result type
    if (AfterLegalize) {
      SCC = DAG.getSetCC(TLI.getSetCCResultTy(), N0, N1, CC);
      if (N2.getValueType() < SCC.getValueType())
        Temp = DAG.getZeroExtendInReg(SCC, N2.getValueType());
      else
        Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getValueType(), SCC);
    } else {
      SCC = DAG.getSetCC(MVT::i1, N0, N1, CC);
      Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getValueType(), SCC);
    }
    AddToWorkList(SCC.Val);
    AddToWorkList(Temp.Val);

    if (N2C->getValue() == 1)
      return Temp;
    // shl setcc result by log2 n2c
    return DAG.getNode(ISD::SHL, N2.getValueType(), Temp,
                       DAG.getConstant(Log2_64(N2C->getValue()),
                                       TLI.getShiftAmountTy()));
  }

  // Check to see if this is the equivalent of setcc
  // FIXME: Turn all of these into setcc if setcc is legal;
  // otherwise, go ahead with the folds.
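  // (Note: the whole block below is intentionally disabled by the leading
  // "0 &&" until the setcc legality question in the FIXME above is settled.)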
  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getValue() == 1ULL)) {
    MVT::ValueType XType = N0.getValueType();
    if (TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultTy())) {
      SDOperand Res = DAG.getSetCC(TLI.getSetCCResultTy(), N0, N1, CC);
      if (Res.getValueType() != VT)
        Res = DAG.getNode(ISD::ZERO_EXTEND, VT, Res);
      return Res;
    }

    // seteq X, 0 -> srl (ctlz X, log2(size(X)))
    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
        TLI.isOperationLegal(ISD::CTLZ, XType)) {
      SDOperand Ctlz = DAG.getNode(ISD::CTLZ, XType, N0);
      return DAG.getNode(ISD::SRL, XType, Ctlz,
                         DAG.getConstant(Log2_32(MVT::getSizeInBits(XType)),
                                         TLI.getShiftAmountTy()));
    }
    // setgt X, 0 -> srl (and (-X, ~X), size(X)-1)
    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
      SDOperand NegN0 = DAG.getNode(ISD::SUB, XType, DAG.getConstant(0, XType),
                                    N0);
      SDOperand NotN0 = DAG.getNode(ISD::XOR, XType, N0,
                                    DAG.getConstant(~0ULL, XType));
      return DAG.getNode(ISD::SRL, XType,
                         DAG.getNode(ISD::AND, XType, NegN0, NotN0),
                         DAG.getConstant(MVT::getSizeInBits(XType)-1,
                                         TLI.getShiftAmountTy()));
    }
    // setgt X, -1 -> xor (srl (X, size(X)-1), 1)
    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
      SDOperand Sign = DAG.getNode(ISD::SRL, XType, N0,
                                   DAG.getConstant(MVT::getSizeInBits(XType)-1,
                                                   TLI.getShiftAmountTy()));
      return DAG.getNode(ISD::XOR, XType, Sign, DAG.getConstant(1, XType));
    }
  }

  // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
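  // (Y is 0 when X >= 0 and all ones when X < 0, so add-then-xor leaves a
  // non-negative X unchanged and maps a negative X to ~(X-1) == -X, giving
  // |X| without a branch.)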
  if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
      N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
      N2.getOperand(0) == N1 && MVT::isInteger(N0.getValueType())) {
    MVT::ValueType XType = N0.getValueType();
    SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
                                  DAG.getConstant(MVT::getSizeInBits(XType)-1,
                                                  TLI.getShiftAmountTy()));
    SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift);
    AddToWorkList(Shift.Val);
    AddToWorkList(Add.Val);
    return DAG.getNode(ISD::XOR, XType, Add, Shift);
  }
  // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
  if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
      N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
    if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
      MVT::ValueType XType = N0.getValueType();
      if (SubC->isNullValue() && MVT::isInteger(XType)) {
        SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
                                      DAG.getConstant(MVT::getSizeInBits(XType)-1,
                                                      TLI.getShiftAmountTy()));
        SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift);
        AddToWorkList(Shift.Val);
        AddToWorkList(Add.Val);
        return DAG.getNode(ISD::XOR, XType, Add, Shift);
      }
    }
  }

  return SDOperand();
}

/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
SDOperand DAGCombiner::SimplifySetCC(MVT::ValueType VT, SDOperand N0,
                                     SDOperand N1, ISD::CondCode Cond,
                                     bool foldBooleans) {
  TargetLowering::DAGCombinerInfo
    DagCombineInfo(DAG, !AfterLegalize, false, this);
  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo);
}

/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
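/// (Illustrative example: a signed i32 divide by 3 can be lowered to a
/// multiply-high by the magic constant 0x55555556 plus a small shift/add
/// fix-up for the sign, which is usually much cheaper than a hardware divide.
/// The actual constants and fix-up sequence are chosen by TLI.BuildSDIV.)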
SDOperand DAGCombiner::BuildSDIV(SDNode *N) {
  std::vector<SDNode*> Built;
  SDOperand S = TLI.BuildSDIV(N, DAG, &Built);

  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
       ii != ee; ++ii)
    AddToWorkList(*ii);
  return S;
}

/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand DAGCombiner::BuildUDIV(SDNode *N) {
  std::vector<SDNode*> Built;
  SDOperand S = TLI.BuildUDIV(N, DAG, &Built);

  for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
       ii != ee; ++ii)
    AddToWorkList(*ii);
  return S;
}

/// FindBaseOffset - Return true if base is known not to alias with anything
/// but itself.  Provides base object and offset as results.
static bool FindBaseOffset(SDOperand Ptr, SDOperand &Base, int64_t &Offset) {
  // Assume it is a primitive operation.
  Base = Ptr; Offset = 0;

  // If it's adding a simple constant then integrate the offset.
  if (Base.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
      Base = Base.getOperand(0);
      Offset += C->getValue();
    }
  }
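  // (For example, a pointer of the form (add FrameIndex, 8) yields the
  // FrameIndex node as Base and 8 as Offset.)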

  // If it's any of the following then it can't alias with anything but itself.
  return isa<FrameIndexSDNode>(Base) ||
         isa<ConstantPoolSDNode>(Base) ||
         isa<GlobalAddressSDNode>(Base);
}

/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
bool DAGCombiner::isAlias(SDOperand Ptr1, int64_t Size1,
                          const Value *SrcValue1, int SrcValueOffset1,
                          SDOperand Ptr2, int64_t Size2,
                          const Value *SrcValue2, int SrcValueOffset2)
{
  // If they are the same then they must be aliases.
  if (Ptr1 == Ptr2) return true;

  // Gather base node and offset information.
  SDOperand Base1, Base2;
  int64_t Offset1, Offset2;
  bool KnownBase1 = FindBaseOffset(Ptr1, Base1, Offset1);
  bool KnownBase2 = FindBaseOffset(Ptr2, Base2, Offset2);

  // If they have the same base address then...
  if (Base1 == Base2) {
    // Check to see if the addresses overlap.
    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
  }
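  // (For instance, two 4-byte accesses at offsets 0 and 4 from the same base
  // do not overlap, while accesses at offsets 0 and 2 do.)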

  // If we know both bases then they can't alias.
  if (KnownBase1 && KnownBase2) return false;

  if (CombinerGlobalAA) {
    // Use alias analysis information.
    int Overlap1 = Size1 + SrcValueOffset1 + Offset1;
    int Overlap2 = Size2 + SrcValueOffset2 + Offset2;
    AliasAnalysis::AliasResult AAResult =
      AA.alias(SrcValue1, Overlap1, SrcValue2, Overlap2);
    if (AAResult == AliasAnalysis::NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// FindAliasInfo - Extracts the relevant alias information from the memory
/// node.  Returns true if the operand was a load.
bool DAGCombiner::FindAliasInfo(SDNode *N,
                                SDOperand &Ptr, int64_t &Size,
                                const Value *&SrcValue, int &SrcValueOffset) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    Size = MVT::getSizeInBits(LD->getLoadedVT()) >> 3;
    SrcValue = LD->getSrcValue();
    SrcValueOffset = LD->getSrcValueOffset();
    return true;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    Size = MVT::getSizeInBits(ST->getStoredVT()) >> 3;
    SrcValue = ST->getSrcValue();
    SrcValueOffset = ST->getSrcValueOffset();
  } else {
    assert(0 && "FindAliasInfo expected a memory operand");
  }

  return false;
}

/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
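/// For example, when starting from a store whose chain is a TokenFactor of
/// several loads from unrelated stack slots, the walk skips past the
/// non-aliasing loads and records only the chain operands that might alias
/// the original node.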
void DAGCombiner::GatherAllAliases(SDNode *N, SDOperand OriginalChain,
                                   SmallVector<SDOperand, 8> &Aliases) {
  SmallVector<SDOperand, 8> Chains;     // List of chains to visit.
  std::set<SDNode *> Visited;           // Visited node set.

  // Get alias information for node.
  SDOperand Ptr;
  int64_t Size;
  const Value *SrcValue;
  int SrcValueOffset;
  bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset);

  // Starting off.
  Chains.push_back(OriginalChain);

  // Look at each chain and determine if it is an alias.  If so, add it to the
  // aliases list.  If not, then continue up the chain looking for the next
  // candidate.
  while (!Chains.empty()) {
    SDOperand Chain = Chains.back();
    Chains.pop_back();

    // Don't bother if we've been here before.
    if (Visited.find(Chain.Val) != Visited.end()) continue;
    Visited.insert(Chain.Val);

    switch (Chain.getOpcode()) {
    case ISD::EntryToken:
      // Entry token is ideal chain operand, but handled in FindBetterChain.
      break;

    case ISD::LOAD:
    case ISD::STORE: {
      // Get alias information for Chain.
      SDOperand OpPtr;
      int64_t OpSize;
      const Value *OpSrcValue;
      int OpSrcValueOffset;
      bool IsOpLoad = FindAliasInfo(Chain.Val, OpPtr, OpSize,
                                    OpSrcValue, OpSrcValueOffset);

      // If the chain is an alias, then stop here.
      if (!(IsLoad && IsOpLoad) &&
          isAlias(Ptr, Size, SrcValue, SrcValueOffset,
                  OpPtr, OpSize, OpSrcValue, OpSrcValueOffset)) {
        Aliases.push_back(Chain);
      } else {
        // Look further up the chain.
        Chains.push_back(Chain.getOperand(0));
        // Clean up old chain.
        AddToWorkList(Chain.Val);
      }
      break;
    }

    case ISD::TokenFactor:
      // We have to check each of the operands of the token factor, so we
      // queue them up.  Adding the operands to the queue (stack) in reverse
      // order maintains the original order and increases the likelihood that
      // getNode will find a matching token factor (CSE).
      for (unsigned n = Chain.getNumOperands(); n;)
        Chains.push_back(Chain.getOperand(--n));
      // Eliminate the token factor if we can.
      AddToWorkList(Chain.Val);
      break;

    default:
      // For all other instructions we will just have to take what we can get.
      Aliases.push_back(Chain);
      break;
    }
  }
}

/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
/// for a better chain (an aliasing node).
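/// Re-chaining a memory operation to the trimmed result (the entry token, a
/// single aliasing node, or a smaller TokenFactor) drops false chain
/// dependencies and lets independent loads and stores be reordered.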
SDOperand DAGCombiner::FindBetterChain(SDNode *N, SDOperand OldChain) {
  SmallVector<SDOperand, 8> Aliases;  // Ops for replacing token factor.

  // Accumulate all the aliases to this node.
  GatherAllAliases(N, OldChain, Aliases);

  if (Aliases.size() == 0) {
    // If no operands then chain to entry token.
    return DAG.getEntryNode();
  } else if (Aliases.size() == 1) {
    // If a single operand then chain to it.  We don't need to revisit it.
    return Aliases[0];
  }

  // Construct a custom tailored token factor.
  SDOperand NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                   &Aliases[0], Aliases.size());

  // Make sure the old chain gets cleaned up.
  if (NewChain != OldChain) AddToWorkList(OldChain.Val);

  return NewChain;
}

// SelectionDAG::Combine - This is the entry point for the file.
//
void SelectionDAG::Combine(bool RunningAfterLegalize, AliasAnalysis &AA) {
  if (!RunningAfterLegalize && ViewDAGCombine1)
    viewGraph();
  if (RunningAfterLegalize && ViewDAGCombine2)
    viewGraph();

  // Run the DAGCombiner over the whole DAG; this is the main entry point to
  // the DAGCombiner class.
  DAGCombiner(*this, AA).Run(RunningAfterLegalize);
}