Semantically revert r236031, which is not a good idea for in-order targets.

At the very least, it should be guarded by some kind of target hook.
It also introduced catastrophic compile-time and code-quality
regressions on some out-of-tree targets (test case still being
reduced/sanitized).

Sanjay agreed with reverting this patch until these issues can be
resolved.

llvm-svn: 236199
This commit is contained in:
Owen Anderson 2015-04-30 04:06:32 +00:00
parent 66beaa9349
commit d8a029c81b
2 changed files with 0 additions and 73 deletions

View File

@ -7647,33 +7647,6 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
return SDValue();
}
/// Try to shorten the critical path of a chain of three dependent binary
/// operations of the same opcode by rewriting it as two independent
/// operations feeding one dependent operation:
///   (op (op (op z, w), y), x) -> (op (op z, w), (op x, y))
/// Returns the replacement node, or an empty SDValue if no fold applies.
static SDValue ReassociateBinops(SDNode *N, SelectionDAG &DAG) {
  assert(N->getNumOperands() == 2 && "Invalid node for binop reassociation");
  unsigned Opc = N->getOpcode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  // Canonicalize so that a chain of this opcode sits on the LHS; the
  // commuted node will be revisited and matched by the fold below.
  if (LHS.getOpcode() != Opc && RHS.getOpcode() == Opc)
    return DAG.getNode(Opc, Loc, VT, RHS, LHS);

  // Match (op (op (op z, w), y), x): the LHS must be a single-use chain of
  // this opcode and the RHS must not itself continue the chain.
  if (LHS.getOpcode() != Opc || !LHS.hasOneUse() || RHS.getOpcode() == Opc)
    return SDValue();
  SDValue Inner = LHS.getOperand(0); // (op z, w)
  if (Inner.getOpcode() != Opc)
    return SDValue();
  SDValue Y = LHS.getOperand(1);
  // Pair the outermost operand with y so the new node does not depend on
  // the inner (op z, w) computation.
  SDValue Independent = DAG.getNode(Opc, Loc, VT, RHS, Y);
  return DAG.getNode(Opc, Loc, VT, Inner, Independent);
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@ -7808,10 +7781,6 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N0.getOperand(0), DAG.getConstantFP(4.0, DL, VT));
}
}
if (SDValue Reassociated = ReassociateBinops(N, DAG))
return Reassociated;
} // enable-unsafe-fp-math
// FADD -> FMA combines:

View File

@ -114,45 +114,3 @@ define float @test11(float %a) {
ret float %t2
}
; Verify that the first two adds are independent; the destination registers
; are used as source registers for the third add.
; Left-leaning chain: ((a + b) + c) + d.
define float @reassociate_adds1(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: reassociate_adds1:
; CHECK: # BB#0:
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%add0 = fadd float %a, %b
%add1 = fadd float %add0, %c
%add2 = fadd float %add1, %d
ret float %add2
}
; Same chain as reassociate_adds1, but the second fadd carries the chain on
; its RHS (%c + %add0); the combine canonicalizes the chain to the LHS, so
; the expected assembly is unchanged.
define float @reassociate_adds2(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: reassociate_adds2:
; CHECK: # BB#0:
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%add0 = fadd float %a, %b
%add1 = fadd float %c, %add0
%add2 = fadd float %add1, %d
ret float %add2
}
; Same as reassociate_adds1, but the final fadd carries the chain on its RHS
; (%d + %add1); canonicalization moves the chain to the LHS, so the expected
; assembly is unchanged.
define float @reassociate_adds3(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: reassociate_adds3:
; CHECK: # BB#0:
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%add0 = fadd float %a, %b
%add1 = fadd float %add0, %c
%add2 = fadd float %d, %add1
ret float %add2
}