; RUN: llc -mtriple=x86_64-pc-linux -x86-cmov-converter=true -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap %s
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; This test checks that the x86-cmov-converter optimization transforms CMOV
;; instructions into branches when it is profitable.
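;;
;; As a rough illustration (not checked by this test, and with made-up
;; registers), a compare + CMOV sequence such as:
;;   cmpl   %esi, %edi
;;   cmovgl %ecx, %eax        # %eax = (%edi > %esi) ? %ecx : %eax
;; is rewritten into a compare-and-branch diamond along the lines of:
;;   cmpl %esi, %edi
;;   jle  .Lsink              # not greater: keep the old %eax
;;   movl %ecx, %eax          # greater: take %ecx
;; .Lsink:
;;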
;; There are 5 cases below:
;; 1. CmovInCriticalPath:
;;      CMOV depends on the condition and it is in the hot path.
;;      Thus, it is worth transforming.
;;
;; 2. CmovNotInCriticalPath:
;;      Similar test to (1), except that the CMOV is not in the hot path.
;;      Thus, it is not worth transforming.
;;
;; 3. MaxIndex:
;;      A maximum-calculation algorithm that looks for the max index;
;;      calculating the CMOV value (the loop index) is cheaper than
;;      calculating the CMOV condition (which loads both a[i] and a[t]).
;;      Thus, it is worth transforming.
;;
;; 4. MaxValue:
;;      A maximum-calculation algorithm that looks for the max value;
;;      calculating the CMOV value is not cheaper than calculating the
;;      CMOV condition.
;;      Thus, it is not worth transforming.
;;
;; 5. BinarySearch:
;;      A binary-search CMOV is usually not predictable as a branch.
;;      Thus, it is not worth transforming.
;;
;; The test was created using the following command line:
;; > clang -S -O2 -m64 -fno-vectorize -fno-unroll-loops -emit-llvm foo.c -o -
;; Where foo.c is:
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;void CmovInHotPath(int n, int a, int b, int *c, int *d) {
;;  for (int i = 0; i < n; i++) {
;;    int t = c[i] + 1;
;;    if (c[i] * a > b)
;;      t = 10;
;;    c[i] = (c[i] + 1) * t;
;;  }
;;}
;;
;;
;;void CmovNotInHotPath(int n, int a, int b, int *c, int *d) {
;;  for (int i = 0; i < n; i++) {
;;    int t = c[i];
;;    if (c[i] * a > b)
;;      t = 10;
;;    c[i] = t;
;;    d[i] /= b;
;;  }
;;}
;;
;;
;;int MaxIndex(int n, int *a) {
;;  int t = 0;
;;  for (int i = 1; i < n; i++) {
;;    if (a[i] > a[t])
;;      t = i;
;;  }
;;  return a[t];
;;}
;;
;;
;;int MaxValue(int n, int *a) {
;;  int t = a[0];
;;  for (int i = 1; i < n; i++) {
;;    if (a[i] > t)
;;      t = a[i];
;;  }
;;  return t;
;;}
;;
;;typedef struct Node Node;
;;struct Node {
;;  unsigned Val;
;;  Node *Right;
;;  Node *Left;
;;};
;;
;;unsigned BinarySearch(unsigned Mask, Node *Curr, Node *Next) {
;;  while (Curr->Val > Next->Val) {
;;    Curr = Next;
;;    if (Mask & (0x1 << Curr->Val))
;;      Next = Curr->Right;
;;    else
;;      Next = Curr->Left;
;;  }
;;  return Curr->Val;
;;}
;;
;;
;;void SmallGainPerLoop(int n, int a, int b, int *c, int *d) {
;;  for (int i = 0; i < n; i++) {
;;    int t = c[i];
;;    if (c[i] * a > b)
;;      t = 10;
;;    c[i] = t;
;;  }
;;}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%struct.Node = type { i32, %struct.Node*, %struct.Node* }
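; Field order matches the C struct above: i32 Val, then the Right and Left
; pointers, so in the getelementptrs below operand "i32 1" is Right and
; "i32 2" is Left.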

; CHECK-LABEL: CmovInHotPath
; CHECK-NOT: cmov
; CHECK: jg

define void @CmovInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture readnone %d) #0 {
entry:
  %cmp14 = icmp sgt i32 %n, 0
  br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %0, 1
  %mul = mul nsw i32 %0, %a
  %cmp3 = icmp sgt i32 %mul, %b
  %. = select i1 %cmp3, i32 10, i32 %add
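  ; The select above becomes a CMOV on the loop's critical path (its result
  ; feeds the multiply and the store below), which is case (1) from the
  ; header, so the pass is expected to emit the jg checked above instead.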
  %mul7 = mul nsw i32 %., %add
  store i32 %mul7, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; CHECK-LABEL: CmovNotInHotPath
; CHECK: cmovg

define void @CmovNotInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture %d) #0 {
entry:
  %cmp18 = icmp sgt i32 %n, 0
  br i1 %cmp18, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %mul = mul nsw i32 %0, %a
  %cmp3 = icmp sgt i32 %mul, %b
  %. = select i1 %cmp3, i32 10, i32 %0
  store i32 %., i32* %arrayidx, align 4
  %arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx7, align 4
  %div = sdiv i32 %1, %b
  store i32 %div, i32* %arrayidx7, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; CHECK-LABEL: MaxIndex
; CHECK-NOT: cmov
; CHECK: jg

define i32 @MaxIndex(i32 %n, i32* nocapture readonly %a) #0 {
entry:
  %cmp14 = icmp sgt i32 %n, 1
  br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext i32 %n to i64
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %phitmp = sext i32 %i.0.t.0 to i64
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %t.0.lcssa = phi i64 [ 0, %entry ], [ %phitmp, %for.cond.cleanup.loopexit ]
  %arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %t.0.lcssa
  %0 = load i32, i32* %arrayidx5, align 4
  ret i32 %0

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
  %t.015 = phi i32 [ %i.0.t.0, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %idxprom1 = sext i32 %t.015 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
  %2 = load i32, i32* %arrayidx2, align 4
  %cmp3 = icmp sgt i32 %1, %2
  %3 = trunc i64 %indvars.iv to i32
  %i.0.t.0 = select i1 %cmp3, i32 %3, i32 %t.015
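  ; The select above is case (3) from the header: its value operands (the loop
  ; index) are cheap, while its condition depends on the load of a[t], whose
  ; address in turn depends on the previous iteration's select result.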
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; CHECK-LABEL: MaxValue
; CHECK-NOT: jg
; CHECK: cmovg

define i32 @MaxValue(i32 %n, i32* nocapture readonly %a) #0 {
entry:
  %0 = load i32, i32* %a, align 4
  %cmp13 = icmp sgt i32 %n, 1
  br i1 %cmp13, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %t.0.lcssa = phi i32 [ %0, %entry ], [ %.t.0, %for.body ]
  ret i32 %t.0.lcssa

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
  %t.014 = phi i32 [ %.t.0, %for.body ], [ %0, %for.body.preheader ]
  %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx1, align 4
  %cmp2 = icmp sgt i32 %1, %t.014
  %.t.0 = select i1 %cmp2, i32 %1, i32 %t.014
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; CHECK-LABEL: BinarySearch
; CHECK: set

define i32 @BinarySearch(i32 %Mask, %struct.Node* nocapture readonly %Curr, %struct.Node* nocapture readonly %Next) #0 {
entry:
  %Val8 = getelementptr inbounds %struct.Node, %struct.Node* %Curr, i64 0, i32 0
  %0 = load i32, i32* %Val8, align 8
  %Val19 = getelementptr inbounds %struct.Node, %struct.Node* %Next, i64 0, i32 0
  %1 = load i32, i32* %Val19, align 8
  %cmp10 = icmp ugt i32 %0, %1
  br i1 %cmp10, label %while.body, label %while.end

while.body:                                       ; preds = %entry, %while.body
  %2 = phi i32 [ %4, %while.body ], [ %1, %entry ]
  %Next.addr.011 = phi %struct.Node* [ %3, %while.body ], [ %Next, %entry ]
  %shl = shl i32 1, %2
  %and = and i32 %shl, %Mask
  %tobool = icmp eq i32 %and, 0
  %Left = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 2
  %Right = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 1
  %Left.sink = select i1 %tobool, %struct.Node** %Left, %struct.Node** %Right
  %3 = load %struct.Node*, %struct.Node** %Left.sink, align 8
  %Val1 = getelementptr inbounds %struct.Node, %struct.Node* %3, i64 0, i32 0
  %4 = load i32, i32* %Val1, align 8
  %cmp = icmp ugt i32 %2, %4
  br i1 %cmp, label %while.body, label %while.end

while.end:                                        ; preds = %while.body, %entry
  %.lcssa = phi i32 [ %0, %entry ], [ %2, %while.body ]
  ret i32 %.lcssa
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; The following test checks that the x86-cmov-converter optimization
;; transforms CMOV instructions into branches correctly.
;;
;; MBB:
;;   cond = cmp ...
;;   v1 = CMOVgt t1, f1, cond
;;   v2 = CMOVle s1, f2, cond
;;
;; Where: t1 = 11, f1 = 22, f2 = a
;;
;; After CMOV transformation
;; -------------------------
;; MBB:
;;   cond = cmp ...
;;   ja %SinkMBB
;;
;; FalseMBB:
;;   jmp %SinkMBB
;;
;; SinkMBB:
;;   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
;;   %v2 = phi[%f1, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
;;                                          ; true-value with false-value
;;                                          ; Phi instruction cannot use
;;                                          ; previous Phi instruction result
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; CHECK-LABEL: Transform
; CHECK-NOT: cmov
; CHECK: divl [[a:%[0-9a-z]*]]
; CHECK: movl $11, [[s1:%[0-9a-z]*]]
; CHECK: movl [[a]], [[s2:%[0-9a-z]*]]
; CHECK: cmpl [[a]], %edx
; CHECK: ja [[SinkBB:.*]]
; CHECK: [[FalseBB:.*]]:
; CHECK: movl $22, [[s1]]
; CHECK: movl $22, [[s2]]
; CHECK: [[SinkBB]]:
; CHECK: ja

define void @Transform(i32 *%arr, i32 *%arr2, i32 %a, i32 %b, i32 %c, i32 %n) #0 {
entry:
  %cmp10 = icmp ugt i32 0, %n
  br i1 %cmp10, label %while.body, label %while.end

while.body:                                       ; preds = %entry, %while.body
  %i = phi i32 [ %i_inc, %while.body ], [ 0, %entry ]
  %arr_i = getelementptr inbounds i32, i32* %arr, i32 %i
  %x = load i32, i32* %arr_i, align 4
  %div = udiv i32 %x, %a
  %cond = icmp ugt i32 %div, %a
  %condOpp = icmp ule i32 %div, %a
  %s1 = select i1 %cond, i32 11, i32 22
  %s2 = select i1 %condOpp, i32 %s1, i32 %a
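  ; %s2 uses the opposite condition (%condOpp), the CMOVle of the sketch
  ; above: the converter swaps its true/false values, and its phi must use the
  ; raw false value rather than the result of the previous phi.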
  %sum = urem i32 %s1, %s2
  store i32 %sum, i32* %arr_i, align 4
  %i_inc = add i32 %i, 1
  %cmp = icmp ugt i32 %i_inc, %n
  br i1 %cmp, label %while.body, label %while.end

while.end:                                        ; preds = %while.body, %entry
  ret void
}

; Test that we will always convert a cmov with a memory operand into a branch,
; even outside of a loop.
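; A rough sketch of the lowering the CHECK lines below describe (register
; assignments here just follow the SysV argument order and are illustrative,
; not additional checks):
;   movl %edx, %eax      # speculatively place %x in the result register
;   cmpl %esi, %edi
;   ja   .Lsink          # %a > %b: keep %x and skip the load entirely
;   movl (%rcx), %eax    # otherwise the result is the load from %y
; .Lsink:
;   retq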
define i32 @test_cmov_memoperand(i32 %a, i32 %b, i32 %x, i32* %y) #0 {
; CHECK-LABEL: test_cmov_memoperand:
entry:
  %cond = icmp ugt i32 %a, %b
; CHECK: movl %edx, %eax
; CHECK: cmpl
  %load = load i32, i32* %y
  %z = select i1 %cond, i32 %x, i32 %load
; CHECK-NOT: cmov
; CHECK: ja [[FALSE_BB:.*]]
; CHECK: movl (%rcx), %eax
; CHECK: [[FALSE_BB]]:
  ret i32 %z
}

; Test that we can convert a group of cmovs where only one has a memory
; operand.
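; All three selects share %cond, so the converter can handle them as a single
; group behind one branch instead of emitting three separate branches.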
define i32 @test_cmov_memoperand_in_group(i32 %a, i32 %b, i32 %x, i32* %y.ptr) #0 {
; CHECK-LABEL: test_cmov_memoperand_in_group:
entry:
  %cond = icmp ugt i32 %a, %b
; CHECK: movl %edx, %eax
; CHECK: cmpl
  %y = load i32, i32* %y.ptr
  %z1 = select i1 %cond, i32 %x, i32 %a
  %z2 = select i1 %cond, i32 %x, i32 %y
  %z3 = select i1 %cond, i32 %x, i32 %b
; CHECK-NOT: cmov
; CHECK: ja [[FALSE_BB:.*]]
; CHECK-DAG: movl %{{.*}}, %[[R1:.*]]
; CHECK-DAG: movl (%r{{..}}), %[[R2:.*]]
; CHECK-DAG: movl %{{.*}} %eax
; CHECK: [[FALSE_BB]]:
; CHECK: addl
; CHECK-DAG: %[[R1]]
; CHECK-DAG: ,
; CHECK-DAG: %eax
; CHECK-DAG: addl
; CHECK-DAG: %[[R2]]
; CHECK-DAG: ,
; CHECK-DAG: %eax
; CHECK: retq
  %s1 = add i32 %z1, %z2
  %s2 = add i32 %s1, %z3
  ret i32 %s2
}

; Same as before, but with the operands reversed in the select with a load.
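; Reversing the operands flips which side of the branch needs each value, so
; the emitted branch is inverted: the CHECK lines below expect jbe where the
; previous tests expected ja.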
define i32 @test_cmov_memoperand_in_group2(i32 %a, i32 %b, i32 %x, i32* %y.ptr) #0 {
; CHECK-LABEL: test_cmov_memoperand_in_group2:
entry:
  %cond = icmp ugt i32 %a, %b
; CHECK: movl %edx, %eax
; CHECK: cmpl
  %y = load i32, i32* %y.ptr
  %z2 = select i1 %cond, i32 %a, i32 %x
  %z1 = select i1 %cond, i32 %y, i32 %x
  %z3 = select i1 %cond, i32 %b, i32 %x
; CHECK-NOT: cmov
; CHECK: jbe [[FALSE_BB:.*]]
; CHECK-DAG: movl %{{.*}}, %[[R1:.*]]
; CHECK-DAG: movl (%r{{..}}), %[[R2:.*]]
; CHECK-DAG: movl %{{.*}} %eax
; CHECK: [[FALSE_BB]]:
; CHECK: addl
; CHECK-DAG: %[[R1]]
; CHECK-DAG: ,
; CHECK-DAG: %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK-DAG: addl
|
|
|
|
; CHECK-DAG: %[[R2]]
|
|
|
|
; CHECK-DAG: ,
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK-DAG: %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK: retq
|
|
|
|
%s1 = add i32 %z1, %z2
|
|
|
|
%s2 = add i32 %s1, %z3
|
|
|
|
ret i32 %s2
|
|
|
|
}
|
|
|
|
|
|
|
|
; Test that we don't convert a group of cmovs with conflicting directions of
|
|
|
|
; loads.
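; A hypothetical C equivalent of the IR below (a sketch, not from the original
; test source), showing why the two cmovs pull their loads in opposite
; directions:
;;int conflicting_dir(unsigned a, unsigned b, int x, int *y1p, int *y2p) {
;;  int y1 = *y1p, y2 = *y2p;
;;  int z1 = (a > b) ? x : y1;  /* load needed on the false path */
;;  int z2 = (a > b) ? y2 : x;  /* load needed on the true path */
;;  return z1 + z2;             /* no single branch layout skips both loads */
;;}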
|
|
|
|
define i32 @test_cmov_memoperand_conflicting_dir(i32 %a, i32 %b, i32 %x, i32* %y1.ptr, i32* %y2.ptr) #0 {
|
|
|
|
; CHECK-LABEL: test_cmov_memoperand_conflicting_dir:
|
|
|
|
entry:
|
|
|
|
%cond = icmp ugt i32 %a, %b
|
|
|
|
; CHECK: cmpl
|
|
|
|
%y1 = load i32, i32* %y1.ptr
|
|
|
|
%y2 = load i32, i32* %y2.ptr
|
|
|
|
%z1 = select i1 %cond, i32 %x, i32 %y1
|
|
|
|
%z2 = select i1 %cond, i32 %y2, i32 %x
|
|
|
|
; CHECK: cmoval
|
|
|
|
; CHECK: cmoval
|
|
|
|
%s1 = add i32 %z1, %z2
|
|
|
|
ret i32 %s1
|
|
|
|
}
|
|
|
|
|
|
|
|
; Test that we can convert a group of cmovs where only one has a memory
|
2017-08-20 07:35:50 +08:00
|
|
|
; operand and where that memory operand's registers come from a prior cmov in
|
|
|
|
; the group.
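; A hypothetical C equivalent of the IR below (a sketch, not from the original
; test source); the branch form can skip the load entirely when a > b:
;;int reuse_for_addr(unsigned a, unsigned b, int *x, int *y) {
;;  int *p = (a > b) ? x : y;  /* first cmov selects the address */
;;  int v = *p;                /* load through the selected pointer */
;;  return (a > b) ? a : v;    /* second cmov consumes the loaded value */
;;}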
|
2017-08-19 13:01:19 +08:00
|
|
|
define i32 @test_cmov_memoperand_in_group_reuse_for_addr(i32 %a, i32 %b, i32* %x, i32* %y) #0 {
|
|
|
|
; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr:
|
|
|
|
entry:
|
|
|
|
%cond = icmp ugt i32 %a, %b
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl %edi, %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK: cmpl
|
|
|
|
%p = select i1 %cond, i32* %x, i32* %y
|
|
|
|
%load = load i32, i32* %p
|
|
|
|
%z = select i1 %cond, i32 %a, i32 %load
|
|
|
|
; CHECK-NOT: cmov
|
|
|
|
; CHECK: ja [[FALSE_BB:.*]]
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl (%r{{..}}), %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK: [[FALSE_BB]]:
|
|
|
|
; CHECK: retq
|
|
|
|
ret i32 %z
|
|
|
|
}
|
|
|
|
|
|
|
|
; Test that we can convert a group of two cmovs with memory operands where one
|
|
|
|
; uses the result of the other as part of the address.
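; A hypothetical C equivalent of the IR below (a sketch, not from the original
; test source); the first cmov's result feeds the second load's address:
;;int reuse_for_addr2(unsigned a, unsigned b, int *x, int **y) {
;;  int *q = *y;               /* first load produces a pointer */
;;  int *p = (a > b) ? x : q;  /* cmov selects between the two pointers */
;;  int v = *p;                /* second load uses the cmov'd address */
;;  return (a > b) ? a : v;
;;}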
|
|
|
|
define i32 @test_cmov_memoperand_in_group_reuse_for_addr2(i32 %a, i32 %b, i32* %x, i32** %y) #0 {
|
|
|
|
; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr2:
|
|
|
|
entry:
|
|
|
|
%cond = icmp ugt i32 %a, %b
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl %edi, %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK: cmpl
|
|
|
|
%load1 = load i32*, i32** %y
|
|
|
|
%p = select i1 %cond, i32* %x, i32* %load1
|
|
|
|
%load2 = load i32, i32* %p
|
|
|
|
%z = select i1 %cond, i32 %a, i32 %load2
|
|
|
|
; CHECK-NOT: cmov
|
|
|
|
; CHECK: ja [[FALSE_BB:.*]]
|
|
|
|
; CHECK: movq (%r{{..}}), %[[R1:.*]]
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl (%[[R1]]), %eax
|
2017-08-19 13:01:19 +08:00
|
|
|
; CHECK: [[FALSE_BB]]:
|
|
|
|
; CHECK: retq
|
|
|
|
ret i32 %z
|
|
|
|
}
|
|
|
|
|
2017-08-20 07:35:50 +08:00
|
|
|
; Test that we can convert a group of cmovs where only one has a memory
|
|
|
|
; operand and where that memory operand's registers come from a prior cmov and
|
|
|
|
; where that cmov gets *its* input from a prior cmov in the group.
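; A hypothetical C equivalent of the IR below (a sketch, not from the original
; test source); the second cmov takes the first cmov's result as its input:
;;int reuse_for_addr3(unsigned a, unsigned b, int *x, int *y, int *z) {
;;  int *p  = (a > b) ? x : y;  /* first cmov */
;;  int *p2 = (a > b) ? z : p;  /* second cmov chained on the first */
;;  int v = *p2;                /* load through the chained pointer */
;;  return (a > b) ? a : v;
;;}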
|
|
|
|
define i32 @test_cmov_memoperand_in_group_reuse_for_addr3(i32 %a, i32 %b, i32* %x, i32* %y, i32* %z) #0 {
|
|
|
|
; CHECK-LABEL: test_cmov_memoperand_in_group_reuse_for_addr3:
|
|
|
|
entry:
|
|
|
|
%cond = icmp ugt i32 %a, %b
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl %edi, %eax
|
2017-08-20 07:35:50 +08:00
|
|
|
; CHECK: cmpl
|
|
|
|
%p = select i1 %cond, i32* %x, i32* %y
|
|
|
|
%p2 = select i1 %cond, i32* %z, i32* %p
|
|
|
|
%load = load i32, i32* %p2
|
|
|
|
%r = select i1 %cond, i32 %a, i32 %load
|
|
|
|
; CHECK-NOT: cmov
|
|
|
|
; CHECK: ja [[FALSE_BB:.*]]
|
2018-09-20 02:59:08 +08:00
|
|
|
; CHECK: movl (%r{{..}}), %eax
|
2017-08-20 07:35:50 +08:00
|
|
|
; CHECK: [[FALSE_BB]]:
|
|
|
|
; CHECK: retq
|
|
|
|
ret i32 %r
|
|
|
|
}
|
|
|
|
|
2017-07-17 21:58:20 +08:00
|
|
|
attributes #0 = {"target-cpu"="x86-64"}
|