; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
declare i32 @abs(i32)
declare i64 @labs(i64)
declare i64 @llabs(i64)
; Test that the abs library call simplifier works correctly.
; abs(x) -> x <s 0 ? -x : x.
define i32 @test_abs(i32 %x) {
; CHECK-LABEL: @test_abs(
; CHECK-NEXT:    [[ISPOS:%.*]] = icmp slt i32 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X]]
; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[ISPOS]], i32 [[NEG]], i32 [[X]]
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %ret = call i32 @abs(i32 %x)
  ret i32 %ret
}
define i64 @test_labs(i64 %x) {
; CHECK-LABEL: @test_labs(
; CHECK-NEXT:    [[ISPOS:%.*]] = icmp slt i64 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[X]]
; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[ISPOS]], i64 [[NEG]], i64 [[X]]
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %ret = call i64 @labs(i64 %x)
  ret i64 %ret
}
define i64 @test_llabs(i64 %x) {
; CHECK-LABEL: @test_llabs(
; CHECK-NEXT:    [[ISPOS:%.*]] = icmp slt i64 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[X]]
; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[ISPOS]], i64 [[NEG]], i64 [[X]]
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %ret = call i64 @llabs(i64 %x)
  ret i64 %ret
}
; We have a canonical form of abs to make CSE easier.
define i8 @abs_canonical_1(i8 %x) {
; CHECK-LABEL: @abs_canonical_1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[NEG]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp sgt i8 %x, 0
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}
; Vectors should work too.
define <2 x i8> @abs_canonical_2(<2 x i8> %x) {
; CHECK-LABEL: @abs_canonical_2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select <2 x i1> [[CMP]], <2 x i8> [[NEG]], <2 x i8> [[X]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %x, <2 x i8> %neg
  ret <2 x i8> %abs
}
; NSW should not change.
define i8 @abs_canonical_3(i8 %x) {
; CHECK-LABEL: @abs_canonical_3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[NEG]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 0
  %neg = sub nsw i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}
define i8 @abs_canonical_4(i8 %x) {
; CHECK-LABEL: @abs_canonical_4(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[NEG]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 1
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}
; We have a canonical form of nabs to make CSE easier.
define i8 @nabs_canonical_1(i8 %x) {
; CHECK-LABEL: @nabs_canonical_1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[X]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp sgt i8 %x, 0
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}
; Vectors should work too.
define <2 x i8> @nabs_canonical_2(<2 x i8> %x) {
; CHECK-LABEL: @nabs_canonical_2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select <2 x i1> [[CMP]], <2 x i8> [[X]], <2 x i8> [[NEG]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %neg, <2 x i8> %x
  ret <2 x i8> %abs
}
; NSW should not change.
define i8 @nabs_canonical_3(i8 %x) {
; CHECK-LABEL: @nabs_canonical_3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[X]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 0
  %neg = sub nsw i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}
define i8 @nabs_canonical_4(i8 %x) {
; CHECK-LABEL: @nabs_canonical_4(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[CMP]], i8 [[X]], i8 [[NEG]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 1
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}
; The following 5 tests use a shift+add+xor to implement abs():
; B = ashr i8 A, 7  -- smear the sign bit.
; xor (add A, B), B -- add -1 and flip bits if negative
define i8 @shifty_abs_commute0(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[TMP1]], i8 [[TMP2]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}
define i8 @shifty_abs_commute0_nsw(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0_nsw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = sub nsw i8 0, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[TMP1]], i8 [[TMP2]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add nsw i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}
; The nuw flag creates a contradiction. If the shift produces all 1s, the only
; way for the add to not wrap is for %x to be 0, but then the shift couldn't
; have produced all 1s. We partially optimize this.
define i8 @shifty_abs_commute0_nuw(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0_nuw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[TMP1]], i8 [[X]], i8 0
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add nuw i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}
define <2 x i8> @shifty_abs_commute1(<2 x i8> %x) {
; CHECK-LABEL: @shifty_abs_commute1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = sub <2 x i8> zeroinitializer, [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = select <2 x i1> [[TMP1]], <2 x i8> [[TMP2]], <2 x i8> [[X]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %signbit = ashr <2 x i8> %x, <i8 7, i8 7>
  %add = add <2 x i8> %signbit, %x
  %abs = xor <2 x i8> %signbit, %add
  ret <2 x i8> %abs
}
define <2 x i8> @shifty_abs_commute2(<2 x i8> %x) {
; CHECK-LABEL: @shifty_abs_commute2(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <2 x i8> [[Y]], zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = sub <2 x i8> zeroinitializer, [[Y]]
; CHECK-NEXT:    [[ABS:%.*]] = select <2 x i1> [[TMP1]], <2 x i8> [[TMP2]], <2 x i8> [[Y]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %y = mul <2 x i8> %x, <i8 3, i8 3> ; extra op to thwart complexity-based canonicalization
  %signbit = ashr <2 x i8> %y, <i8 7, i8 7>
  %add = add <2 x i8> %y, %signbit
  %abs = xor <2 x i8> %signbit, %add
  ret <2 x i8> %abs
}
define i8 @shifty_abs_commute3(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute3(
; CHECK-NEXT:    [[Y:%.*]] = mul i8 [[X:%.*]], 3
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[Y]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 0, [[Y]]
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[TMP1]], i8 [[TMP2]], i8 [[Y]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %y = mul i8 %x, 3 ; extra op to thwart complexity-based canonicalization
  %signbit = ashr i8 %y, 7
  %add = add i8 %y, %signbit
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}
; Negative test - don't transform if it would increase instruction count.

declare void @extra_use(i8)
define i8 @shifty_abs_too_many_uses(i8 %x) {
; CHECK-LABEL: @shifty_abs_too_many_uses(
; CHECK-NEXT:    [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[SIGNBIT]], [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = xor i8 [[ADD]], [[SIGNBIT]]
; CHECK-NEXT:    call void @extra_use(i8 [[SIGNBIT]])
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add i8 %x, %signbit
  %abs = xor i8 %add, %signbit
  call void @extra_use(i8 %signbit)
  ret i8 %abs
}