; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
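; The RUN lines below exercise the -expandmemcmp pass for 32-bit x86 (X32)
; and for x86-64 with either one or two load pairs allowed per block
; (X64_1LD / X64_2LD); checks common to all configurations use the ALL prefix.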
; RUN: opt -S -expandmemcmp -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_1LD
; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_2LD

declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)

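; cmp2: a 2-byte memcmp is expanded on every target into one i16 load pair,
; byte-swapped to restore memcmp's big-endian comparison order, widened to
; i32, and subtracted; no library call remains.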
define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp2(
; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
; ALL-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
; ALL-NEXT: [[TMP8:%.*]] = zext i16 [[TMP6]] to i32
; ALL-NEXT: [[TMP9:%.*]] = sub i32 [[TMP7]], [[TMP8]]
; ALL-NEXT: ret i32 [[TMP9]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
  ret i32 %call
}

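; cmp3: 3 bytes take two compare blocks: loadbb handles the leading i16 pair
; and branches to res_block on inequality, loadbb1 handles the trailing byte,
; and endblock merges the two results through a phi.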
define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp3(
; ALL-NEXT: br label [[LOADBB:%.*]]
; ALL: res_block:
; ALL-NEXT: [[PHI_SRC1:%.*]] = phi i16 [ [[TMP7:%.*]], [[LOADBB]] ]
; ALL-NEXT: [[PHI_SRC2:%.*]] = phi i16 [ [[TMP8:%.*]], [[LOADBB]] ]
; ALL-NEXT: [[TMP1:%.*]] = icmp ult i16 [[PHI_SRC1]], [[PHI_SRC2]]
; ALL-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; ALL-NEXT: br label [[ENDBLOCK:%.*]]
; ALL: loadbb:
; ALL-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i16*
; ALL-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; ALL-NEXT: [[TMP5:%.*]] = load i16, i16* [[TMP3]]
; ALL-NEXT: [[TMP6:%.*]] = load i16, i16* [[TMP4]]
; ALL-NEXT: [[TMP7]] = call i16 @llvm.bswap.i16(i16 [[TMP5]])
; ALL-NEXT: [[TMP8]] = call i16 @llvm.bswap.i16(i16 [[TMP6]])
; ALL-NEXT: [[TMP9:%.*]] = icmp eq i16 [[TMP7]], [[TMP8]]
; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; ALL: loadbb1:
; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 2
; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 2
; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]]
; ALL-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; ALL-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; ALL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; ALL-NEXT: br label [[ENDBLOCK]]
; ALL: endblock:
; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; ALL-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
  ret i32 %call
}

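; cmp4: 4 bytes fit in a single i32 load pair; the result is formed as
; zext(ugt) - zext(ult) on the byte-swapped values rather than a plain
; subtraction, presumably because a full-width sub could wrap.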
define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp4(
; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
; ALL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]]
; ALL-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
; ALL-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
; ALL-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
; ALL-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]]
; ALL-NEXT: ret i32 [[TMP11]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
  ret i32 %call
}

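; cmp5: an i32 load pair in loadbb plus a trailing byte in loadbb1, using the
; same res_block/endblock structure as cmp3.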
define i32 @cmp5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp5(
; ALL-NEXT: br label [[LOADBB:%.*]]
; ALL: res_block:
; ALL-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP7:%.*]], [[LOADBB]] ]
; ALL-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP8:%.*]], [[LOADBB]] ]
; ALL-NEXT: [[TMP1:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
; ALL-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; ALL-NEXT: br label [[ENDBLOCK:%.*]]
; ALL: loadbb:
; ALL-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; ALL-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]]
; ALL-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]]
; ALL-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; ALL-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; ALL-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; ALL: loadbb1:
; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4
; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 4
; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]]
; ALL-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; ALL-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; ALL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; ALL-NEXT: br label [[ENDBLOCK]]
; ALL: endblock:
; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; ALL-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
  ret i32 %call
}

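; cmp6: an i32 load pair followed by an i16 pair; loadbb1 only tests equality,
; so endblock's phi takes the constant 0 from loadbb1 and the ordered -1/1
; result from res_block.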
define i32 @cmp6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp6(
; ALL-NEXT: br label [[LOADBB:%.*]]
; ALL: res_block:
; ALL-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; ALL-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; ALL-NEXT: [[TMP1:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
; ALL-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; ALL-NEXT: br label [[ENDBLOCK:%.*]]
; ALL: loadbb:
; ALL-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; ALL-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]]
; ALL-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]]
; ALL-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; ALL-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; ALL-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; ALL: loadbb1:
; ALL-NEXT: [[TMP10:%.*]] = bitcast i8* [[X]] to i16*
; ALL-NEXT: [[TMP11:%.*]] = bitcast i8* [[Y]] to i16*
; ALL-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 2
; ALL-NEXT: [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 2
; ALL-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
; ALL-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]]
; ALL-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
; ALL-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
; ALL-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i32
; ALL-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i32
; ALL-NEXT: [[TMP20:%.*]] = icmp eq i32 [[TMP18]], [[TMP19]]
; ALL-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; ALL: endblock:
; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; ALL-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
  ret i32 %call
}

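; cmp7: a 7-byte memcmp is not expanded in this configuration; the library
; call is left in place.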
define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp7(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
; ALL-NEXT: ret i32 [[CALL]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
  ret i32 %call
}

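; cmp8: 8 bytes are a single i64 load pair on x86-64 but need two i32 load
; pairs on the 32-bit target, so the X32 and X64 check lines diverge here.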
define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp8(
; X32-NEXT: br label [[LOADBB:%.*]]
; X32: res_block:
; X32-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X32-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X32-NEXT: [[TMP1:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
; X32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X32-NEXT: br label [[ENDBLOCK:%.*]]
; X32: loadbb:
; X32-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X32-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X32-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]]
; X32-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]]
; X32-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; X32-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; X32-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; X32-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X32: loadbb1:
; X32-NEXT: [[TMP10:%.*]] = bitcast i8* [[X]] to i32*
; X32-NEXT: [[TMP11:%.*]] = bitcast i8* [[Y]] to i32*
; X32-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
; X32-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
; X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
; X32-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]]
; X32-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
; X32-NEXT: [[TMP17]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
; X32-NEXT: [[TMP18:%.*]] = icmp eq i32 [[TMP16]], [[TMP17]]
; X32-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X32: endblock:
; X32-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X32-NEXT: ret i32 [[PHI_RES]]
;
; X64-LABEL: @cmp8(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
; X64-NEXT: [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]])
; X64-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
; X64-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
; X64-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]]
; X64-NEXT: ret i32 [[TMP11]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
  ret i32 %call
}

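; cmp9: 9 bytes are not expanded on the 32-bit target; x86-64 compares an i64
; pair in loadbb and the trailing byte in loadbb1.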
define i32 @cmp9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp9(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp9(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]]
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]]
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 8
; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]]
; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]]
; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; X64-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; X64-NEXT: br label [[ENDBLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
  ret i32 %call
}

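; cmp10: again a library call on X32; x86-64 compares an i64 pair followed by
; an i16 pair, widening the i16 values to i64 before the equality test.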
define i32 @cmp10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp10(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp10(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]]
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]]
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[X]] to i16*
; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[Y]] to i16*
; X64-NEXT: [[TMP12:%.*]] = getelementptr i16, i16* [[TMP10]], i16 4
; X64-NEXT: [[TMP13:%.*]] = getelementptr i16, i16* [[TMP11]], i16 4
; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]]
; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]]
; X64-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
; X64-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
; X64-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i64
; X64-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i64
; X64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[TMP18]], [[TMP19]]
; X64-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
|
[x86, CGP] increase memcmp() expansion up to 4 load pairs
It should be a win to avoid going out to the system lib for all small memcmp() calls using scalar ops. For x86 32-bit, this means most everything up to 16 bytes. For 64-bit, that doubles because we can do 8-byte loads.
Notes:
Reduced from 4 to 2 loads for -Os behavior, which might not be optimal in all cases. It's effectively a question of how much do we trust the system implementation. Linux and macOS (and Windows I assume, but did not test) have optimized memcmp() code for x86, so it's probably not bad either way? PPC is using 8/4 for defaults on these. We do not expand at all for -Oz.
There are still potential improvements to make for the CGP expansion IR and/or lowering such as avoiding select-of-constants (D34904) and not doing zexts to the max load type before doing a compare.
We have special-case SSE/AVX codegen for (memcmp(x, y, 16/32) == 0) that will no longer be produced after this patch. I've shown the experimental justification for that change in PR33329:
https://bugs.llvm.org/show_bug.cgi?id=33329#c12
TLDR: While the vector code is a likely winner, we can't guarantee that it's a winner in all cases on all CPUs, so I'm willing to sacrifice it for the greater good of expanding all small memcmp(). If we want to resurrect that codegen, it can be done by adjusting the CGP params or poking a hole to let those fall-through the CGP expansion.
Committed on behalf of Sanjay Patel
Differential Revision: https://reviews.llvm.org/D35067
llvm-svn: 308322
2017-07-18 23:55:30 +08:00
|
|
|
; X64: endblock:
|
2017-11-03 20:12:27 +08:00
|
|
|
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
|
[x86, CGP] increase memcmp() expansion up to 4 load pairs
It should be a win to avoid going out to the system lib for all small memcmp() calls using scalar ops. For x86 32-bit, this means most everything up to 16 bytes. For 64-bit, that doubles because we can do 8-byte loads.
Notes:
Reduced from 4 to 2 loads for -Os behavior, which might not be optimal in all cases. It's effectively a question of how much do we trust the system implementation. Linux and macOS (and Windows I assume, but did not test) have optimized memcmp() code for x86, so it's probably not bad either way? PPC is using 8/4 for defaults on these. We do not expand at all for -Oz.
There are still potential improvements to make for the CGP expansion IR and/or lowering such as avoiding select-of-constants (D34904) and not doing zexts to the max load type before doing a compare.
We have special-case SSE/AVX codegen for (memcmp(x, y, 16/32) == 0) that will no longer be produced after this patch. I've shown the experimental justification for that change in PR33329:
https://bugs.llvm.org/show_bug.cgi?id=33329#c12
TLDR: While the vector code is a likely winner, we can't guarantee that it's a winner in all cases on all CPUs, so I'm willing to sacrifice it for the greater good of expanding all small memcmp(). If we want to resurrect that codegen, it can be done by adjusting the CGP params or poking a hole to let those fall-through the CGP expansion.
Committed on behalf of Sanjay Patel
Differential Revision: https://reviews.llvm.org/D35067
llvm-svn: 308322
2017-07-18 23:55:30 +08:00
|
|
|
; X64-NEXT: ret i32 [[PHI_RES]]
|
2017-06-09 04:40:39 +08:00
|
|
|
;
|
|
|
|
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
|
|
|
|
ret i32 %call
|
|
|
|
}
|
|
|
|
|
|
|
|
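; cmp11: not expanded in these runs; the memcmp libcall remains on both targets.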
define i32 @cmp11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp11(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
; ALL-NEXT: ret i32 [[CALL]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
  ret i32 %call
}

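; cmp12: the X32 run keeps the libcall, while the X64 runs expand to an i64 load pair followed by an i32 load pair.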
define i32 @cmp12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp12(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp12(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]]
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]]
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[X]] to i32*
; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[Y]] to i32*
; X64-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 2
; X64-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2
; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]]
; X64-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]]
; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
; X64-NEXT: [[TMP17:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
; X64-NEXT: [[TMP19]] = zext i32 [[TMP17]] to i64
; X64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[TMP18]], [[TMP19]]
; X64-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
  ret i32 %call
}

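; Sizes 13, 14, and 15 are not expanded in these runs; the memcmp libcall remains.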
define i32 @cmp13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp13(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
; ALL-NEXT: ret i32 [[CALL]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
  ret i32 %call
}

define i32 @cmp14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp14(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
; ALL-NEXT: ret i32 [[CALL]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
  ret i32 %call
}

define i32 @cmp15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp15(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
; ALL-NEXT: ret i32 [[CALL]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
  ret i32 %call
}

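; cmp16: the X32 run keeps the libcall; the X64 runs expand to two i64 load pairs.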
define i32 @cmp16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp16(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
; X32-NEXT: ret i32 [[CALL]]
;
; X64-LABEL: @cmp16(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]]
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]]
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[X]] to i64*
; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[Y]] to i64*
; X64-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[TMP10]], i64 1
; X64-NEXT: [[TMP13:%.*]] = getelementptr i64, i64* [[TMP11]], i64 1
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]]
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]]
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
  ret i32 %call
}

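; cmp_eq2: all runs reduce the 2-byte equality compare to a single i16 load pair and icmp ne.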
define i32 @cmp_eq2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq2(
; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]]
; ALL-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

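; cmp_eq3: the X32 and X64_2LD runs fold an i16 load pair and an i8 load pair with xor/or, while X64_1LD chains the loads through separate blocks.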
define i32 @cmp_eq3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq3(
; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X32-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X32-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; X32-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; X32-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]]
; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2
; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2
; X32-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X32-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X32-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i16
; X32-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i16
; X32-NEXT: [[TMP12:%.*]] = xor i16 [[TMP10]], [[TMP11]]
; X32-NEXT: [[TMP13:%.*]] = or i16 [[TMP5]], [[TMP12]]
; X32-NEXT: [[TMP14:%.*]] = icmp ne i16 [[TMP13]], 0
; X32-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq3(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq3(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i16
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i16
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i16 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i16 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i16 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

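; cmp_eq4: all runs reduce the 4-byte equality compare to a single i32 load pair.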
define i32 @cmp_eq4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq4(
; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; ALL-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; ALL-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

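; cmp_eq5: same structure as cmp_eq3, using an i32 load pair plus an i8 load pair.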
define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq5(
; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X32-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4
; X32-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X32-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X32-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i32
; X32-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i32
; X32-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
; X32-NEXT: [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
; X32-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; X32-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq5(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq5(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i32
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i32
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

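; cmp_eq6: same structure, using an i32 load pair plus an i16 load pair.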
define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq6(
; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X32-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
; X32-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
; X32-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
; X32-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
; X32-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
; X32-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]]
; X32-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
; X32-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
; X32-NEXT: [[TMP14:%.*]] = xor i32 [[TMP12]], [[TMP13]]
; X32-NEXT: [[TMP15:%.*]] = or i32 [[TMP5]], [[TMP14]]
; X32-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; X32-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq6(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
; X64_1LD-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]]
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq6(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 2
; X64_2LD-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 2
; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]]
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i32 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i32 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq7(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 7)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq8(
; X32-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X32-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]]
; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]]
; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X32-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
; X32-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
; X32-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 1
; X32-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 1
; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
; X32-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]]
; X32-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
; X32-NEXT: [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
; X32-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; X32-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64-LABEL: @cmp_eq8(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq9(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq9(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 8
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq9(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 8
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]]
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]]
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i64
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i64
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq10(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq10(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
; X64_1LD-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 4
; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]]
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq10(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i16*
; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i16*
; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP6]], i16 4
; X64_2LD-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[TMP7]], i16 4
; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]]
; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]]
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i64
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i64
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i64 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i64 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq11(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq12(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64_1LD-LABEL: @cmp_eq12(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
; X64_1LD-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
; X64_1LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
; X64_1LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]]
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq12(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]]
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]]
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = bitcast i8* [[X]] to i32*
; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[Y]] to i32*
; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP6]], i32 2
; X64_2LD-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
; X64_2LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]]
; X64_2LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]]
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i32 [[TMP10]] to i64
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i64 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i64 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq13(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq14(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; ALL-LABEL: @cmp_eq15(
; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; ALL-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; ALL-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq16(
; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT: ret i32 [[CONV]]
;
; X64-LABEL: @cmp_eq16(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
; X64-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]]
; X64-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]]
; X64-NEXT: [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}