; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -fast-isel-sink-local-values < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s

; ModuleID = 'mask_set.c'
source_filename = "mask_set.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.dbg.declare(metadata, metadata, metadata)

; Function Attrs: nounwind uwtable
declare i64 @calc_expected_mask_val(i8* %valp, i32 %el_size, i32 %length)

; Function Attrs: nounwind uwtable
declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fname, i8* %input)
; Function Attrs: nounwind uwtable
; Checks that the xmm->mask conversions (vpmovw2m/vpmovd2m) and the k-register
; spills/reloads around the two libcall pairs are emitted as expected at -O0
; with local-value sinking enabled.
define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %fname){
; CHECK-LABEL: test_xmm:
; CHECK: ## %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: vpmovw2m %xmm0, %k0
; CHECK-NEXT: movl $2, %esi
; CHECK-NEXT: movl $8, %eax
; CHECK-NEXT: movq %rdx, %rdi
; CHECK-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; CHECK-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: callq _calc_expected_mask_val
; CHECK-NEXT: ## kill: def $eax killed $eax killed $rax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: movzwl %ax, %esi
; CHECK-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 2-byte Reload
; CHECK-NEXT: kmovb %k0, %edi
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx ## 8-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-NEXT: callq _check_mask16
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; CHECK-NEXT: vpmovd2m %xmm0, %k0
; CHECK-NEXT: kmovq %k0, %k1
; CHECK-NEXT: kmovd %k0, %esi
; CHECK-NEXT: ## kill: def $sil killed $sil killed $esi
; CHECK-NEXT: movzbl %sil, %edi
; CHECK-NEXT: ## kill: def $di killed $di killed $edi
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movq %rcx, %rdi
; CHECK-NEXT: movl $4, %r8d
; CHECK-NEXT: movl %r8d, %esi
; CHECK-NEXT: movl %r8d, %edx
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: callq _calc_expected_mask_val
; CHECK-NEXT: ## kill: def $ax killed $ax killed $rax
; CHECK-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %r9w ## 2-byte Reload
; CHECK-NEXT: movzwl %r9w, %edi
; CHECK-NEXT: movzwl %ax, %esi
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx ## 8-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-NEXT: callq _check_mask16
; CHECK-NEXT: addq $56, %rsp
; CHECK-NEXT: retq
  %d2 = bitcast <2 x i64> %a to <8 x i16>
  %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2)
  %conv7 = zext i8 %m2 to i16
  %call9 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 2, i32 8)
  %conv10 = trunc i64 %call9 to i16
  %call12 = call i32 @check_mask16(i16 zeroext %conv7, i16 zeroext %conv10, i8* %fname, i8* %arraydecay)
  %d3 = bitcast <2 x i64> %a to <4 x i32>
  %m3 = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %d3)
  %conv14 = zext i8 %m3 to i16
  %call16 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 4, i32 4)
  %conv17 = trunc i64 %call16 to i16
  %call19 = call i32 @check_mask16(i16 zeroext %conv14, i16 zeroext %conv17, i8* %fname, i8* %arraydecay)
  ret void
}

; Function Attrs: nounwind readnone
declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)

; Function Attrs: nounwind readnone
declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)