; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X32 --check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X64 --check-prefix=CHECK
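; The blendvb_fallback tests check that selects on vNi1 masks are lowered to a
; variable blend (vblendvps); the insertps tests check folding of loads into
; (v)insertps.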
define <4 x i32> @blendvb_fallback_v4i32(<4 x i1> %mask, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @blendvb_fallback_v4i32
; CHECK: vblendvps
; CHECK: ret
%ret = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %y
ret <4 x i32> %ret
}
define <8 x i32> @blendvb_fallback_v8i32(<8 x i1> %mask, <8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: @blendvb_fallback_v8i32
; CHECK: vblendvps
; CHECK: ret
%ret = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %ret
}
define <8 x float> @blendvb_fallback_v8f32(<8 x i1> %mask, <8 x float> %x, <8 x float> %y) {
; CHECK-LABEL: @blendvb_fallback_v8f32
; CHECK: vblendvps
; CHECK: ret
%ret = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
ret <8 x float> %ret
}
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
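; The insertps immediate encodes ZMask in bits [3:0], the destination element
; (CountD) in bits [5:4], and the source element (CountS) in bits [7:6];
; e.g. $48 (0x30) inserts source element 0 into destination element 3.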
define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; CHECK-LABEL: insertps_from_vector_load:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
ret <4 x float> %2
}
;; Use a non-zero CountS for insertps
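;; ($96 == 0x60, i.e. CountS = 1 and CountD = 2.)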
define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; CHECK-LABEL: insertps_from_vector_load_offset:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: insertps $96, 4(%{{...}}), %
; CHECK-NEXT: ret
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
ret <4 x float> %2
}
define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
; CHECK-LABEL: insertps_from_vector_load_offset_2:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; X32: movl 8(%esp), %ecx
; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
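;; Note: the $-64 expected below is the same 8-bit immediate as the i32 192
;; passed to the intrinsic (both encode as 0xC0).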
; CHECK: vinsertps $-64, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
%1 = getelementptr inbounds <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
}
define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
; CHECK-LABEL: insertps_from_broadcast_loadf32:
; On X32, account for the arguments' move to registers
; X32: movl 8(%esp), %eax
; X32: movl 4(%esp), %ecx
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
%1 = getelementptr inbounds float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
%6 = insertelement <4 x float> %5, float %2, i32 3
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
ret <4 x float> %7
}
define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
; CHECK-LABEL: insertps_from_broadcast_loadv4f32:
; On X32, account for the arguments' move to registers
; X32: movl 4(%esp), %{{...}}
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
%1 = load <4 x float>* %b, align 4
%2 = extractelement <4 x float> %1, i32 0
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
%6 = insertelement <4 x float> %5, float %2, i32 3
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
ret <4 x float> %7
}
;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
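;; (Each insertps $48 only reads element 0 of the splatted value, so the
;; broadcast into the upper lanes shouldn't be needed.)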
define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
; CHECK-LABEL: insertps_from_broadcast_multiple_use:
; On X32, account for the arguments' move to registers
; X32: movl 8(%esp), %eax
; X32: movl 4(%esp), %ecx
; CHECK: vbroadcastss
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: vaddps
; CHECK: vaddps
; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
%6 = insertelement <4 x float> %5, float %2, i32 3
%7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
%8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
%9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
%10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
%11 = fadd <4 x float> %7, %8
%12 = fadd <4 x float> %9, %10
%13 = fadd <4 x float> %11, %12
ret <4 x float> %13
}