[ARM] Fix for MVE VREV64

The VREV64 instruction is architecturally unpredictable if Qd == Qm, due to
the cross-beat nature of the instruction. This adds an @earlyclobber
constraint to Qd, matching how we handle similar restrictions on other
instructions, such as the write-back operand on loads and stores.

Differential Revision: https://reviews.llvm.org/D65502

llvm-svn: 367544
Author: David Green
Date:   2019-08-01 11:22:03 +00:00
Commit: 1343814fb4 (parent 7d766c393e)
2 changed files with 15 additions and 10 deletions
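
For background, "@earlyclobber" in a TableGen constraint string tells the
register allocator that the output operand is written before the inputs have
been fully read, so it must not share a register with any input. Below is a
minimal stand-alone sketch of that mechanism, not taken from this patch: the
ExampleREV name and its encoding-free body are hypothetical; MQPR is the MVE
128-bit vector register class used by the real definitions in the diff.

    // Hypothetical sketch: how an @earlyclobber constraint attaches to an
    // output operand of a TableGen Instruction record.
    def ExampleREV : Instruction {
      let Namespace = "ARM";
      let OutOperandList = (outs MQPR:$Qd);
      let InOperandList = (ins MQPR:$Qm);
      let AsmString = "examplerev.32\t$Qd, $Qm";
      // This constraint string is what the patch threads through MVE_VREV via
      // the new cstr parameter: it forces $Qd and $Qm into different
      // registers, avoiding the unpredictable Qd == Qm encoding.
      let Constraints = "@earlyclobber $Qd";
    }

The effect shows up in the updated test checks below: the allocator now picks
a fresh register for the vrev64 result and a vmov copies it back when the
value has to end up in the original register.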


@@ -985,9 +985,9 @@ def MVE_VBIC : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
   let Inst{0} = 0b0;
 }
 
-class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7>
+class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7, string cstr="">
   : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm), iname,
-                  suffix, "$Qd, $Qm", ""> {
+                  suffix, "$Qd, $Qm", cstr> {
 
   let Inst{28} = 0b1;
   let Inst{25-23} = 0b111;
@@ -1001,9 +1001,9 @@ class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7>
   let Inst{0} = 0b0;
 }
 
-def MVE_VREV64_8 : MVE_VREV<"vrev64", "8", 0b00, 0b00>;
-def MVE_VREV64_16 : MVE_VREV<"vrev64", "16", 0b01, 0b00>;
-def MVE_VREV64_32 : MVE_VREV<"vrev64", "32", 0b10, 0b00>;
+def MVE_VREV64_8 : MVE_VREV<"vrev64", "8", 0b00, 0b00, "@earlyclobber $Qd">;
+def MVE_VREV64_16 : MVE_VREV<"vrev64", "16", 0b01, 0b00, "@earlyclobber $Qd">;
+def MVE_VREV64_32 : MVE_VREV<"vrev64", "32", 0b10, 0b00, "@earlyclobber $Qd">;
 
 def MVE_VREV32_8 : MVE_VREV<"vrev32", "8", 0b00, 0b01>;
 def MVE_VREV32_16 : MVE_VREV<"vrev32", "16", 0b01, 0b01>;


@@ -42,7 +42,8 @@ entry:
 define arm_aapcs_vfpcc <4 x i32> @shuffle5_i32(<4 x i32> %src) {
 ; CHECK-LABEL: shuffle5_i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev64.32 q0, q0
+; CHECK-NEXT:    vrev64.32 q1, q0
+; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -122,7 +123,8 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @shuffle5_i16(<8 x i16> %src) {
 ; CHECK-LABEL: shuffle5_i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev64.16 q0, q0
+; CHECK-NEXT:    vrev64.16 q1, q0
+; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -235,7 +237,8 @@ entry:
 define arm_aapcs_vfpcc <16 x i8> @shuffle5_i8(<16 x i8> %src) {
 ; CHECK-LABEL: shuffle5_i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev64.8 q0, q0
+; CHECK-NEXT:    vrev64.8 q1, q0
+; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
@@ -334,7 +337,8 @@ entry:
 define arm_aapcs_vfpcc <4 x float> @shuffle5_f32(<4 x float> %src) {
 ; CHECK-LABEL: shuffle5_f32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev64.32 q0, q0
+; CHECK-NEXT:    vrev64.32 q1, q0
+; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -405,7 +409,8 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @shuffle5_f16(<8 x half> %src) {
 ; CHECK-LABEL: shuffle5_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vrev64.16 q0, q0
+; CHECK-NEXT:    vrev64.16 q1, q0
+; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>