From c9fa5dd61882c2b272d887abc3a33dae5bbbcedc Mon Sep 17 00:00:00 2001
From: Bill Schmidt
Date: Fri, 25 Jul 2014 01:55:55 +0000
Subject: [PATCH] [PATCH][PPC64LE] Correct little-endian usage of vmrgh* and
 vmrgl*.

Because the PowerPC vmrgh* and vmrgl* instructions have a built-in
big-endian bias, it is necessary to swap their inputs in little-endian
mode when using them to implement a vector shuffle.  This was
previously missed in the vector LE implementation.

There was already logic to distinguish between unary and "normal"
vmrg* vector shuffles, so this patch extends that logic to use a third
option: "swapped" vmrg* vector shuffles that are used for little
endian in place of the "normal" ones.

I've updated the vec_shuffle_le.ll test to check for the expected
register ordering on the generated instructions.

This bug was discovered when testing the LE and ELFv2 patches for
safety if they were backported to 3.4.  A different vectorization
decision was made in 3.4 than on mainline trunk, and that exposed the
problem.  I've verified this fix takes care of that issue.

llvm-svn: 213915
---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 65 +++++++++++------
 llvm/lib/Target/PowerPC/PPCISelLowering.h   |  4 +-
 llvm/lib/Target/PowerPC/PPCInstrAltivec.td  | 80 ++++++++++++++-------
 llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll | 24 +++++--
 4 files changed, 119 insertions(+), 54 deletions(-)
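
[Editor's note, placed after the diffstat and not part of the change itself:
the self-contained C++ sketch below models why the vmrg* operands must be
swapped on little-endian targets.  The vmrglb model follows the ISA's
big-endian element numbering; the helper and the small test in main() are
illustrative assumptions, not LLVM or ISA code.]

// Editor's sketch (illustrative, not LLVM code): the big-endian bias of
// vmrglb and the operand swap that compensates for it on little-endian.
#include <array>
#include <cassert>
#include <cstdint>

using V16 = std::array<uint8_t, 16>; // one byte element per slot, LE order

// vmrglb as the ISA defines it, in big-endian element numbering:
//   vD[2*i] = vA[8+i], vD[2*i+1] = vB[8+i]  for i = 0..7 (element 0 = MSB).
// Vectors here are stored in little-endian element order, so big-endian
// element e lives at index 15 - e.
static V16 vmrglb(const V16 &vA, const V16 &vB) {
  V16 vD{};
  for (int i = 0; i < 8; ++i) {
    vD[15 - 2 * i] = vA[15 - (8 + i)];       // BE: vD[2i]   = vA[8+i]
    vD[15 - (2 * i + 1)] = vB[15 - (8 + i)]; // BE: vD[2i+1] = vB[8+i]
  }
  return vD;
}

int main() {
  V16 a, b;
  for (int i = 0; i < 16; ++i) {
    a[i] = i;      // shuffle elements 0..15
    b[i] = 16 + i; // shuffle elements 16..31
  }
  // The little-endian shuffle this patch matches as a "swapped" vmrglb
  // interleaves the low halves: result[2i] = a[i], result[2i+1] = b[i].
  // Emitting vmrglb with the operands in source order gets this wrong;
  // swapping them, as the new (VMRGLB $vB, $vA) patterns do, is correct.
  V16 res = vmrglb(b, a);
  for (int i = 0; i < 8; ++i)
    assert(res[2 * i] == a[i] && res[2 * i + 1] == b[i]);
  return 0;
}

The asserts above hold only with the swapped operand order, mirroring the
(VMRG?? $vB, $vA) output patterns this patch adds to PPCInstrAltivec.td.
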
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 456f978835a4..d596bda49fd0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -926,31 +926,51 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
 
 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
+/// The ShuffleKind distinguishes between big-endian merges with two
+/// different inputs (0), either-endian merges with two identical inputs (1),
+/// and little-endian merges with two different inputs (2).  For the latter,
+/// the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
-                             bool isUnary, SelectionDAG &DAG) {
+                             unsigned ShuffleKind, SelectionDAG &DAG) {
   if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
-    if (!isUnary)
+    if (ShuffleKind == 1) // unary
+      return isVMerge(N, UnitSize, 0, 0);
+    else if (ShuffleKind == 2) // swapped
       return isVMerge(N, UnitSize, 0, 16);
-    return isVMerge(N, UnitSize, 0, 0);
+    else
+      return false;
   } else {
-    if (!isUnary)
+    if (ShuffleKind == 1) // unary
+      return isVMerge(N, UnitSize, 8, 8);
+    else if (ShuffleKind == 0) // normal
       return isVMerge(N, UnitSize, 8, 24);
-    return isVMerge(N, UnitSize, 8, 8);
+    else
+      return false;
   }
 }
 
 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
+/// The ShuffleKind distinguishes between big-endian merges with two
+/// different inputs (0), either-endian merges with two identical inputs (1),
+/// and little-endian merges with two different inputs (2).  For the latter,
+/// the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
-                             bool isUnary, SelectionDAG &DAG) {
+                             unsigned ShuffleKind, SelectionDAG &DAG) {
   if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
-    if (!isUnary)
+    if (ShuffleKind == 1) // unary
+      return isVMerge(N, UnitSize, 8, 8);
+    else if (ShuffleKind == 2) // swapped
       return isVMerge(N, UnitSize, 8, 24);
-    return isVMerge(N, UnitSize, 8, 8);
+    else
+      return false;
   } else {
-    if (!isUnary)
+    if (ShuffleKind == 1) // unary
+      return isVMerge(N, UnitSize, 0, 0);
+    else if (ShuffleKind == 0) // normal
       return isVMerge(N, UnitSize, 0, 16);
-    return isVMerge(N, UnitSize, 0, 0);
+    else
+      return false;
   }
 }
 
@@ -6021,12 +6041,12 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
         PPC::isVPKUWUMShuffleMask(SVOp, true, DAG) ||
         PPC::isVPKUHUMShuffleMask(SVOp, true, DAG) ||
         PPC::isVSLDOIShuffleMask(SVOp, true, DAG) != -1 ||
-        PPC::isVMRGLShuffleMask(SVOp, 1, true, DAG) ||
-        PPC::isVMRGLShuffleMask(SVOp, 2, true, DAG) ||
-        PPC::isVMRGLShuffleMask(SVOp, 4, true, DAG) ||
-        PPC::isVMRGHShuffleMask(SVOp, 1, true, DAG) ||
-        PPC::isVMRGHShuffleMask(SVOp, 2, true, DAG) ||
-        PPC::isVMRGHShuffleMask(SVOp, 4, true, DAG)) {
+        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
+        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
+        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
+        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
+        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
+        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG)) {
       return Op;
     }
   }
@@ -6034,15 +6054,16 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   // Altivec has a variety of "shuffle immediates" that take two vector inputs
   // and produce a fixed permutation.  If any of these match, do not lower to
   // VPERM.
+  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
   if (PPC::isVPKUWUMShuffleMask(SVOp, false, DAG) ||
       PPC::isVPKUHUMShuffleMask(SVOp, false, DAG) ||
       PPC::isVSLDOIShuffleMask(SVOp, false, DAG) != -1 ||
-      PPC::isVMRGLShuffleMask(SVOp, 1, false, DAG) ||
-      PPC::isVMRGLShuffleMask(SVOp, 2, false, DAG) ||
-      PPC::isVMRGLShuffleMask(SVOp, 4, false, DAG) ||
-      PPC::isVMRGHShuffleMask(SVOp, 1, false, DAG) ||
-      PPC::isVMRGHShuffleMask(SVOp, 2, false, DAG) ||
-      PPC::isVMRGHShuffleMask(SVOp, 4, false, DAG))
+      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
+      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
+      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
+      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
+      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
+      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG))
     return Op;
 
   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index ae8c300a4ffa..2ac82bd475b2 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -308,12 +308,12 @@
     /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
     /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
     bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
-                            bool isUnary, SelectionDAG &DAG);
+                            unsigned ShuffleKind, SelectionDAG &DAG);
 
     /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
     /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
     bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
-                            bool isUnary, SelectionDAG &DAG);
+                            unsigned ShuffleKind, SelectionDAG &DAG);
 
     /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
     /// amount, otherwise return -1.
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index dce46d84e6e1..0924cd8bb1b8 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -44,65 +44,81 @@ def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
 
 
 def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
 }]>;
 def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
 }]>;
 def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
 }]>;
 def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
 }]>;
 def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
 }]>;
 def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
 }]>;
 
 def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
 }]>;
 def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
 }]>;
 def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
-  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true,
-                                 *CurDAG);
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
 }]>;
 def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
 }]>;
 def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
 }]>;
 def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
-  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true,
-                                 *CurDAG);
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
+}]>;
+
+
+// These fragments are provided for little-endian, where the inputs must be
+// swapped for correct semantics.
+def vmrglb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
+}]>;
+def vmrglh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
+}]>;
+def vmrglw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
+}]>;
+def vmrghb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
+}]>;
+def vmrghh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
+}]>;
+def vmrghw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                     (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
 }]>;
 
 
@@ -803,6 +819,22 @@ def:Pat<(vmrghh_unary_shuffle v16i8:$vA, undef),
         (VMRGHH $vA, $vA)>;
 def:Pat<(vmrghw_unary_shuffle v16i8:$vA, undef),
         (VMRGHW $vA, $vA)>;
 
+// Match vmrg*(y,x), i.e., swapped operands.  These fragments
+// are matched for little-endian, where the inputs must be
+// swapped for correct semantics.
+def:Pat<(vmrglb_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGLB $vB, $vA)>;
+def:Pat<(vmrglh_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGLH $vB, $vA)>;
+def:Pat<(vmrglw_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGLW $vB, $vA)>;
+def:Pat<(vmrghb_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGHB $vB, $vA)>;
+def:Pat<(vmrghh_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGHH $vB, $vA)>;
+def:Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
+        (VMRGHW $vB, $vA)>;
+
 // Logical Operations
 def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>;
diff --git a/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
index 635721c929d5..3ab4cc9045b4 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -48,7 +48,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
-; CHECK: vmrglb
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
@@ -69,7 +71,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-; CHECK: vmrghb
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
@@ -90,7 +94,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
-; CHECK: vmrglh
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
@@ -111,7 +117,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
-; CHECK: vmrghh
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
@@ -132,7 +140,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
-; CHECK: vmrglw
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
@@ -153,7 +163,9 @@ entry:
   %tmp = load <16 x i8>* %A
   %tmp2 = load <16 x i8>* %B
   %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
-; CHECK: vmrghw
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
   store <16 x i8> %tmp3, <16 x i8>* %A
   ret void
 }
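
[Editor's note, appended after the patch and not part of it: the sketch below
restates the little-endian side of isVMRGLShuffleMask over a plain mask
array, with ShuffleKind encoded as in the patch (0 = big-endian two-input,
1 = unary, 2 = little-endian swapped).  It omits undef-element handling and
all SelectionDAG plumbing, so it is a simplified model of the real routine,
not a copy of it.]

// Editor's sketch (illustrative): the mask classification behind the
// ShuffleKind parameter introduced by this patch.
#include <array>
#include <cassert>

using Mask = std::array<int, 16>;

// Mirrors the shape of the static isVMerge() helper: elements alternate
// between UnitSize-byte groups drawn from LHSStart and RHSStart.
static bool isVMerge(const Mask &M, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  for (unsigned i = 0; i != 8 / UnitSize; ++i)
    for (unsigned j = 0; j != UnitSize; ++j)
      if (M[i * UnitSize * 2 + j] != int(LHSStart + j + i * UnitSize) ||
          M[i * UnitSize * 2 + UnitSize + j] !=
              int(RHSStart + j + i * UnitSize))
        return false;
  return true;
}

// The little-endian branch of isVMRGLShuffleMask after this patch.
static bool isVMRGLMaskLE(const Mask &M, unsigned UnitSize,
                          unsigned ShuffleKind) {
  if (ShuffleKind == 1) // unary
    return isVMerge(M, UnitSize, 0, 0);
  if (ShuffleKind == 2) // swapped
    return isVMerge(M, UnitSize, 0, 16);
  return false;         // kind 0 ("normal") is big-endian only
}

int main() {
  // The VMRGLB mask from the test above: <0, 16, 1, 17, ..., 7, 23>.
  Mask M;
  for (int i = 0; i < 8; ++i) {
    M[2 * i] = i;
    M[2 * i + 1] = 16 + i;
  }
  assert(isVMRGLMaskLE(M, 1, 2));  // matches only as a swapped merge,
  assert(!isVMRGLMaskLE(M, 1, 1)); // not as a unary one
  return 0;
}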