[PowerPC] Custom lower rotl v1i128 to vector_shuffle.

Summary: A bug was reported in bugzilla-45628, where the swap_with_shift case could not be matched to a single HW instruction xxswapd as expected.
In fact the case matches the rotate idiom. We already have MatchRotate to handle an 'or' of two operands and generate a rot[lr] when the pattern matches that idiom, but PPC does not support ROTL for v1i128. We therefore custom lower ROTL v1i128 to a VECTOR_SHUFFLE, which is matched to a single HW instruction during instruction selection.
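
For reference, the reported idiom looks like this in IR (the rotl_64 case from the test below; value names are illustrative), and with this patch it compiles to a single xxswapd on VSX subtargets:

define <1 x i128> @swap_with_shift(<1 x i128> %num) {
entry:
  %shl = shl <1 x i128> %num, <i128 64>
  %shr = lshr <1 x i128> %num, <i128 64>
  %rot = or <1 x i128> %shl, %shr
  ret <1 x i128> %rot
}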

Reviewed By: steven.zhang

Differential Revision: https://reviews.llvm.org/D81076
commit ad6024e29f (parent 49279ca160)
Author: Esme-Yi
Date:   2020-06-18 01:32:23 +00:00
3 changed files with 309 additions and 290 deletions


@@ -777,6 +777,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
if (!Subtarget.hasP8Altivec())
setOperationAction(ISD::ABS, MVT::v2i64, Expand);
// Custom lower ROTL v1i128 to VECTOR_SHUFFLE v16i8.
setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
// With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
if (Subtarget.hasAltivec())
for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
@@ -9648,6 +9650,36 @@ SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
/// a multiple of 8. Otherwise we convert it to a scalar rotation (i128),
/// i.e. (or (shl x, C1), (srl x, 128-C1)).
SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
assert(Op.getValueType() == MVT::v1i128 &&
"Only set v1i128 as custom, other types shouldn't reach here!");
SDLoc dl(Op);
SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
unsigned SHLAmt = N1.getConstantOperandVal(0);
if (SHLAmt % 8 == 0) {
SmallVector<int, 16> Mask(16, 0);
std::iota(Mask.begin(), Mask.end(), 0);
std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
if (SDValue Shuffle =
DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
DAG.getUNDEF(MVT::v16i8), Mask))
return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
}
SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
DAG.getConstant(SHLAmt, dl, MVT::i32));
SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
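
For intuition, here is a standalone sketch of the mask construction in LowerROTL above (hypothetical driver code, not part of the patch): std::iota builds the identity byte mask 0..15, and std::rotate rotates it left by SHLAmt / 8 positions, yielding the byte permutation that instruction selection later matches (e.g. the xxswapd pattern for a 64-bit rotate).

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  unsigned SHLAmt = 64;                    // rotate amount in bits, a multiple of 8
  std::vector<int> Mask(16);
  std::iota(Mask.begin(), Mask.end(), 0);  // identity mask: 0, 1, ..., 15
  // Rotate left by SHLAmt / 8 byte positions, as LowerROTL does.
  std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
  for (int M : Mask)
    std::printf("%d ", M);                 // prints: 8 9 ... 15 0 1 ... 7
  std::printf("\n");
  return 0;
}
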
@@ -10929,6 +10961,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::MUL: return LowerMUL(Op, DAG);
case ISD::ABS: return LowerABS(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::ROTL: return LowerROTL(Op, DAG);
// For counter-based loop handling.
case ISD::INTRINSIC_W_CHAIN: return SDValue();


@@ -1103,6 +1103,7 @@ namespace llvm {
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;


@@ -1,37 +1,37 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s \
; RUN: -check-prefix=CHECK-VSX
; RUN: -check-prefix=P9-VSX
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -mattr=-vsx < %s | FileCheck %s \
; RUN: -check-prefix=CHECK-NOVSX
; RUN: -check-prefix=P9-NOVSX
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s \
; RUN: -check-prefix=P8-VSX
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -mattr=-vsx < %s | FileCheck %s \
; RUN: -check-prefix=P8-NOVSX
define <1 x i128> @rotl_64(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_64:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI0_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v5, v3, 15
; CHECK-VSX-NEXT: vsro v2, v2, v3
; CHECK-VSX-NEXT: vsl v4, v4, v5
; CHECK-VSX-NEXT: vsr v2, v2, v5
; CHECK-VSX-NEXT: xxlor v2, v4, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_64:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: xxswapd v2, v2
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_64:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI0_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v5, v3, 15
; CHECK-NOVSX-NEXT: vsro v2, v2, v3
; CHECK-NOVSX-NEXT: vsl v4, v4, v5
; CHECK-NOVSX-NEXT: vsr v2, v2, v5
; CHECK-NOVSX-NEXT: vor v2, v4, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_64:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 8
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_64:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxswapd v2, v2
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_64:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 8
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 64>
%shr = lshr <1 x i128> %num, <i128 64>
@@ -40,39 +40,25 @@ entry:
}
define <1 x i128> @rotl_32(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_32:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI1_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI1_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_32:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: xxsldwi v2, v2, v2, 3
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_32:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI1_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI1_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_32:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 12
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_32:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxsldwi v2, v2, v2, 3
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_32:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 12
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 32>
%shr = lshr <1 x i128> %num, <i128 96>
@@ -81,39 +67,25 @@ entry:
}
define <1 x i128> @rotl_96(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_96:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI2_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI2_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_96:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: xxsldwi v2, v2, v2, 1
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_96:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI2_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI2_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_96:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 4
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_96:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxsldwi v2, v2, v2, 1
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_96:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 4
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 96>
%shr = lshr <1 x i128> %num, <i128 32>
@@ -122,39 +94,25 @@ entry:
}
define <1 x i128> @rotl_16(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_16:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI3_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI3_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_16:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: vsldoi v2, v2, v2, 14
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_16:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI3_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI3_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_16:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 14
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_16:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: vsldoi v2, v2, v2, 14
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_16:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 14
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 16>
%shr = lshr <1 x i128> %num, <i128 112>
@@ -163,39 +121,25 @@ entry:
}
define <1 x i128> @rotl_112(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_112:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI4_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI4_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_112:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: vsldoi v2, v2, v2, 2
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_112:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI4_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI4_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_112:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 2
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_112:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: vsldoi v2, v2, v2, 2
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_112:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 2
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 112>
%shr = lshr <1 x i128> %num, <i128 16>
@@ -204,39 +148,25 @@ entry:
}
define <1 x i128> @rotl_8(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_8:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI5_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI5_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_8:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: vsldoi v2, v2, v2, 15
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_8:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI5_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI5_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_8:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 15
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_8:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: vsldoi v2, v2, v2, 15
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_8:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 15
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 8>
%shr = lshr <1 x i128> %num, <i128 120>
@@ -245,39 +175,25 @@ entry:
}
define <1 x i128> @rotl_120(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_120:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI6_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI6_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_120:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: vsldoi v2, v2, v2, 1
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_120:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI6_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI6_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_120:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 1
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_120:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: vsldoi v2, v2, v2, 1
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_120:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 1
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 120>
%shr = lshr <1 x i128> %num, <i128 8>
@@ -286,39 +202,62 @@ entry:
}
define <1 x i128> @rotl_28(<1 x i128> %num) {
; CHECK-VSX-LABEL: rotl_28:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI7_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI7_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: rotl_28:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: mfvsrld r4, v2
; P9-VSX-NEXT: mfvsrd r3, v2
; P9-VSX-NEXT: rotldi r5, r4, 28
; P9-VSX-NEXT: rldimi r5, r3, 28, 0
; P9-VSX-NEXT: rotldi r3, r3, 28
; P9-VSX-NEXT: rldimi r3, r4, 28, 0
; P9-VSX-NEXT: mtvsrdd v2, r5, r3
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: rotl_28:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI7_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI7_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: rotl_28:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: addi r3, r1, -32
; P9-NOVSX-NEXT: stvx v2, 0, r3
; P9-NOVSX-NEXT: ld r4, -32(r1)
; P9-NOVSX-NEXT: ld r3, -24(r1)
; P9-NOVSX-NEXT: rotldi r5, r4, 28
; P9-NOVSX-NEXT: rldimi r5, r3, 28, 0
; P9-NOVSX-NEXT: rotldi r3, r3, 28
; P9-NOVSX-NEXT: rldimi r3, r4, 28, 0
; P9-NOVSX-NEXT: std r3, -16(r1)
; P9-NOVSX-NEXT: addi r3, r1, -16
; P9-NOVSX-NEXT: std r5, -8(r1)
; P9-NOVSX-NEXT: lvx v2, 0, r3
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: rotl_28:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxswapd vs0, v2
; P8-VSX-NEXT: mfvsrd r3, v2
; P8-VSX-NEXT: rotldi r5, r3, 28
; P8-VSX-NEXT: mffprd r4, f0
; P8-VSX-NEXT: rldimi r5, r4, 28, 0
; P8-VSX-NEXT: rotldi r4, r4, 28
; P8-VSX-NEXT: rldimi r4, r3, 28, 0
; P8-VSX-NEXT: mtfprd f0, r5
; P8-VSX-NEXT: mtfprd f1, r4
; P8-VSX-NEXT: xxmrghd v2, vs1, vs0
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: rotl_28:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: addi r3, r1, -32
; P8-NOVSX-NEXT: stvx v2, 0, r3
; P8-NOVSX-NEXT: ld r3, -24(r1)
; P8-NOVSX-NEXT: ld r4, -32(r1)
; P8-NOVSX-NEXT: rotldi r5, r4, 28
; P8-NOVSX-NEXT: rotldi r6, r3, 28
; P8-NOVSX-NEXT: rldimi r5, r3, 28, 0
; P8-NOVSX-NEXT: rldimi r6, r4, 28, 0
; P8-NOVSX-NEXT: addi r3, r1, -16
; P8-NOVSX-NEXT: std r5, -8(r1)
; P8-NOVSX-NEXT: std r6, -16(r1)
; P8-NOVSX-NEXT: lvx v2, 0, r3
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 28>
%shr = lshr <1 x i128> %num, <i128 100>
@@ -327,39 +266,75 @@ entry:
}
define <1 x i128> @NO_rotl(<1 x i128> %num) {
; CHECK-VSX-LABEL: NO_rotl:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
; CHECK-VSX-NEXT: lxvx v3, 0, r3
; CHECK-VSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
; CHECK-VSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
; CHECK-VSX-NEXT: vslo v4, v2, v3
; CHECK-VSX-NEXT: vspltb v3, v3, 15
; CHECK-VSX-NEXT: vsl v3, v4, v3
; CHECK-VSX-NEXT: lxvx v4, 0, r3
; CHECK-VSX-NEXT: vsro v2, v2, v4
; CHECK-VSX-NEXT: vspltb v4, v4, 15
; CHECK-VSX-NEXT: vsr v2, v2, v4
; CHECK-VSX-NEXT: xxlor v2, v3, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: NO_rotl:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
; P9-VSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
; P9-VSX-NEXT: lxvx v3, 0, r3
; P9-VSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
; P9-VSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
; P9-VSX-NEXT: vslo v4, v2, v3
; P9-VSX-NEXT: vspltb v3, v3, 15
; P9-VSX-NEXT: vsl v3, v4, v3
; P9-VSX-NEXT: lxvx v4, 0, r3
; P9-VSX-NEXT: vsro v2, v2, v4
; P9-VSX-NEXT: vspltb v4, v4, 15
; P9-VSX-NEXT: vsr v2, v2, v4
; P9-VSX-NEXT: xxlor v2, v3, v2
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: NO_rotl:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
; CHECK-NOVSX-NEXT: lvx v3, 0, r3
; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
; CHECK-NOVSX-NEXT: vslo v4, v2, v3
; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
; CHECK-NOVSX-NEXT: vsl v3, v4, v3
; CHECK-NOVSX-NEXT: lvx v4, 0, r3
; CHECK-NOVSX-NEXT: vsro v2, v2, v4
; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
; CHECK-NOVSX-NEXT: vsr v2, v2, v4
; CHECK-NOVSX-NEXT: vor v2, v3, v2
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: NO_rotl:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
; P9-NOVSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
; P9-NOVSX-NEXT: lvx v3, 0, r3
; P9-NOVSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
; P9-NOVSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
; P9-NOVSX-NEXT: vslo v4, v2, v3
; P9-NOVSX-NEXT: vspltb v3, v3, 15
; P9-NOVSX-NEXT: vsl v3, v4, v3
; P9-NOVSX-NEXT: lvx v4, 0, r3
; P9-NOVSX-NEXT: vsro v2, v2, v4
; P9-NOVSX-NEXT: vspltb v4, v4, 15
; P9-NOVSX-NEXT: vsr v2, v2, v4
; P9-NOVSX-NEXT: vor v2, v3, v2
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: NO_rotl:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxswapd vs0, v2
; P8-VSX-NEXT: li r3, 0
; P8-VSX-NEXT: mfvsrd r5, v2
; P8-VSX-NEXT: mffprd r4, f0
; P8-VSX-NEXT: mtfprd f0, r3
; P8-VSX-NEXT: rotldi r3, r4, 20
; P8-VSX-NEXT: sldi r4, r4, 20
; P8-VSX-NEXT: rldimi r3, r5, 20, 0
; P8-VSX-NEXT: mtfprd f1, r4
; P8-VSX-NEXT: rldicl r4, r5, 28, 36
; P8-VSX-NEXT: mtfprd f2, r3
; P8-VSX-NEXT: mtfprd f3, r4
; P8-VSX-NEXT: xxmrghd v2, vs2, vs1
; P8-VSX-NEXT: xxmrghd v3, vs0, vs3
; P8-VSX-NEXT: xxlor v2, v2, v3
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: NO_rotl:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
; P8-NOVSX-NEXT: addis r4, r2, .LCPI8_1@toc@ha
; P8-NOVSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
; P8-NOVSX-NEXT: lvx v3, 0, r3
; P8-NOVSX-NEXT: addi r3, r4, .LCPI8_1@toc@l
; P8-NOVSX-NEXT: lvx v4, 0, r3
; P8-NOVSX-NEXT: vslo v5, v2, v3
; P8-NOVSX-NEXT: vspltb v3, v3, 15
; P8-NOVSX-NEXT: vsro v2, v2, v4
; P8-NOVSX-NEXT: vspltb v4, v4, 15
; P8-NOVSX-NEXT: vsl v3, v5, v3
; P8-NOVSX-NEXT: vsr v2, v2, v4
; P8-NOVSX-NEXT: vor v2, v3, v2
; P8-NOVSX-NEXT: blr
entry:
%shl = shl <1 x i128> %num, <i128 20>
%shr = lshr <1 x i128> %num, <i128 100>
@@ -368,15 +343,25 @@ entry:
}
define <1 x i128> @shufflevector(<1 x i128> %num) {
; CHECK-VSX-LABEL: shufflevector:
; CHECK-VSX: # %bb.0: # %entry
; CHECK-VSX-NEXT: xxswapd v2, v2
; CHECK-VSX-NEXT: blr
; P9-VSX-LABEL: shufflevector:
; P9-VSX: # %bb.0: # %entry
; P9-VSX-NEXT: xxswapd v2, v2
; P9-VSX-NEXT: blr
;
; CHECK-NOVSX-LABEL: shufflevector:
; CHECK-NOVSX: # %bb.0: # %entry
; CHECK-NOVSX-NEXT: vsldoi v2, v2, v2, 8
; CHECK-NOVSX-NEXT: blr
; P9-NOVSX-LABEL: shufflevector:
; P9-NOVSX: # %bb.0: # %entry
; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 8
; P9-NOVSX-NEXT: blr
;
; P8-VSX-LABEL: shufflevector:
; P8-VSX: # %bb.0: # %entry
; P8-VSX-NEXT: xxswapd v2, v2
; P8-VSX-NEXT: blr
;
; P8-NOVSX-LABEL: shufflevector:
; P8-NOVSX: # %bb.0: # %entry
; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 8
; P8-NOVSX-NEXT: blr
entry:
%0 = bitcast <1 x i128> %num to <2 x i64>
%vecins2 = shufflevector <2 x i64> %0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>