From 1b6cccba3ec1ab97688c8e0c1da3f8a5a7fa4d17 Mon Sep 17 00:00:00 2001
From: Esme-Yi
Date: Thu, 4 Jun 2020 10:09:06 +0000
Subject: [PATCH] [PowerPC][NFC] Testing ROTL of v1i128.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Summary: A bug was reported in bugzilla-45628: the swap_with_shift case
cannot be matched to the single HW instruction xxswapd as expected. The
case in fact matches the rotate idiom, but PPC does not support ROTL
for v1i128. This NFC patch adds tests for ROTL of v1i128 on master.

Reviewed By: steven.zhang

Differential Revision: https://reviews.llvm.org/D81073
---
 llvm/test/CodeGen/PowerPC/pr45628.ll | 385 +++++++++++++++++++++++++++
 1 file changed, 385 insertions(+)
 create mode 100644 llvm/test/CodeGen/PowerPC/pr45628.ll

diff --git a/llvm/test/CodeGen/PowerPC/pr45628.ll b/llvm/test/CodeGen/PowerPC/pr45628.ll
new file mode 100644
index 000000000000..5b3b16a3d159
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pr45628.ll
@@ -0,0 +1,385 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-VSX
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -mattr=-vsx < %s | FileCheck %s \
+; RUN:   -check-prefix=CHECK-NOVSX
+
+define <1 x i128> @rotl_64(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_64:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI0_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v5, v3, 15
+; CHECK-VSX-NEXT: vsro v2, v2, v3
+; CHECK-VSX-NEXT: vsl v4, v4, v5
+; CHECK-VSX-NEXT: vsr v2, v2, v5
+; CHECK-VSX-NEXT: xxlor v2, v4, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_64:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI0_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v5, v3, 15
+; CHECK-NOVSX-NEXT: vsro v2, v2, v3
+; CHECK-NOVSX-NEXT: vsl v4, v4, v5
+; CHECK-NOVSX-NEXT: vsr v2, v2, v5
+; CHECK-NOVSX-NEXT: vor v2, v4, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 64>
+  %shr = lshr <1 x i128> %num, <i128 64>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_32(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_32:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI1_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI1_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_32:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI1_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI1_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 32>
+  %shr = lshr <1 x i128> %num, <i128 96>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_96(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_96:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI2_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI2_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_96:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI2_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI2_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 96>
+  %shr = lshr <1 x i128> %num, <i128 32>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_16(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_16:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI3_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI3_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_16:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI3_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI3_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 16>
+  %shr = lshr <1 x i128> %num, <i128 112>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_112(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_112:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI4_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI4_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_112:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI4_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI4_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 112>
+  %shr = lshr <1 x i128> %num, <i128 16>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_8(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_8:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI5_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI5_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_8:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI5_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI5_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 8>
+  %shr = lshr <1 x i128> %num, <i128 120>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_120(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_120:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI6_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI6_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_120:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI6_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI6_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 120>
+  %shr = lshr <1 x i128> %num, <i128 8>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_28(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_28:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI7_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI7_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: rotl_28:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI7_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI7_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 28>
+  %shr = lshr <1 x i128> %num, <i128 100>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @NO_rotl(<1 x i128> %num) {
+; CHECK-VSX-LABEL: NO_rotl:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
+; CHECK-VSX-NEXT: lxvx v3, 0, r3
+; CHECK-VSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
+; CHECK-VSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
+; CHECK-VSX-NEXT: vslo v4, v2, v3
+; CHECK-VSX-NEXT: vspltb v3, v3, 15
+; CHECK-VSX-NEXT: vsl v3, v4, v3
+; CHECK-VSX-NEXT: lxvx v4, 0, r3
+; CHECK-VSX-NEXT: vsro v2, v2, v4
+; CHECK-VSX-NEXT: vspltb v4, v4, 15
+; CHECK-VSX-NEXT: vsr v2, v2, v4
+; CHECK-VSX-NEXT: xxlor v2, v3, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: NO_rotl:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI8_0@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI8_0@toc@l
+; CHECK-NOVSX-NEXT: lvx v3, 0, r3
+; CHECK-NOVSX-NEXT: addis r3, r2, .LCPI8_1@toc@ha
+; CHECK-NOVSX-NEXT: addi r3, r3, .LCPI8_1@toc@l
+; CHECK-NOVSX-NEXT: vslo v4, v2, v3
+; CHECK-NOVSX-NEXT: vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT: vsl v3, v4, v3
+; CHECK-NOVSX-NEXT: lvx v4, 0, r3
+; CHECK-NOVSX-NEXT: vsro v2, v2, v4
+; CHECK-NOVSX-NEXT: vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT: vsr v2, v2, v4
+; CHECK-NOVSX-NEXT: vor v2, v3, v2
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %shl = shl <1 x i128> %num,
+  %shr = lshr <1 x i128> %num,
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @shufflevector(<1 x i128> %num) {
+; CHECK-VSX-LABEL: shufflevector:
+; CHECK-VSX: # %bb.0: # %entry
+; CHECK-VSX-NEXT: xxswapd v2, v2
+; CHECK-VSX-NEXT: blr
+;
+; CHECK-NOVSX-LABEL: shufflevector:
+; CHECK-NOVSX: # %bb.0: # %entry
+; CHECK-NOVSX-NEXT: vsldoi v2, v2, v2, 8
+; CHECK-NOVSX-NEXT: blr
+entry:
+  %0 = bitcast <1 x i128> %num to <2 x i64>
+  %vecins2 = shufflevector <2 x i64> %0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+  %1 = bitcast <2 x i64> %vecins2 to <1 x i128>
+  ret <1 x i128> %1
+}
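
For context on the equivalence the bug report relies on: rotating a v1i128 left by 64 bits is the same permutation as swapping its two doublewords, which is exactly what the xxswapd (VSX) and vsldoi-by-8 (non-VSX) instructions in the shufflevector test above perform. A minimal IR sketch of that already-matched shuffle form (illustrative only, not part of the patch; the function name is made up):

define <1 x i128> @rotl64_as_doubleword_swap(<1 x i128> %num) {
entry:
  ; Reinterpret the 128-bit value as two doublewords and swap them.
  ; This shuffle form already lowers to a single xxswapd under VSX.
  %bc = bitcast <1 x i128> %num to <2 x i64>
  %swap = shufflevector <2 x i64> %bc, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
  %res = bitcast <2 x i64> %swap to <1 x i128>
  ret <1 x i128> %res
}

Once the backend recognizes the shl/lshr/or pattern as a rotate, rotl_64 above should collapse to the same single instruction instead of the vslo/vsl/vsro/vsr sequence the CHECK lines currently expect.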