Rewrite handling of the 64-bit palignr intrinsics to be vector shuffles.

Accordingly, stop multiplying the shift constant by 8 in the header, and change the intrinsic definition to the types we now expect. Extend the existing palignr test to check that we emit the correct instructions.

llvm-svn: 101332
commit 1bbc7086ff (parent eabc9623da)
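For context, PALIGNR concatenates its two operands (first operand high, second operand low), shifts the pair right by the immediate number of bytes, and keeps the low half. For the 64-bit form, a shift of 8 bytes or less can therefore be modelled as a byte shuffle of the two inputs, a shift of 9-15 bytes degenerates to a logical right shift of the first operand, and a shift of 16 bytes or more yields zero. A minimal scalar sketch of those semantics (illustration only; the helper name is invented and this is not the codegen path itself):

#include <array>
#include <cstdint>
#include <cstring>

// Hypothetical reference model of 64-bit palignr: result byte i is byte
// (shift + i) of the 16-byte concatenation {b, a} (b low, a high), and
// reads past the end of the concatenation produce zero.
std::array<uint8_t, 8> palignr64_ref(const std::array<uint8_t, 8> &a,
                                     const std::array<uint8_t, 8> &b,
                                     unsigned shift) {
  std::array<uint8_t, 16> concat{};
  std::memcpy(concat.data(), b.data(), 8);      // low 8 bytes: second operand
  std::memcpy(concat.data() + 8, a.data(), 8);  // high 8 bytes: first operand
  std::array<uint8_t, 8> result{};
  for (unsigned i = 0; i != 8; ++i)
    result[i] = (shift + i < 16) ? concat[shift + i] : 0;
  return result;
}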
@@ -245,7 +245,7 @@ BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
 BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
 BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
 BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
 BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
 
 BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
@@ -982,8 +982,38 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
   case X86::BI__builtin_ia32_palignr: {
-    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
-    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+    // If palignr is shifting the pair of input vectors less than 9 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 8) {
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+      llvm::SmallVector<llvm::Constant*, 8> Indices;
+      for (unsigned i = 0; i != 8; ++i)
+        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+    }
+
+    // If palignr is shifting the pair of input vectors more than 8 but less
+    // than 16 bytes, emit a logical right shift of the destination.
+    if (shiftVal < 16) {
+      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Shift the destination right by the remaining bits.
+      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
   }
   case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
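To make the shuffle case concrete: the mask built above is simply {shiftVal, shiftVal+1, ..., shiftVal+7} into the concatenation of the two shuffle operands, where lanes 0-7 come from Ops[1] (the second intrinsic argument) and lanes 8-15 from Ops[0] (the first). The psrlq case then handles shifts of 9-15 bytes by shifting the first operand right by (shiftVal - 8) * 8 bits. A small sketch of the mask arithmetic, independent of LLVM (the function name is invented for illustration):

#include <cstdio>

// Print the shuffle mask the lowering above would build for a byte shift
// in the shuffle regime (0..8).
static void printPalignrMask(unsigned shiftVal) {
  std::printf("shift %u -> mask {", shiftVal);
  for (unsigned i = 0; i != 8; ++i)
    std::printf("%s%u", i ? ", " : "", shiftVal + i);
  std::printf("}\n");
}

int main() {
  printPalignrMask(3); // {3..10}: result mixes bytes of both operands
  printPalignrMask(8); // {8..15}: result is exactly the first argument
  return 0;
}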
@@ -67,7 +67,7 @@ _mm_abs_epi32(__m128i a)
 }
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
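The header change follows directly from the new builtin signature: the MMX builtin now takes a byte count, just like the 128-bit variant, so the macro forwards n unchanged instead of scaling it to a bit count. Caller-visible behaviour is unchanged; for example (illustrative only, not part of the change):

#include <tmmintrin.h>

// Shift the {hi, lo} pair right by 3 bytes: the result keeps bytes 3..7
// of lo and pulls bytes 0..2 of hi into its top. The macro now passes the
// byte count 3 straight through to the builtin.
__m64 shift_pair_by_3(__m64 hi, __m64 lo) {
  return _mm_alignr_pi8(hi, lo, 3);
}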
@@ -1,12 +1,8 @@
 // RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
-typedef __attribute__((vector_size(8))) int int2;
 typedef __attribute__((vector_size(16))) int int4;
 
-// CHECK: palignr
-int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
 // CHECK: palignr
 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
 // CHECK: ret
@@ -17,3 +13,18 @@ int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
 int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
 // CHECK: xor
 int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
+
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+typedef __attribute__((vector_size(8))) int int2;
+
+// CHECK-NOT: palignr
+int2 align5(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 8); }
+
+// CHECK: psrlq
+int2 align6(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 9); }
+
+// CHECK: xor
+int2 align7(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 16); }
+
+// CHECK: palignr
+int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
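Read against the lowering, the new MMX cases hit each regime: shift 7 stays a palignr shuffle; shift 8 yields the mask {8..15}, which selects only the first argument, so the optimizer deletes the shuffle (hence CHECK-NOT); shift 9 becomes a psrlq of the first operand by 8 bits; and shift 16 folds to zero, which shows up as an xor. A trivial classifier mirroring that split (illustration only):

#include <cstdio>

// Which lowering regime a 64-bit palignr byte shift falls into, mirroring
// the if-ladder in the codegen hunk above.
static const char *palignrRegime(unsigned shiftVal) {
  if (shiftVal <= 8) return "shuffle"; // align8, align5
  if (shiftVal < 16) return "psrlq";   // align6
  return "zero";                       // align7
}

int main() {
  for (unsigned s : {7u, 8u, 9u, 16u})
    std::printf("shift %u -> %s\n", s, palignrRegime(s));
  return 0;
}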