//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
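
// For illustration, assuming a plain 32-bit int operand, a call such as
//   __sync_fetch_and_add(&x, 5)
// comes out of this helper roughly as
//   %old = atomicrmw add i32* %x, i32 5 seq_cst
// Pointer-typed operands round-trip through EmitToInt/EmitFromInt
// (ptrtoint/inttoptr), so the atomicrmw always operates on an integer.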

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
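
// The atomicrmw instruction returns the *old* value, so the "op_and_fetch"
// forms recompute the new value with an ordinary IR op. Roughly, for a
// 32-bit int,
//   __sync_add_and_fetch(&x, 5)
// becomes
//   %old = atomicrmw add i32* %x, i32 5 seq_cst
//   %new = add i32 %old, 5
// and %new is the call's result.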

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
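
// Illustrative IR, assuming 32-bit operands and the sadd intrinsic: the
// *.with.overflow intrinsics return a {result, overflow-bit} pair, e.g.
//   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue { i32, i1 } %pair, 0
//   %carry = extractvalue { i32, i1 } %pair, 1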

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }
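
  // For example, a call like __builtin_abs(-3) evaluates to a constant at
  // compile time, so the check above returns the folded value (i32 3)
  // directly and no abs computation is ever emitted.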

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
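
  // Illustrative lowering: va_start(ap, last) and va_end(ap) each become a
  // single intrinsic call on the va_list pointer bitcast to i8*, e.g.
  //   call void @llvm.va_start(i8* %ap1)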
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    // abs(x) -> x >= 0 ? x : -x
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
    Builder.CreateICmpSGE(ArgValue,
                          llvm::Constant::getNullValue(ArgValue->getType()),
                          "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
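
  // Illustrative use: the common likely()/unlikely() macros, e.g.
  //   #define likely(x) __builtin_expect(!!(x), 1)
  // end up here; llvm.expect only feeds branch-probability heuristics, and
  // the call's result is just the first argument's value.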
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
  }
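
  // Sketch of the emitted call (intrinsic name mangling is approximate):
  //   __builtin_object_size(p, 2)  ==>  call i64 @llvm.objectsize.i64(i8* %p, i1 true)
  // where the i1 selects the minimum (true) vs. maximum (false) estimate,
  // which is why only bit 1 of the type argument is forwarded above.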
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()), "isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()), "isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
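
  // Note on the alignment: EmitPointerWithAlignment returns the pointer
  // together with the best known alignment, and the emitted llvm.memcpy is
  // tagged with min(dest align, src align), the strongest guarantee that
  // holds for both operands.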

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
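
  // For example, __builtin___memcpy_chk(d, s, 16, 32) is emitted as a plain
  // memcpy because the copied size (16) provably fits in the destination
  // object size (32); when either size is non-constant or the copy could
  // overflow, we break out and fall back to the checking library call.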

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
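
  // Buffer layout sketch, as implied by the stores above (not a documented
  // ABI): slot 0 of the void** buffer receives the frame pointer and slot 2
  // the saved stack pointer; llvm.eh.sjlj.setjmp manages the remaining slots.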
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
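
  // Illustrative IR for the 32-bit case:
  //   __sync_val_compare_and_swap(&x, old, new)
  // becomes roughly
  //   %prev = cmpxchg i32* %x, i32 %old, i32 %new seq_cst
  // returning whatever was in memory, whether or not the swap happened.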
|
2009-04-07 08:55:51 +08:00
|
|
|
|
2009-05-08 14:58:22 +08:00
|
|
|
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }
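
  // Illustrative sketch (hypothetical names; the zext width depends on how
  // the builtin's result type converts): for
  // "bool ok = __sync_bool_compare_and_swap(&x, oldv, newv);" this lowers to
  // roughly:
  //   %prev = cmpxchg i32* %x, i32 %oldv, i32 %newv seq_cst
  //   %cmp  = icmp eq i32 %prev, %oldv
  //   %ok   = zext i1 %cmp to i8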
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }
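
  // Illustrative sketch (hypothetical names): "__sync_lock_release(&flag)"
  // with "int flag" becomes an atomic release store of zero, roughly:
  //   store atomic i32 0, i32* %flag release, align 4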
  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }
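
  // Illustrative sketch: a call to "__sync_synchronize()" therefore lowers to
  // the single instruction
  //   fence seq_cst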
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }
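
  // Illustrative sketch (hypothetical names; the exact integer width follows
  // the target's size_t and the bool return convention follows its ABI):
  // "__c11_atomic_is_lock_free(sizeof(T))" becomes a library call like
  //   %r = call zeroext i1 @__atomic_is_lock_free(i64 %size, i8* null)
  // with a real pointer argument for the __atomic_is_lock_free form.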
  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = 0;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }
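
  // Illustrative sketch (hypothetical names): with a constant ordering such
  // as "__atomic_test_and_set(p, __ATOMIC_ACQUIRE)" the case above emits
  // roughly:
  //   %old    = atomicrmw xchg i8* %p, i8 1 acquire
  //   %tobool = icmp ne i8 %old, 0
  // For a non-constant ordering it instead emits the switch over the five
  // ordering basic blocks constructed above.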
  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }
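
  // Illustrative sketch (hypothetical names): with a constant ordering,
  // "__atomic_clear(p, __ATOMIC_RELEASE)" emits roughly:
  //   store atomic i8 0, i8* %p release, align 1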
  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }
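
  // Illustrative sketch: with a constant ordering,
  // "__atomic_thread_fence(__ATOMIC_ACQUIRE)" emits "fence acquire", while
  // "__atomic_signal_fence(__ATOMIC_ACQUIRE)" emits the single-thread form,
  //   fence singlethread acquire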

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }
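
  // Illustrative sketch (hypothetical names): under -ffast-math, where sqrt
  // carries the 'const' attribute and unsafe-math is on, "sqrtf(x)" becomes
  //   %r = call float @llvm.sqrt.f32(float %x)
  // instead of a libm call.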
  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
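
  // Illustrative sketch (hypothetical names): when pow carries the 'const'
  // attribute (e.g. under -fno-math-errno), "powf(x, y)" becomes
  //   %r = call float @llvm.pow.f32(float %x, float %y)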
  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {
    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {
    // We translate all of these builtins directly to the relevant llvm IR node.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    std::pair<llvm::Value *, unsigned> SumOutPtr =
      EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown security overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
    SumOutStore->setAlignment(SumOutPtr.second);

    return RValue::get(Carry);
  }
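
  // Illustrative sketch (hypothetical names):
  // "__builtin_sadd_overflow(x, y, &res)" lowers to roughly:
  //   %pair  = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  //   %sum   = extractvalue {i32, i1} %pair, 0
  //   %carry = extractvalue {i32, i1} %pair, 1
  //   store i32 %sum, i32* %res
  // and the i1 %carry is returned as the builtin's result.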
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
  case Builtin::BI__noop:
    return RValue::get(0);
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
        llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::aarch64:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool V1Ty=false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}
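
// Illustrative sketch (values assumed for concreteness): a NeonTypeFlags with
// element type Int32 maps to <2 x i32> for a 64-bit ("D") register and, with
// the quad bit set, to <4 x i32> for a 128-bit ("Q") register; V1Ty forces a
// one-element vector such as <1 x i64> for the AArch64 scalar intrinsics.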

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  Value* SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();

  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}

/// \brief Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
      return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}
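
// Illustrative sketch (hypothetical values): for a <4 x i32> vector, a signed
// right shift by the element size (32) is remapped above to a shift by 31,
// roughly "ashr <4 x i32> %v, <i32 31, i32 31, i32 31, i32 31>", so the sign
// bit still propagates; the unsigned case folds directly to a zero splat.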

/// EmitPointerWithAlignment - Given an expression with a pointer type, find
/// the alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  assert(Addr->getType()->isPointerType());
  Addr = Addr->IgnoreParens();
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
        ICE->getSubExpr()->getType()->isPointerType()) {
      std::pair<llvm::Value*, unsigned> Ptr =
        EmitPointerWithAlignment(ICE->getSubExpr());
      Ptr.first = Builder.CreateBitCast(Ptr.first,
                                        ConvertType(Addr->getType()));
      return Ptr;
    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
      LValue LV = EmitLValue(ICE->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = ICE->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = UO->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }

  unsigned Align = 1;
  QualType PtTy = Addr->getType()->getPointeeType();
  if (!PtTy->isIncompleteType())
    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();

  return std::make_pair(EmitScalarExpr(Addr), Align);
}
static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
|
2013-09-26 20:16:47 +08:00
|
|
|
unsigned BuiltinID,
|
|
|
|
const CallExpr *E) {
|
2013-09-24 10:48:06 +08:00
|
|
|
unsigned int Int = 0;
|
2013-10-05 16:22:55 +08:00
|
|
|
// Scalar result generated across vectors
|
|
|
|
bool AcrossVec = false;
|
|
|
|
// Extend element of one-element vector
|
|
|
|
bool ExtendEle = false;
|
|
|
|
bool OverloadInt = false;
|
2013-10-30 23:20:07 +08:00
|
|
|
bool OverloadCmpInt = false;
|
2013-10-18 02:12:50 +08:00
|
|
|
bool OverloadWideInt = false;
|
2013-10-18 22:03:36 +08:00
|
|
|
bool OverloadNarrowInt = false;
|
2013-09-24 10:48:06 +08:00
|
|
|
const char *s = NULL;
|
|
|
|
|
2013-10-14 22:37:40 +08:00
|
|
|
SmallVector<Value *, 4> Ops;
|
|
|
|
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
|
|
|
|
Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
|
|
|
|
}
|
|
|
|
|
2013-09-24 10:48:06 +08:00
|
|
|
// AArch64 scalar builtins are not overloaded, they do not have an extra
|
|
|
|
// argument that specifies the vector type, need to handle each case.
|
|
|
|
switch (BuiltinID) {
|
|
|
|
default: break;
|
2013-11-06 01:42:24 +08:00
|
|
|
case AArch64::BI__builtin_neon_vget_lane_i8:
|
|
|
|
case AArch64::BI__builtin_neon_vget_lane_i16:
|
|
|
|
case AArch64::BI__builtin_neon_vget_lane_i32:
|
|
|
|
case AArch64::BI__builtin_neon_vget_lane_i64:
|
|
|
|
case AArch64::BI__builtin_neon_vget_lane_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vget_lane_f64:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_i8:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_i16:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_i32:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_i64:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vgetq_lane_f64:
|
|
|
|
return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vget_lane_i8, E);
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_i8:
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_i16:
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_i32:
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_i64:
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vset_lane_f64:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_i8:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_i16:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_i32:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_i64:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vsetq_lane_f64:
|
|
|
|
return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vset_lane_i8, E);
|
|
|
|
// Crypto
|
|
|
|
case AArch64::BI__builtin_neon_vsha1h_u32:
|
|
|
|
Int = Intrinsic::arm_neon_sha1h;
|
|
|
|
s = "sha1h"; OverloadInt = true; break;
|
|
|
|
case AArch64::BI__builtin_neon_vsha1cq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_sha1c;
|
|
|
|
s = "sha1c"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vsha1pq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_sha1p;
|
|
|
|
s = "sha1p"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vsha1mq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_sha1m;
|
|
|
|
s = "sha1m"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Add
|
|
|
|
case AArch64::BI__builtin_neon_vaddd_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vaddds;
|
|
|
|
s = "vaddds"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vaddd_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vadddu;
|
|
|
|
s = "vadddu"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Sub
|
|
|
|
case AArch64::BI__builtin_neon_vsubd_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vsubds;
|
|
|
|
s = "vsubds"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vsubd_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vsubdu;
|
|
|
|
s = "vsubdu"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Saturating Add
|
|
|
|
case AArch64::BI__builtin_neon_vqaddb_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vqaddh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqadds_s32:
|
|
|
|
case AArch64::BI__builtin_neon_vqaddd_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqadds;
|
|
|
|
s = "vqadds"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vqaddb_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vqaddh_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vqadds_u32:
|
|
|
|
case AArch64::BI__builtin_neon_vqaddd_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqaddu;
|
|
|
|
s = "vqaddu"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Saturating Sub
|
|
|
|
case AArch64::BI__builtin_neon_vqsubb_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubs_s32:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubd_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqsubs;
|
|
|
|
s = "vqsubs"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vqsubb_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubh_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubs_u32:
|
|
|
|
case AArch64::BI__builtin_neon_vqsubd_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqsubu;
|
|
|
|
s = "vqsubu"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Shift Left
|
|
|
|
case AArch64::BI__builtin_neon_vshld_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vshlds;
|
|
|
|
s = "vshlds"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vshld_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vshldu;
|
|
|
|
s = "vshldu"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Saturating Shift Left
|
|
|
|
case AArch64::BI__builtin_neon_vqshlb_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vqshlh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqshls_s32:
|
|
|
|
case AArch64::BI__builtin_neon_vqshld_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqshls;
|
|
|
|
s = "vqshls"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vqshlb_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vqshlh_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vqshls_u32:
|
|
|
|
case AArch64::BI__builtin_neon_vqshld_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqshlu;
|
|
|
|
s = "vqshlu"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Rouding Shift Left
|
|
|
|
case AArch64::BI__builtin_neon_vrshld_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vrshlds;
|
|
|
|
s = "vrshlds"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vrshld_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vrshldu;
|
|
|
|
s = "vrshldu"; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Saturating Rouding Shift Left
|
|
|
|
case AArch64::BI__builtin_neon_vqrshlb_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshlh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshls_s32:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshld_s64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqrshls;
|
|
|
|
s = "vqrshls"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vqrshlb_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshlh_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshls_u32:
|
|
|
|
case AArch64::BI__builtin_neon_vqrshld_u64:
|
2013-10-05 16:22:55 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vqrshlu;
|
|
|
|
s = "vqrshlu"; OverloadInt = true; break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Reduce Pairwise Add
|
|
|
|
case AArch64::BI__builtin_neon_vpaddd_s64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpadds_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpaddd_f64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Reduce Pairwise Floating Point Max
|
|
|
|
case AArch64::BI__builtin_neon_vpmaxs_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpmaxqd_f64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Reduce Pairwise Floating Point Min
|
|
|
|
case AArch64::BI__builtin_neon_vpmins_f32:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpminqd_f64:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
// Scalar Reduce Pairwise Floating Point Maxnm
|
|
|
|
case AArch64::BI__builtin_neon_vpmaxnms_f32:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
|
|
|
// Scalar Reduce Pairwise Floating Point Minnm
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpminnms_f32:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
2013-09-24 10:48:06 +08:00
|
|
|
case AArch64::BI__builtin_neon_vpminnmqd_f64:
|
2013-09-26 20:16:47 +08:00
|
|
|
Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq";
|
2013-10-05 16:22:55 +08:00
|
|
|
break;
|
|
|
|
// The followings are intrinsics with scalar results generated AcrossVec vectors
|
|
|
|
case AArch64::BI__builtin_neon_vaddlv_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlv_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_s32:
|
|
|
|
Int = Intrinsic::aarch64_neon_saddlv;
|
|
|
|
AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vaddlv_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlv_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddlvq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_uaddlv;
|
|
|
|
AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vmaxv_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxv_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_s32:
|
|
|
|
Int = Intrinsic::aarch64_neon_smaxv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vmaxv_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxv_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_umaxv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vminv_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vminv_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_s32:
|
|
|
|
Int = Intrinsic::aarch64_neon_sminv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "sminv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vminv_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vminv_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_uminv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "uminv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vaddv_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddv_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_s8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_s32:
|
|
|
|
case AArch64::BI__builtin_neon_vaddv_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddv_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_u8:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_u16:
|
|
|
|
case AArch64::BI__builtin_neon_vaddvq_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vaddv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "vaddv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vmaxvq_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vmaxv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vminvq_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vminv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "vminv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vmaxnmvq_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vmaxnmv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
|
|
|
|
case AArch64::BI__builtin_neon_vminnmvq_f32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vminnmv;
|
|
|
|
AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
|
2013-10-08 01:07:17 +08:00
|
|
|
// Scalar Integer Saturating Doubling Multiply Half High
|
|
|
|
case AArch64::BI__builtin_neon_vqdmulhh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqdmulhs_s32:
|
|
|
|
Int = Intrinsic::arm_neon_vqdmulh;
|
|
|
|
s = "vqdmulh"; OverloadInt = true; break;
|
|
|
|
// Scalar Integer Saturating Rounding Doubling Multiply Half High
|
|
|
|
case AArch64::BI__builtin_neon_vqrdmulhh_s16:
|
|
|
|
case AArch64::BI__builtin_neon_vqrdmulhs_s32:
|
|
|
|
Int = Intrinsic::arm_neon_vqrdmulh;
|
|
|
|
s = "vqrdmulh"; OverloadInt = true; break;
|
|
|
|
// Scalar Floating-point Multiply Extended
|
|
|
|
case AArch64::BI__builtin_neon_vmulxs_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vmulxd_f64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vmulx;
|
|
|
|
s = "vmulx"; OverloadInt = true; break;
|
|
|
|
// Scalar Floating-point Reciprocal Step and
|
|
|
|
case AArch64::BI__builtin_neon_vrecpss_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vrecpsd_f64:
|
|
|
|
Int = Intrinsic::arm_neon_vrecps;
|
|
|
|
s = "vrecps"; OverloadInt = true; break;
|
|
|
|
// Scalar Floating-point Reciprocal Square Root Step
|
|
|
|
case AArch64::BI__builtin_neon_vrsqrtss_f32:
|
|
|
|
case AArch64::BI__builtin_neon_vrsqrtsd_f64:
|
|
|
|
Int = Intrinsic::arm_neon_vrsqrts;
|
|
|
|
s = "vrsqrts"; OverloadInt = true; break;
|
2013-10-09 04:43:46 +08:00
|
|
|
// Scalar Signed Integer Convert To Floating-point
|
|
|
|
case AArch64::BI__builtin_neon_vcvts_f32_s32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vcvtf32_s32,
|
|
|
|
s = "vcvtf"; OverloadInt = false; break;
|
|
|
|
case AArch64::BI__builtin_neon_vcvtd_f64_s64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vcvtf64_s64,
|
|
|
|
s = "vcvtf"; OverloadInt = false; break;
|
|
|
|
// Scalar Unsigned Integer Convert To Floating-point
|
|
|
|
case AArch64::BI__builtin_neon_vcvts_f32_u32:
|
|
|
|
Int = Intrinsic::aarch64_neon_vcvtf32_u32,
|
|
|
|
s = "vcvtf"; OverloadInt = false; break;
|
|
|
|
case AArch64::BI__builtin_neon_vcvtd_f64_u64:
|
|
|
|
Int = Intrinsic::aarch64_neon_vcvtf64_u64,
|
|
|
|
s = "vcvtf"; OverloadInt = false; break;
|
2013-10-09 06:09:29 +08:00
  // Scalar Floating-point Reciprocal Estimate
  case AArch64::BI__builtin_neon_vrecpes_f32:
  case AArch64::BI__builtin_neon_vrecped_f64:
    Int = Intrinsic::arm_neon_vrecpe;
    s = "vrecpe"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Exponent
  case AArch64::BI__builtin_neon_vrecpxs_f32:
  case AArch64::BI__builtin_neon_vrecpxd_f64:
    Int = Intrinsic::aarch64_neon_vrecpx;
    s = "vrecpx"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Square Root Estimate
  case AArch64::BI__builtin_neon_vrsqrtes_f32:
  case AArch64::BI__builtin_neon_vrsqrted_f64:
    Int = Intrinsic::arm_neon_vrsqrte;
    s = "vrsqrte"; OverloadInt = true; break;
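  // The reciprocal estimate/step pairs above exist to support Newton-Raphson
  // refinement in user code. A hedged sketch at the ACLE level (assuming
  // <arm_neon.h>; FRECPS computes 2.0 - a*b):
  //   float x = vrecpes_f32(a);      // rough estimate of 1.0f / a
  //   x = x * vrecpss_f32(a, x);     // one refinement step
  //   x = x * vrecpss_f32(a, x);     // a couple of steps approach full precision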
  // Scalar Compare Equal
  case AArch64::BI__builtin_neon_vceqd_s64:
  case AArch64::BI__builtin_neon_vceqd_u64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    OverloadCmpInt = true; break;
  // Scalar Compare Equal To Zero
  case AArch64::BI__builtin_neon_vceqzd_s64:
  case AArch64::BI__builtin_neon_vceqzd_u64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than or Equal
  case AArch64::BI__builtin_neon_vcged_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; break;
  case AArch64::BI__builtin_neon_vcged_u64:
    Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than or Equal To Zero
  case AArch64::BI__builtin_neon_vcgezd_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than
  case AArch64::BI__builtin_neon_vcgtd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; break;
  case AArch64::BI__builtin_neon_vcgtd_u64:
    Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than Zero
  case AArch64::BI__builtin_neon_vcgtzd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Less Than or Equal
  case AArch64::BI__builtin_neon_vcled_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  case AArch64::BI__builtin_neon_vcled_u64:
    Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Less Than or Equal To Zero
  case AArch64::BI__builtin_neon_vclezd_s64:
    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Less Than
  case AArch64::BI__builtin_neon_vcltd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  case AArch64::BI__builtin_neon_vcltd_u64:
    Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Less Than Zero
  case AArch64::BI__builtin_neon_vcltzd_s64:
    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
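  // Note: AArch64 has no dedicated scalar LE/LT compares; the vcled/vcltd
  // builtins above reuse the GE/GT (or unsigned HS/HI) intrinsics with the
  // operands swapped, since a <= b iff b >= a. Compare-to-zero forms instead
  // append an explicit zero as the intrinsic's second operand.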
  // Scalar Floating-point Compare Equal
  case AArch64::BI__builtin_neon_vceqs_f32:
  case AArch64::BI__builtin_neon_vceqd_f64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Equal To Zero
  case AArch64::BI__builtin_neon_vceqzs_f32:
  case AArch64::BI__builtin_neon_vceqzd_f64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Or Equal
  case AArch64::BI__builtin_neon_vcges_f32:
  case AArch64::BI__builtin_neon_vcged_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Or Equal To Zero
  case AArch64::BI__builtin_neon_vcgezs_f32:
  case AArch64::BI__builtin_neon_vcgezd_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than
  case AArch64::BI__builtin_neon_vcgts_f32:
  case AArch64::BI__builtin_neon_vcgtd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Zero
  case AArch64::BI__builtin_neon_vcgtzs_f32:
  case AArch64::BI__builtin_neon_vcgtzd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Less Than or Equal
  case AArch64::BI__builtin_neon_vcles_f32:
  case AArch64::BI__builtin_neon_vcled_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Swap operands: a <= b iff b >= a.
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Compare Less Than Or Equal To Zero
  case AArch64::BI__builtin_neon_vclezs_f32:
  case AArch64::BI__builtin_neon_vclezd_f64:
    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Less Than
  case AArch64::BI__builtin_neon_vclts_f32:
  case AArch64::BI__builtin_neon_vcltd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Compare Less Than Zero
  case AArch64::BI__builtin_neon_vcltzs_f32:
  case AArch64::BI__builtin_neon_vcltzd_f64:
    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Greater Than Or Equal
  case AArch64::BI__builtin_neon_vcages_f32:
  case AArch64::BI__builtin_neon_vcaged_f64:
    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Greater Than
  case AArch64::BI__builtin_neon_vcagts_f32:
  case AArch64::BI__builtin_neon_vcagtd_f64:
    Int = Intrinsic::aarch64_neon_vcagt; s = "vcagt";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Less Than Or Equal
  case AArch64::BI__builtin_neon_vcales_f32:
  case AArch64::BI__builtin_neon_vcaled_f64:
    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Absolute Compare Less Than
  case AArch64::BI__builtin_neon_vcalts_f32:
  case AArch64::BI__builtin_neon_vcaltd_f64:
    Int = Intrinsic::aarch64_neon_vcagt; s = "vcalt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Bitwise Test Bits
  case AArch64::BI__builtin_neon_vtstd_s64:
  case AArch64::BI__builtin_neon_vtstd_u64:
    Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
    OverloadCmpInt = true; break;
  // Scalar Absolute Value
  case AArch64::BI__builtin_neon_vabsd_s64:
    Int = Intrinsic::aarch64_neon_vabs;
    s = "vabs"; OverloadInt = false; break;
  // Scalar Signed Saturating Absolute Value
  case AArch64::BI__builtin_neon_vqabsb_s8:
  case AArch64::BI__builtin_neon_vqabsh_s16:
  case AArch64::BI__builtin_neon_vqabss_s32:
  case AArch64::BI__builtin_neon_vqabsd_s64:
    Int = Intrinsic::arm_neon_vqabs;
    s = "vqabs"; OverloadInt = true; break;
  // Scalar Negate
  case AArch64::BI__builtin_neon_vnegd_s64:
    Int = Intrinsic::aarch64_neon_vneg;
    s = "vneg"; OverloadInt = false; break;
  // Scalar Signed Saturating Negate
  case AArch64::BI__builtin_neon_vqnegb_s8:
  case AArch64::BI__builtin_neon_vqnegh_s16:
  case AArch64::BI__builtin_neon_vqnegs_s32:
  case AArch64::BI__builtin_neon_vqnegd_s64:
    Int = Intrinsic::arm_neon_vqneg;
    s = "vqneg"; OverloadInt = true; break;
  // Scalar Signed Saturating Accumulated of Unsigned Value
  case AArch64::BI__builtin_neon_vuqaddb_s8:
  case AArch64::BI__builtin_neon_vuqaddh_s16:
  case AArch64::BI__builtin_neon_vuqadds_s32:
  case AArch64::BI__builtin_neon_vuqaddd_s64:
    Int = Intrinsic::aarch64_neon_vuqadd;
    s = "vuqadd"; OverloadInt = true; break;
  // Scalar Unsigned Saturating Accumulated of Signed Value
  case AArch64::BI__builtin_neon_vsqaddb_u8:
  case AArch64::BI__builtin_neon_vsqaddh_u16:
  case AArch64::BI__builtin_neon_vsqadds_u32:
  case AArch64::BI__builtin_neon_vsqaddd_u64:
    Int = Intrinsic::aarch64_neon_vsqadd;
    s = "vsqadd"; OverloadInt = true; break;
  // Signed Saturating Doubling Multiply-Add Long
  case AArch64::BI__builtin_neon_vqdmlalh_s16:
  case AArch64::BI__builtin_neon_vqdmlals_s32:
    Int = Intrinsic::aarch64_neon_vqdmlal;
    s = "vqdmlal"; OverloadWideInt = true; break;
  // Signed Saturating Doubling Multiply-Subtract Long
  case AArch64::BI__builtin_neon_vqdmlslh_s16:
  case AArch64::BI__builtin_neon_vqdmlsls_s32:
    Int = Intrinsic::aarch64_neon_vqdmlsl;
    s = "vqdmlsl"; OverloadWideInt = true; break;
  // Signed Saturating Doubling Multiply Long
  case AArch64::BI__builtin_neon_vqdmullh_s16:
  case AArch64::BI__builtin_neon_vqdmulls_s32:
    Int = Intrinsic::aarch64_neon_vqdmull;
    s = "vqdmull"; OverloadWideInt = true; break;
  // Scalar Signed Saturating Extract Unsigned Narrow
  case AArch64::BI__builtin_neon_vqmovunh_s16:
  case AArch64::BI__builtin_neon_vqmovuns_s32:
  case AArch64::BI__builtin_neon_vqmovund_s64:
    Int = Intrinsic::arm_neon_vqmovnsu;
    s = "vqmovun"; OverloadNarrowInt = true; break;
  // Scalar Signed Saturating Extract Narrow
  case AArch64::BI__builtin_neon_vqmovnh_s16:
  case AArch64::BI__builtin_neon_vqmovns_s32:
  case AArch64::BI__builtin_neon_vqmovnd_s64:
    Int = Intrinsic::arm_neon_vqmovns;
    s = "vqmovn"; OverloadNarrowInt = true; break;
  // Scalar Unsigned Saturating Extract Narrow
  case AArch64::BI__builtin_neon_vqmovnh_u16:
  case AArch64::BI__builtin_neon_vqmovns_u32:
  case AArch64::BI__builtin_neon_vqmovnd_u64:
    Int = Intrinsic::arm_neon_vqmovnu;
    s = "vqmovn"; OverloadNarrowInt = true; break;
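  // Hedged sketch of the saturating-narrow scalars above at the ACLE level
  // (assuming <arm_neon.h>):
  //   int8_t  r = vqmovnh_s16(300);    // narrows with saturation -> 127
  //   uint8_t u = vqmovunh_s16(-5);    // signed-to-unsigned narrow -> 0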
  // Scalar Signed Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vshrd_n_s64:
    Int = Intrinsic::aarch64_neon_vshrds_n;
    s = "vsshr"; OverloadInt = false; break;
  // Scalar Unsigned Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vshrd_n_u64:
    Int = Intrinsic::aarch64_neon_vshrdu_n;
    s = "vushr"; OverloadInt = false; break;
  // Scalar Signed Rounding Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vrshrd_n_s64:
    Int = Intrinsic::aarch64_neon_vrshrds_n;
    s = "vsrshr"; OverloadInt = false; break;
  // Scalar Unsigned Rounding Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vrshrd_n_u64:
    Int = Intrinsic::aarch64_neon_vrshrdu_n;
    s = "vurshr"; OverloadInt = false; break;
  // Scalar Signed Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vsrad_n_s64:
    Int = Intrinsic::aarch64_neon_vsrads_n;
    s = "vssra"; OverloadInt = false; break;
  // Scalar Unsigned Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vsrad_n_u64:
    Int = Intrinsic::aarch64_neon_vsradu_n;
    s = "vusra"; OverloadInt = false; break;
  // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vrsrad_n_s64:
    Int = Intrinsic::aarch64_neon_vrsrads_n;
    s = "vsrsra"; OverloadInt = false; break;
  // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vrsrad_n_u64:
    Int = Intrinsic::aarch64_neon_vrsradu_n;
    s = "vursra"; OverloadInt = false; break;
  // Scalar Signed/Unsigned Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vshld_n_s64:
  case AArch64::BI__builtin_neon_vshld_n_u64:
    Int = Intrinsic::aarch64_neon_vshld_n;
    s = "vshl"; OverloadInt = false; break;
  // Signed Saturating Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vqshlb_n_s8:
  case AArch64::BI__builtin_neon_vqshlh_n_s16:
  case AArch64::BI__builtin_neon_vqshls_n_s32:
  case AArch64::BI__builtin_neon_vqshld_n_s64:
    Int = Intrinsic::aarch64_neon_vqshls_n;
    s = "vsqshl"; OverloadInt = true; break;
  // Unsigned Saturating Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vqshlb_n_u8:
  case AArch64::BI__builtin_neon_vqshlh_n_u16:
  case AArch64::BI__builtin_neon_vqshls_n_u32:
  case AArch64::BI__builtin_neon_vqshld_n_u64:
    Int = Intrinsic::aarch64_neon_vqshlu_n;
    s = "vuqshl"; OverloadInt = true; break;
  // Signed Saturating Shift Left Unsigned (Immediate)
  case AArch64::BI__builtin_neon_vqshlub_n_s8:
  case AArch64::BI__builtin_neon_vqshluh_n_s16:
  case AArch64::BI__builtin_neon_vqshlus_n_s32:
  case AArch64::BI__builtin_neon_vqshlud_n_s64:
    Int = Intrinsic::aarch64_neon_vqshlus_n;
    s = "vsqshlu"; OverloadInt = true; break;
  // Shift Right And Insert (Immediate)
  case AArch64::BI__builtin_neon_vsrid_n_s64:
  case AArch64::BI__builtin_neon_vsrid_n_u64:
    Int = Intrinsic::aarch64_neon_vsrid_n;
    s = "vsri"; OverloadInt = false; break;
  // Shift Left And Insert (Immediate)
  case AArch64::BI__builtin_neon_vslid_n_s64:
  case AArch64::BI__builtin_neon_vslid_n_u64:
    Int = Intrinsic::aarch64_neon_vslid_n;
    s = "vsli"; OverloadInt = false; break;
  // Signed Saturating Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrnh_n_s16:
  case AArch64::BI__builtin_neon_vqshrns_n_s32:
  case AArch64::BI__builtin_neon_vqshrnd_n_s64:
    Int = Intrinsic::aarch64_neon_vsqshrn;
    s = "vsqshrn"; OverloadInt = true; break;
  // Unsigned Saturating Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrnh_n_u16:
  case AArch64::BI__builtin_neon_vqshrns_n_u32:
  case AArch64::BI__builtin_neon_vqshrnd_n_u64:
    Int = Intrinsic::aarch64_neon_vuqshrn;
    s = "vuqshrn"; OverloadInt = true; break;
  // Signed Saturating Rounded Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrnh_n_s16:
  case AArch64::BI__builtin_neon_vqrshrns_n_s32:
  case AArch64::BI__builtin_neon_vqrshrnd_n_s64:
    Int = Intrinsic::aarch64_neon_vsqrshrn;
    s = "vsqrshrn"; OverloadInt = true; break;
  // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrnh_n_u16:
  case AArch64::BI__builtin_neon_vqrshrns_n_u32:
  case AArch64::BI__builtin_neon_vqrshrnd_n_u64:
    Int = Intrinsic::aarch64_neon_vuqrshrn;
    s = "vuqrshrn"; OverloadInt = true; break;
  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrunh_n_s16:
  case AArch64::BI__builtin_neon_vqshruns_n_s32:
  case AArch64::BI__builtin_neon_vqshrund_n_s64:
    Int = Intrinsic::aarch64_neon_vsqshrun;
    s = "vsqshrun"; OverloadInt = true; break;
  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrunh_n_s16:
  case AArch64::BI__builtin_neon_vqrshruns_n_s32:
  case AArch64::BI__builtin_neon_vqrshrund_n_s64:
    Int = Intrinsic::aarch64_neon_vsqrshrun;
    s = "vsqrshrun"; OverloadInt = true; break;
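  // All of the _n builtins above take the shift amount as an immediate, which
  // Sema requires to be an integer constant expression. Hedged sketch (ACLE):
  //   int64_t r = vshrd_n_s64(x, 3);   // arithmetic shift right by literal 3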
  // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_f32_s32:
    Int = Intrinsic::aarch64_neon_vcvtf32_n_s32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_f64_s64:
    Int = Intrinsic::aarch64_neon_vcvtf64_n_s64;
    s = "vcvtf"; OverloadInt = false; break;
  // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_f32_u32:
    Int = Intrinsic::aarch64_neon_vcvtf32_n_u32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_f64_u64:
    Int = Intrinsic::aarch64_neon_vcvtf64_n_u64;
    s = "vcvtf"; OverloadInt = false; break;
  }

  if (!Int)
    return 0;

  // This is an AArch64 scalar builtin returning a scalar type; it is mapped
  // to an AArch64 intrinsic that returns a one-element vector type.
  Function *F = 0;
  if (AcrossVec) {
    // Get the vector type of the argument.
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    llvm::Type *ETy = VTy->getElementType();
    llvm::VectorType *RTy = llvm::VectorType::get(ETy, 1);

    if (ExtendEle) {
      assert(!ETy->isFloatingPointTy());
      RTy = llvm::VectorType::getExtendedElementVectorType(RTy);
    }

    llvm::Type *Tys[2] = {RTy, VTy};
    F = CGF.CGM.getIntrinsic(Int, Tys);
    assert(E->getNumArgs() == 1);
  } else if (OverloadInt) {
    // Determine the type of this overloaded AArch64 intrinsic.
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    assert(VTy);

    F = CGF.CGM.getIntrinsic(Int, VTy);
  } else if (OverloadWideInt || OverloadNarrowInt) {
    // Determine the type of this overloaded AArch64 intrinsic.
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    llvm::VectorType *RTy = OverloadWideInt ?
      llvm::VectorType::getExtendedElementVectorType(VTy) :
      llvm::VectorType::getTruncatedElementVectorType(VTy);
    F = CGF.CGM.getIntrinsic(Int, RTy);
  } else if (OverloadCmpInt) {
    // Determine the types of this overloaded AArch64 intrinsic.
    SmallVector<llvm::Type *, 3> Tys;
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    Tys.push_back(VTy);
    Ty = CGF.ConvertType(Arg->getType());
    VTy = llvm::VectorType::get(Ty, 1);
    Tys.push_back(VTy);
    Tys.push_back(VTy);

    F = CGF.CGM.getIntrinsic(Int, Tys);
  } else
    F = CGF.CGM.getIntrinsic(Int);

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  // Cast the intrinsic's one-element vector result back to the scalar type
  // expected by the builtin.
  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
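// Hedged sketch of the net effect of the helper above: a scalar builtin such
// as vabsd_s64(x) is emitted as a call on a one-element vector (roughly
//   %v = call <1 x i64> @llvm.aarch64.neon.vabs(<1 x i64> %x)
// in textual IR) and the result is bitcast back to the scalar i64 type the
// builtin is declared to return.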

Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  // Process AArch64 scalar builtins
  if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E))
    return Result;

  if (BuiltinID == AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 &&
           "Variadic __clear_cache slipped through on AArch64");

    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value *, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
  SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default:
    return 0;
  // AArch64 builtins mapping to legacy ARM v7 builtins.
  // FIXME: the mapped builtins listed correspond to what has been tested
  // in aarch64-neon-intrinsics.c so far.
  case AArch64::BI__builtin_neon_vmul_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E);
  case AArch64::BI__builtin_neon_vmulq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E);
  case AArch64::BI__builtin_neon_vabd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E);
  case AArch64::BI__builtin_neon_vabdq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E);
  case AArch64::BI__builtin_neon_vfma_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E);
  case AArch64::BI__builtin_neon_vfmaq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E);
  case AArch64::BI__builtin_neon_vbsl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E);
  case AArch64::BI__builtin_neon_vbslq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E);
  case AArch64::BI__builtin_neon_vrsqrts_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E);
  case AArch64::BI__builtin_neon_vrsqrtsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E);
  case AArch64::BI__builtin_neon_vrecps_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E);
  case AArch64::BI__builtin_neon_vrecpsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E);
  case AArch64::BI__builtin_neon_vcage_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E);
  case AArch64::BI__builtin_neon_vcale_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E);
  case AArch64::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through: vcale is vcage with the operands swapped.
  case AArch64::BI__builtin_neon_vcageq_v: {
    Function *F;
    if (VTy->getElementType()->isIntegerTy(64))
      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq);
    else
      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case AArch64::BI__builtin_neon_vcalt_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E);
  case AArch64::BI__builtin_neon_vcagt_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E);
  case AArch64::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through: vcalt is vcagt with the operands swapped.
  case AArch64::BI__builtin_neon_vcagtq_v: {
    Function *F;
    if (VTy->getElementType()->isIntegerTy(64))
      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq);
    else
      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case AArch64::BI__builtin_neon_vtst_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E);
  case AArch64::BI__builtin_neon_vtstq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E);
  case AArch64::BI__builtin_neon_vhadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E);
  case AArch64::BI__builtin_neon_vhaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E);
  case AArch64::BI__builtin_neon_vhsub_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E);
  case AArch64::BI__builtin_neon_vhsubq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E);
  case AArch64::BI__builtin_neon_vrhadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E);
  case AArch64::BI__builtin_neon_vrhaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E);
  case AArch64::BI__builtin_neon_vqadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E);
  case AArch64::BI__builtin_neon_vqaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E);
  case AArch64::BI__builtin_neon_vqsub_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E);
  case AArch64::BI__builtin_neon_vqsubq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E);
  case AArch64::BI__builtin_neon_vshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E);
  case AArch64::BI__builtin_neon_vshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E);
  case AArch64::BI__builtin_neon_vqshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E);
  case AArch64::BI__builtin_neon_vqshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E);
  case AArch64::BI__builtin_neon_vrshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E);
  case AArch64::BI__builtin_neon_vrshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E);
  case AArch64::BI__builtin_neon_vqrshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E);
  case AArch64::BI__builtin_neon_vqrshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E);
  case AArch64::BI__builtin_neon_vaddhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E);
  case AArch64::BI__builtin_neon_vraddhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E);
  case AArch64::BI__builtin_neon_vsubhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E);
  case AArch64::BI__builtin_neon_vrsubhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E);
  case AArch64::BI__builtin_neon_vmull_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E);
  case AArch64::BI__builtin_neon_vqdmull_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E);
  case AArch64::BI__builtin_neon_vqdmlal_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E);
  case AArch64::BI__builtin_neon_vqdmlsl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E);
  case AArch64::BI__builtin_neon_vmax_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E);
  case AArch64::BI__builtin_neon_vmaxq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E);
  case AArch64::BI__builtin_neon_vmin_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E);
  case AArch64::BI__builtin_neon_vminq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E);
  case AArch64::BI__builtin_neon_vpmax_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E);
  case AArch64::BI__builtin_neon_vpmin_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E);
  case AArch64::BI__builtin_neon_vpadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E);
  case AArch64::BI__builtin_neon_vqdmulh_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E);
  case AArch64::BI__builtin_neon_vqdmulhq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E);
  case AArch64::BI__builtin_neon_vqrdmulh_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E);
  case AArch64::BI__builtin_neon_vqrdmulhq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E);
  // Shift by immediate
  case AArch64::BI__builtin_neon_vshr_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
  case AArch64::BI__builtin_neon_vshrq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
  case AArch64::BI__builtin_neon_vrshr_n_v:
  case AArch64::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vurshr
               : Intrinsic::aarch64_neon_vsrshr;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
  case AArch64::BI__builtin_neon_vsra_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
  case AArch64::BI__builtin_neon_vsraq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
  case AArch64::BI__builtin_neon_vrsra_n_v:
  case AArch64::BI__builtin_neon_vrsraq_n_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Int = usgn ? Intrinsic::aarch64_neon_vurshr
               : Intrinsic::aarch64_neon_vsrshr;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  }
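  // Note: vrsra_n above has no single intrinsic; it is decomposed into the
  // rounding shift right followed by a plain integer add of the accumulator,
  // which the backend can then pattern-match back to SRSRA/URSRA.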
  case AArch64::BI__builtin_neon_vshl_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
  case AArch64::BI__builtin_neon_vshlq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
  case AArch64::BI__builtin_neon_vqshl_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
  case AArch64::BI__builtin_neon_vqshlq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E);
  case AArch64::BI__builtin_neon_vqshlu_n_v:
  case AArch64::BI__builtin_neon_vqshluq_n_v:
    Int = Intrinsic::aarch64_neon_vsqshlu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n");
  case AArch64::BI__builtin_neon_vsri_n_v:
  case AArch64::BI__builtin_neon_vsriq_n_v:
    Int = Intrinsic::aarch64_neon_vsri;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n");
  case AArch64::BI__builtin_neon_vsli_n_v:
  case AArch64::BI__builtin_neon_vsliq_n_v:
    Int = Intrinsic::aarch64_neon_vsli;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n");
  case AArch64::BI__builtin_neon_vshll_n_v: {
    llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
  case AArch64::BI__builtin_neon_vshrn_n_v: {
    llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
    if (usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
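  // Note: vshll_n and vshrn_n above deliberately avoid target intrinsics:
  // the widening/narrowing shifts are expressed as ordinary IR (ext + shl,
  // shift + trunc) so the middle-end optimizers can reason about them.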
  case AArch64::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_vsqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case AArch64::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_vrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case AArch64::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_vsqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case AArch64::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vuqshrn
               : Intrinsic::aarch64_neon_vsqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case AArch64::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn
               : Intrinsic::aarch64_neon_vsqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");

  // Convert
  case AArch64::BI__builtin_neon_vmovl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E);
  case AArch64::BI__builtin_neon_vcvt_n_f32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_f32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case AArch64::BI__builtin_neon_vcvt_n_s32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_s32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E);
  case AArch64::BI__builtin_neon_vcvt_n_u32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_u32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_s64_v:
  case AArch64::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  // Load/Store
  case AArch64::BI__builtin_neon_vld1_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E);
  case AArch64::BI__builtin_neon_vld1q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E);
  case AArch64::BI__builtin_neon_vld2_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E);
  case AArch64::BI__builtin_neon_vld2q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E);
  case AArch64::BI__builtin_neon_vld3_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E);
  case AArch64::BI__builtin_neon_vld3q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E);
  case AArch64::BI__builtin_neon_vld4_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E);
  case AArch64::BI__builtin_neon_vld4q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E);
  case AArch64::BI__builtin_neon_vst1_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E);
  case AArch64::BI__builtin_neon_vst1q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E);
  case AArch64::BI__builtin_neon_vst2_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E);
  case AArch64::BI__builtin_neon_vst2q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E);
  case AArch64::BI__builtin_neon_vst3_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E);
  case AArch64::BI__builtin_neon_vst3q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E);
  case AArch64::BI__builtin_neon_vst4_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E);
  case AArch64::BI__builtin_neon_vst4q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E);
  // Crypto
  case AArch64::BI__builtin_neon_vaeseq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese, Ty),
                        Ops, "aese");
  case AArch64::BI__builtin_neon_vaesdq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesd, Ty),
                        Ops, "aesd");
  case AArch64::BI__builtin_neon_vaesmcq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesmc, Ty),
                        Ops, "aesmc");
  case AArch64::BI__builtin_neon_vaesimcq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesimc, Ty),
                        Ops, "aesimc");
  case AArch64::BI__builtin_neon_vsha1su1q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su1, Ty),
                        Ops, "sha1su1");
  case AArch64::BI__builtin_neon_vsha256su0q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su0, Ty),
                        Ops, "sha256su0");
  case AArch64::BI__builtin_neon_vsha1su0q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su0, Ty),
                        Ops, "sha1su0");
  case AArch64::BI__builtin_neon_vsha256hq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h, Ty),
                        Ops, "sha256h");
  case AArch64::BI__builtin_neon_vsha256h2q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h2, Ty),
                        Ops, "sha256h2");
  case AArch64::BI__builtin_neon_vsha256su1q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
                        Ops, "sha256su1");
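  // Hedged user-level sketch of the crypto builtins above (ACLE, assuming
  // <arm_neon.h> and a target with the crypto extension):
  //   uint8x16_t s = vaeseq_u8(state, roundkey);   // single AES encrypt round
  //   s = vaesmcq_u8(s);                           // AES MixColumns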
  // AArch64-only builtins
  case AArch64::BI__builtin_neon_vfma_lane_v:
  case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfmaq_lane_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() / 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfma_laneq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfms_v:
  case AArch64::BI__builtin_neon_vfmsq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateFNeg(Ops[1]);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // LLVM's fma intrinsic puts the accumulator in the last position, but the
    // AArch64 intrinsic has it first.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vmaxnm_v:
  case AArch64::BI__builtin_neon_vmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_vmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  }
  case AArch64::BI__builtin_neon_vminnm_v:
  case AArch64::BI__builtin_neon_vminnmq_v: {
    Int = Intrinsic::aarch64_neon_vminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  }
  case AArch64::BI__builtin_neon_vpmaxnm_v:
  case AArch64::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_vpmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case AArch64::BI__builtin_neon_vpminnm_v:
  case AArch64::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_vpminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case AArch64::BI__builtin_neon_vpmaxq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  }
  case AArch64::BI__builtin_neon_vpminq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  }
  case AArch64::BI__builtin_neon_vpaddq_v: {
    Int = Intrinsic::arm_neon_vpadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd");
  }
  case AArch64::BI__builtin_neon_vmulx_v:
  case AArch64::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_vmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  }
}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < 2; i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
  if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
      (BuiltinID == ARM::BI__builtin_arm_ldrex &&
       getContext().getTypeSize(E->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }
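  // The ldrexd path above reassembles the 64-bit result from the two 32-bit
  // halves returned by the intrinsic: the high half is zero-extended and
  // shifted left by 32 (nuw is safe because the zext cleared the top bits),
  // then or-ed with the zero-extended low half.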
  if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
                                                  getContext().getTypeSize(Ty));
    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());

    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
    Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);
    else {
      Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
      return Builder.CreateBitCast(Val, RealResTy);
    }
  }
  if (BuiltinID == ARM::BI__builtin_arm_strexd ||
      (BuiltinID == ARM::BI__builtin_arm_strex &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }
  if (BuiltinID == ARM::BI__builtin_arm_strex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
                                                 getContext().getTypeSize(Ty));
    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }

    Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
    return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
  }

  if (BuiltinID == ARM::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
    return Builder.CreateCall(F);
  }
  if (BuiltinID == ARM::BI__builtin_arm_sevl) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_sevl);
    return Builder.CreateCall(F);
  }
  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case ARM::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::arm_crc32b; break;
  case ARM::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
  case ARM::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::arm_crc32h; break;
  case ARM::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
  case ARM::BI__builtin_arm_crc32w:
  case ARM::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::arm_crc32w; break;
  case ARM::BI__builtin_arm_crc32cw:
  case ARM::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));

    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics, hence we need different codegen for these cases.
    if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
        BuiltinID == ARM::BI__builtin_arm_crc32cd) {
      Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
      Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
      Value *Arg1b = Builder.CreateLShr(Arg1, C1);
      Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
      return Builder.CreateCall2(F, Res, Arg1b);
    } else {
      Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      return Builder.CreateCall2(F, Arg0, Arg1);
    }
  }
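  // Hedged sketch of the 64-bit CRC decomposition above at the source level:
  //   uint32_t c = __builtin_arm_crc32d(crc, x);
  // is emitted as two 32-bit steps, low word first:
  //   uint32_t c2 = __builtin_arm_crc32w(
  //       __builtin_arm_crc32w(crc, (uint32_t)x), (uint32_t)(x >> 32));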
  SmallVector<Value*, 4> Ops;
  llvm::Value *Align = 0;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld1_v:
      case ARM::BI__builtin_neon_vld1q_v:
      case ARM::BI__builtin_neon_vld1q_lane_v:
      case ARM::BI__builtin_neon_vld1_lane_v:
      case ARM::BI__builtin_neon_vld1_dup_v:
      case ARM::BI__builtin_neon_vld1q_dup_v:
      case ARM::BI__builtin_neon_vst1_v:
      case ARM::BI__builtin_neon_vst1q_v:
      case ARM::BI__builtin_neon_vst1q_lane_v:
      case ARM::BI__builtin_neon_vst1_lane_v:
      case ARM::BI__builtin_neon_vst2_v:
      case ARM::BI__builtin_neon_vst2q_v:
      case ARM::BI__builtin_neon_vst2_lane_v:
      case ARM::BI__builtin_neon_vst2q_lane_v:
      case ARM::BI__builtin_neon_vst3_v:
      case ARM::BI__builtin_neon_vst3q_v:
      case ARM::BI__builtin_neon_vst3_lane_v:
      case ARM::BI__builtin_neon_vst3q_lane_v:
      case ARM::BI__builtin_neon_vst4_v:
      case ARM::BI__builtin_neon_vst4q_v:
      case ARM::BI__builtin_neon_vst4_lane_v:
      case ARM::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value*, unsigned> Src =
          EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_v:
      case ARM::BI__builtin_neon_vld2q_v:
      case ARM::BI__builtin_neon_vld3_v:
      case ARM::BI__builtin_neon_vld3q_v:
      case ARM::BI__builtin_neon_vld4_v:
      case ARM::BI__builtin_neon_vld4q_v:
      case ARM::BI__builtin_neon_vld2_lane_v:
      case ARM::BI__builtin_neon_vld2q_lane_v:
      case ARM::BI__builtin_neon_vld3_lane_v:
      case ARM::BI__builtin_neon_vld3q_lane_v:
      case ARM::BI__builtin_neon_vld4_lane_v:
      case ARM::BI__builtin_neon_vld4q_lane_v:
      case ARM::BI__builtin_neon_vld2_dup_v:
      case ARM::BI__builtin_neon_vld3_dup_v:
      case ARM::BI__builtin_neon_vld4_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value*, unsigned> Src =
          EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }
  // vget_lane and vset_lane are not overloaded and do not have an extra
  // argument that specifies the vector type.
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  }
|
|
|
|
// Get the last argument, which specifies the vector type.
|
2010-06-09 11:48:40 +08:00
|
|
|
llvm::APSInt Result;
|
|
|
|
const Expr *Arg = E->getArg(E->getNumArgs()-1);
|
|
|
|
if (!Arg->isIntegerConstantExpr(Result, getContext()))
|
|
|
|
return 0;
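
  // __builtin_arm_vcvtr_{f,d} is a VFP builtin rather than a NEON one: the
  // trailing constant argument only encodes signedness here, and the float
  // vs. double overload comes from the builtin ID itself.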
  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();
  bool rightShift = false;

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vbsl_v:
  case ARM::BI__builtin_neon_vbslq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
                        Ops, "vbsl");
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v: {
    llvm::VectorType *SrcTy =
      llvm::VectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
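  // The vca{le,lt}{,q} builtins have no intrinsics of their own; they are
  // the vca{ge,gt}{,q} comparisons with the two operands swapped, so each
  // pair below swaps and then falls through.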
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    // Generate a target-independent intrinsic. We also need a second
    // argument saying whether clz of zero is undefined; on ARM it isn't.
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    // Generate a target-independent intrinsic.
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
    return EmitNeonCall(F, Ops, "vctpop");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
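  // vext concatenates the two sources and extracts a contiguous run of
  // elements starting at the immediate: for <4 x i16> and CV == 2 this is
  // shufflevector(%a, %b, <i32 2, i32 3, i32 4, i32 5>).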
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
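  // The vldN cases below pass the alignment collected earlier through to
  // the arm_neon load intrinsics as their final argument.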
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
      Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
      // Combine them.
      SmallVector<Constant*, 2> Indices;
      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
      SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
    }
    // fall through
  case ARM::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
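  // vldN_dup: 64-bit elements need no dup, so a plain vldN load suffices.
  // Other element sizes load lane 0 with the corresponding vldNlane
  // intrinsic and then splat that lane across each vector of the result.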
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special case.  There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // Splat lane 0 to all elements in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
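  // vmovl widens each element, e.g. <8 x i8> -> <8 x i16>: bitcast the
  // operand to the truncated element type, then zero- or sign-extend.
  // vmovn is the inverse and is a plain trunc.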
  case ARM::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement this leads to bad CodeGen. So we need an
    // intrinsic for now.
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case ARM::BI__builtin_neon_vfma_v:
  case ARM::BI__builtin_neon_vfmaq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
  }
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
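  // vqdmlal/vqdmlsl have no dedicated intrinsic here: they are emitted as a
  // saturating doubling multiply-long (vqdmull) followed by a saturating
  // add or subtract with the accumulator.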
  case ARM::BI__builtin_neon_vqdmlal_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlal");

    SmallVector<Value *, 2> AddOps;
    AddOps.push_back(Ops[0]);
    AddOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
                        AddOps, "vqdmlal");
  }
  case ARM::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlsl");

    SmallVector<Value *, 2> SubOps;
    SubOps.push_back(Ops[0]);
    SubOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
                        SubOps, "vqdmlsl");
  }
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
                        Ops, "vrsqrts");
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, usgn, "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
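  // The vstN stores mirror the loads above: the pointer is Ops[0] and the
  // alignment gathered earlier is appended as the intrinsic's final operand.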
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v: {
    llvm::VectorType *SrcTy =
      llvm::VectorType::getExtendedElementVectorType(VTy);

    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
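  // The vtbl/vtbx builtins are byte-wise table lookups. Their intrinsics
  // are not overloaded; they take fixed byte-vector types.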
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
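  // vtrn/vuzp/vzip produce two result vectors. Ops[0] is the sret pointer;
  // each half of the result is built with a shufflevector and stored to the
  // corresponding slot of that pointer.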
  case ARM::BI__builtin_neon_vtrn_v:
  case ARM::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(Builder.getInt32(i+vi));
        Indices.push_back(Builder.getInt32(i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vuzp_v:
  case ARM::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vzip_v:
  case ARM::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
}
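
/// BuildVector - Build an LLVM vector from the scalar operands in Ops,
/// folding to a ConstantVector when every operand is a Constant.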
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}
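
/// EmitX86BuiltinExpr - Emit LLVM IR for a call to an X86 builtin, folding
/// arguments that are required to be integer constant expressions down to
/// ConstantInt immediates.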
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
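  // The ldmxcsr/stmxcsr intrinsics take an i8* operand, so the MXCSR value
  // is staged through a memory temporary.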
  case X86::BI__builtin_ia32_ldmxcsr: {
    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, Int8PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    Value *Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // Cast val to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
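  // palignr concatenates the two input vectors and extracts a byte-aligned
  // window; for small shifts this is expressible as a single shufflevector
  // over {Ops[1], Ops[0]} starting at byte shiftVal.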
|
2010-09-28 09:28:56 +08:00
|
|
|
case X86::BI__builtin_ia32_palignr: {
|
|
|
|
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2010-09-28 09:28:56 +08:00
|
|
|
// If palignr is shifting the pair of input vectors less than 9 bytes,
|
|
|
|
// emit a shuffle instruction.
|
|
|
|
if (shiftVal <= 8) {
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<llvm::Constant*, 8> Indices;
|
2010-09-28 09:28:56 +08:00
|
|
|
for (unsigned i = 0; i != 8; ++i)
|
|
|
|
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2011-02-15 08:14:06 +08:00
|
|
|
Value* SV = llvm::ConstantVector::get(Indices);
|
2010-09-28 09:28:56 +08:00
|
|
|
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
|
|
|
|
}
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2010-09-28 09:28:56 +08:00
|
|
|
// If palignr is shifting the pair of input vectors more than 8 but less
|
|
|
|
// than 16 bytes, emit a logical right shift of the destination.
|
|
|
|
if (shiftVal < 16) {
|
|
|
|
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2010-09-28 09:28:56 +08:00
|
|
|
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
|
|
|
|
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2010-09-28 09:28:56 +08:00
|
|
|
// create i32 constant
|
|
|
|
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
|
2011-07-18 20:00:32 +08:00
|
|
|
return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
|
2010-09-28 09:28:56 +08:00
|
|
|
}
|
2012-09-21 08:18:27 +08:00
|
|
|
|
2011-09-14 08:52:45 +08:00
|
|
|
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
|
2010-09-28 09:28:56 +08:00
|
|
|
return llvm::Constant::getNullValue(ConvertType(E->getType()));
|
|
|
|
}
|
2009-12-14 13:15:02 +08:00
|
|
|
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors 16 bytes or less,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // The PSRLDQ intrinsic takes its shift amount in bits.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
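  // The AVX2 form does not shift across the 256-bit register as a whole:
  // vpalignr applies the same 16-byte alignment independently within each
  // 128-bit lane, which is why the shuffle mask below is built per lane.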
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors 16 bytes or less,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so build the shuffle mask
      // one lane at a time.
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value *SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // The VPSRLDQ intrinsic likewise takes its shift amount in bits.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
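  // Non-temporal stores have no first-class IR representation; emit an
  // ordinary store tagged with !nontemporal metadata (the i32 1 node) and
  // let the backend select the appropriate movnt instruction.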
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);

    // If the operand is an integer, we can't assume alignment. Otherwise,
    // assume natural alignment.
    QualType ArgTy = E->getArg(1)->getType();
    unsigned Align;
    if (ArgTy->isIntegerType())
      Align = 1;
    else
      Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
    SI->setAlignment(Align);
    return SI;
  }
  // 3DNow!
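  // pswapd swaps the two 32-bit halves of a 64-bit MMX register; a single
  // intrinsic serves both the float (pswapdsf) and int (pswapdsi) builtins
  // since the operation only moves bit patterns.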
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
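  // The rdrand/rdseed intrinsics return a {value, i32 success} pair. The
  // *_step builtins store the value through their pointer argument and
  // return the success flag (0 means no random data was available).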
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  // AVX2 broadcast
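  // vbroadcasti128 loads from memory, but the builtin takes a vector value;
  // spill the operand to a stack temporary and hand its address to the
  // intrinsic.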
  case X86::BI__builtin_ia32_vbroadcastsi256: {
    Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], VecTmp);
    Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
    return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
  }
  }
}
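
/// EmitPPCBuiltinExpr - Emit a call to a PowerPC (AltiVec) builtin,
/// returning null for any builtin this hook does not handle.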
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
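  // These builtins take (offset, pointer) arguments; form the effective
  // address with a byte-wise GEP, drop the now-redundant operand, and call
  // the matching AltiVec load intrinsic on the resulting pointer.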
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st
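  // Stores mirror the loads above: the arguments are (value, offset,
  // pointer), so the effective address is computed from Ops[2] and Ops[1]
  // before calling the two-operand store intrinsic.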
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}