Add _mm_prefetch and some others as MS builtins

This patch adds several built-ins that are required for MS
compatibility. _mm_prefetch must be a built-in because it takes a
compile-time constant argument, and our prior approach of #defining it
to the existing built-in does not survive a re-declaration of
_mm_prefetch. The other built-ins are declared by the Windows system
headers: if a user includes those headers but not intrin.h, the
functions still need to work, so they must be built-ins too, because in
that case we never get a chance to implement them in intrin.h.

llvm-svn: 201734
Warren Hunt 2014-02-19 23:20:20 +00:00
parent 5142a2c03a
commit 40d6f29ad8
9 changed files with 123 additions and 23 deletions
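
To illustrate the redeclaration hazard the message describes — a minimal sketch, assuming the old xmmintrin.h macro that this patch removes (see the xmmintrin.h hunk below); the prototype is the one used in the new Sema test:

/* Old approach: a function-like macro. */
#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))

/* A later prototype gets macro-expanded:
   _mm_prefetch(char const*, int) becomes
   (__builtin_prefetch((void *)(char const*), 0, (int))) -- a syntax error.
   With _mm_prefetch as a true builtin, the redeclaration is accepted. */
void _mm_prefetch(char const*, int);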


@@ -679,6 +679,10 @@ LANGBUILTIN(_alloca, "v*z", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__assume, "vb", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__noop, "v.", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__debugbreak, "v", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
 // C99 library functions
 // C99 stdlib.h
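
Decoding the signature strings per the key documented in Builtins.def ('Li' long, 'D' volatile, '*' pointer), these correspond to the MS prototypes (parameter names taken from the Intrin.h inlines removed below):

long _InterlockedCompareExchange(long volatile *_Destination,
                                 long _Exchange, long _Comparand);
long _InterlockedIncrement(long volatile *_Value);
long _InterlockedDecrement(long volatile *_Value);
long _InterlockedExchangeAdd(long volatile *_Addend, long _Value);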


@@ -59,6 +59,7 @@ BUILTIN(__builtin_ia32_pswapdsi, "V2iV2i", "nc")
 // All MMX instructions will be generated via builtins. Any MMX vector
 // types (<1 x i64>, <2 x i32>, etc.) that aren't used by these builtins will be
 // expanded by the back-end.
+BUILTIN(_mm_prefetch, "vcC*i", "nc")
 BUILTIN(__builtin_ia32_emms, "v", "")
 BUILTIN(__builtin_ia32_paddb, "V8cV8cV8c", "")
 BUILTIN(__builtin_ia32_paddw, "V4sV4sV4s", "")
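
Likewise, "vcC*i" (void return; pointer to const char; int) with attributes "nc" (no-throw, const) gives _mm_prefetch the prototype the new tests rely on:

void _mm_prefetch(char const *, int);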


@@ -7892,6 +7892,7 @@ private:
   bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool SemaBuiltinVAStart(CallExpr *TheCall);
   bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
@@ -7906,6 +7907,7 @@ public:
 private:
   bool SemaBuiltinPrefetch(CallExpr *TheCall);
+  bool SemaBuiltinMMPrefetch(CallExpr *TheCall);
  bool SemaBuiltinObjectSize(CallExpr *TheCall);
   bool SemaBuiltinLongjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);


@@ -1500,6 +1500,42 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     return RValue::get(EmitLValue(E->getArg(0)).getAddress());
   case Builtin::BI__noop:
     return RValue::get(0);
+  case Builtin::BI_InterlockedCompareExchange: {
+    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
+        EmitScalarExpr(E->getArg(0)),
+        EmitScalarExpr(E->getArg(2)),
+        EmitScalarExpr(E->getArg(1)),
+        SequentiallyConsistent);
+    CXI->setVolatile(true);
+    return RValue::get(CXI);
+  }
+  case Builtin::BI_InterlockedIncrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+        AtomicRMWInst::Add,
+        EmitScalarExpr(E->getArg(0)),
+        ConstantInt::get(Int32Ty, 1),
+        llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedDecrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+        AtomicRMWInst::Sub,
+        EmitScalarExpr(E->getArg(0)),
+        ConstantInt::get(Int32Ty, 1),
+        llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedExchangeAdd: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+        AtomicRMWInst::Add,
+        EmitScalarExpr(E->getArg(0)),
+        EmitScalarExpr(E->getArg(1)),
+        llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(RMWI);
+  }
   }
   // If this is an alias for a lib function (e.g. __builtin_sin), emit
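
One subtlety: atomicrmw yields the value memory held *before* the operation, while MS's _InterlockedIncrement/_InterlockedDecrement return the value *after* it — hence the extra CreateAdd/CreateSub of 1 above. A rough C equivalent of the increment path, sketched with the same GNU atomics Intrin.h already uses:

static long InterlockedIncrement_sketch(long volatile *p) {
  /* __atomic_fetch_add returns the old value; add 1 for the MS "new value". */
  return __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) + 1;
}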
@@ -4487,6 +4523,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   switch (BuiltinID) {
   default: return 0;
+  case X86::BI_mm_prefetch: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Value *RW = ConstantInt::get(Int32Ty, 0);
+    Value *Locality = EmitScalarExpr(E->getArg(1));
+    Value *Data = ConstantInt::get(Int32Ty, 1);
+    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+    return Builder.CreateCall4(F, Address, RW, Locality, Data);
+  }
   case X86::BI__builtin_ia32_vec_init_v8qi:
   case X86::BI__builtin_ia32_vec_init_v4hi:
   case X86::BI__builtin_ia32_vec_init_v2si:
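
The hard-coded operands match the macro being removed from xmmintrin.h below: RW = 0 selects a read prefetch and Data = 1 the data cache, with only the locality hint taken from the call. In effect, same behavior as the old #define:

/* _mm_prefetch(a, sel) lowers like: */
__builtin_prefetch((void *)(a), /*rw=*/0, (sel));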


@@ -623,10 +623,6 @@ static __inline__ short __attribute__((__always_inline__, __nodebug__))
 _InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
   return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
-  return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
@@ -661,10 +657,6 @@ static __inline__ short __attribute__((__always_inline__, __nodebug__))
 _InterlockedIncrement16(short volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedIncrement(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedIncrement64(__int64 volatile *_Value) {
@@ -678,10 +670,6 @@ static __inline__ short __attribute__((__always_inline__, __nodebug__))
 _InterlockedDecrement16(short volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedDecrement(long volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedDecrement64(__int64 volatile *_Value) {
@@ -791,12 +779,6 @@ _InterlockedCompareExchange16(short volatile *_Destination,
   __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
   return _Comparand;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedCompareExchange(long volatile *_Destination,
-                            long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
-  return _Comparand;
-}
 #ifdef __x86_64__
 static __inline__ void *__attribute__((__always_inline__, __nodebug__))
 _InterlockedCompareExchangePointer(void *volatile *_Destination,


@@ -672,11 +672,6 @@ _mm_storer_ps(float *__p, __m128 __a)
 #define _MM_HINT_T2 1
 #define _MM_HINT_NTA 0
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
-   Sema doesn't do any form of constant propagation yet. */
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_stream_pi(__m64 *__p, __m64 __a)
 {
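
Callers keep passing the hint constants; with the builtin in place, the constant flows through Sema's new range check into the locality operand. A sketch, using the #defines above:

void prefetch_hints(const char *p) {
  _mm_prefetch(p, _MM_HINT_T2);  /* locality 1, per the #define above */
  _mm_prefetch(p, _MM_HINT_NTA); /* locality 0 */
}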


@@ -317,6 +317,11 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
     if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
       return ExprError();
     break;
+  case llvm::Triple::x86:
+  case llvm::Triple::x86_64:
+    if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+      return ExprError();
+    break;
   default:
     break;
   }
@@ -655,6 +660,15 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   return false;
 }
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+  switch (BuiltinID) {
+  case X86::BI_mm_prefetch:
+    return SemaBuiltinMMPrefetch(TheCall);
+    break;
+  }
+  return false;
+}
 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
 /// Returns true when the format fits the function and the FormatStringInfo has
@@ -1921,6 +1935,26 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
   return false;
 }
+/// SemaBuiltinMMPrefetch - Handle _mm_prefetch.
+// This is declared to take (const char*, int)
+bool Sema::SemaBuiltinMMPrefetch(CallExpr *TheCall) {
+  Expr *Arg = TheCall->getArg(1);
+  // We can't check the value of a dependent argument.
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+  llvm::APSInt Result;
+  if (SemaBuiltinConstantArg(TheCall, 1, Result))
+    return true;
+  if (Result.getLimitedValue() > 3)
+    return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+           << "0" << "3" << Arg->getSourceRange();
+  return false;
+}
 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
 /// TheCall is a constant expression.
 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,

@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple=i686-unknown-unknown -fms-extensions -emit-llvm -o - %s | FileCheck %s
+extern void printf(const char*, ...);
+void f(char *a, volatile long* b) {
+  _mm_prefetch(a, 0);
+  _mm_prefetch(a, 1);
+  _mm_prefetch(a, 2);
+  _mm_prefetch(a, 3);
+  _InterlockedCompareExchange(b, 1, 0);
+  _InterlockedIncrement(b);
+  _InterlockedDecrement(b);
+  _InterlockedExchangeAdd(b, 2);
+};
+// CHECK: call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+// CHECK: call void @llvm.prefetch(i8* %3, i32 0, i32 1, i32 1)
+// CHECK: call void @llvm.prefetch(i8* %5, i32 0, i32 2, i32 1)
+// CHECK: call void @llvm.prefetch(i8* %7, i32 0, i32 3, i32 1)
+// CHECK: cmpxchg
+// CHECK: atomicrmw volatile add
+// CHECK: atomicrmw volatile sub
+// CHECK: atomicrmw volatile add


@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -fms-extensions -emit-llvm -verify %s
+#include <mmintrin.h>
+// Check to make sure that _mm_prefetch survives redeclaration.
+void _mm_prefetch(char const*, int);
+void f(char *a) {
+  _mm_prefetch(a, 0);
+  _mm_prefetch(a, 1);
+  _mm_prefetch(a, 2);
+  _mm_prefetch(a, 3);
+  _mm_prefetch(a, 4); // expected-error {{argument should be a value from 0 to 3}}
+  _mm_prefetch(a, 0, 0); // expected-error {{too many arguments to function call, expected 2, have 3}}
+  _mm_prefetch(a); // expected-error {{too few arguments to function call, expected 2, have 1}}
+};