When we adjust the inline ASM type, we need to take into account an early
clobber with the 'y' constraint. Otherwise, we get the wrong return type and
an assert, because it created a '<1 x i64>' vector type instead of the
x86_mmx type.

llvm-svn: 127185
This commit is contained in:
Bill Wendling 2011-03-07 22:47:14 +00:00
parent d648135902
commit ec9d2633f1
2 changed files with 23 additions and 1 deletion

View File

@ -358,7 +358,7 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
/// Adjust the IR type used for an x86 inline-asm operand to match what the
/// backend expects for the given constraint.
///
/// For the MMX register constraint 'y' — including its early-clobber form
/// '&y' (the '=' of an output constraint has already been stripped by the
/// caller; the '&' modifier has not) — any vector type must be rewritten to
/// the opaque x86_mmx type. Otherwise a '<1 x i64>' vector leaks through and
/// trips an assert later because the return type is wrong.
///
/// \param CGF        the current code-generation context (used for the
///                   LLVMContext when building the x86_mmx type).
/// \param Constraint the single asm constraint string for this operand.
/// \param Ty         the IR type chosen so far for the operand.
/// \returns the x86_mmx type for vector operands under 'y'/'&y';
///          otherwise \p Ty unchanged.
static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                                llvm::StringRef Constraint,
                                                const llvm::Type* Ty) {
  // Match both the plain and early-clobber spellings of the MMX constraint.
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

View File

@ -0,0 +1,22 @@
// RUN: %clang -mmmx -ccc-host-triple i386-unknown-unknown -emit-llvm -S %s -o - | FileCheck %s
// <rdar://problem/9091220>
#include <mmintrin.h>
// CHECK: type { x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx }
// Regression test for early-clobber MMX asm operands: the seven "=&y"/"=y"
// outputs below must each get the x86_mmx IR type (not '<1 x i64>'), which
// is what the CHECK line's seven-member x86_mmx struct type verifies.
void foo(long long fill) {
  // Move the 64-bit argument into an MMX register value.
  __m64 vfill = _mm_cvtsi64_m64(fill);
  __m64 v1, v2, v3, v4, v5, v6, v7;
  // Broadcast vfill (operand %7, constraint "y") into seven MMX outputs.
  // v1..v6 use "=&y" — early-clobber, the case the constraint handling
  // previously mishandled — while v7 uses plain "=y" as a control.
  __asm__ __volatile__ (
    "\tmovq %7, %0\n"
    "\tmovq %7, %1\n"
    "\tmovq %7, %2\n"
    "\tmovq %7, %3\n"
    "\tmovq %7, %4\n"
    "\tmovq %7, %5\n"
    "\tmovq %7, %6"
    : "=&y" (v1), "=&y" (v2), "=&y" (v3),
      "=&y" (v4), "=&y" (v5), "=&y" (v6), "=y" (v7)
    : "y" (vfill));
}