Fix passing structs and AVX vectors through sysv_abi

Do this the same way we did it for ms_abi in r324594.

Fixes PR36806.

llvm-svn: 363973
Reid Kleckner 2019-06-20 20:07:20 +00:00
parent 07ed9cfc3e
commit 3fd3de147b
2 changed files with 67 additions and 8 deletions
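For context, a minimal sketch of the pattern this commit fixes (function names here are hypothetical, not from the commit): on a Win64 target, a call through a sysv_abi function must classify its arguments with the SysV rules, so a struct of two pointer-sized members travels in registers instead of being passed indirectly as the Win64 convention would.

// Hypothetical illustration only; imagine compiling for x86_64-pc-win32.
struct Pair {
  long long A, B;  // 16 bytes: two SysV INTEGER eightbytes
};

void __attribute__((sysv_abi)) consume_pair(struct Pair P);  // assumed external

void demo(void) {
  struct Pair P = {1, 2};
  // With this fix, P is split into two integer registers per the SysV
  // classification; previously it was passed indirectly, Win64-style.
  consume_pair(P);
}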


@@ -2222,8 +2222,8 @@ public:
 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
 class WinX86_64ABIInfo : public SwiftABIInfo {
 public:
-  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
-      : SwiftABIInfo(CGT),
+  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+      : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
         IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
 
   void computeInfo(CGFunctionInfo &FI) const override;
@@ -2259,7 +2259,9 @@ private:
   void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
                              bool IsVectorCall, bool IsRegCall) const;
 
-  bool IsMingw64;
+  X86AVXABILevel AVXLevel;
+  bool IsMingw64;
 };
 
 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -2409,7 +2411,7 @@ class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                              X86AVXABILevel AVXLevel)
-      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
 
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &CGM) const override;
@@ -3562,7 +3564,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
   // using __attribute__((ms_abi)). In such case to correctly emit Win64
   // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
   if (CallingConv == llvm::CallingConv::Win64) {
-    WinX86_64ABIInfo Win64ABIInfo(CGT);
+    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
     Win64ABIInfo.computeInfo(FI);
     return;
   }
@@ -4016,9 +4018,17 @@ void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
 }
 
 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-  bool IsVectorCall =
-      FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
-  bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
+  const unsigned CC = FI.getCallingConvention();
+  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
+  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
+
+  // If __attribute__((sysv_abi)) is in use, use the SysV argument
+  // classification rules.
+  if (CC == llvm::CallingConv::X86_64_SysV) {
+    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
+    SysVABIInfo.computeInfo(FI);
+    return;
+  }
 
   unsigned FreeSSERegs = 0;
   if (IsVectorCall) {
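The hunk above closes the loop with the existing ms_abi path: X86_64ABIInfo::computeInfo already delegates Win64-convention functions to WinX86_64ABIInfo, and WinX86_64ABIInfo::computeInfo now delegates SysV-convention functions back. A hedged sketch of how the two attributes coexist in one translation unit (hypothetical declarations, not from the commit):

// Each call site is classified by the callee's own convention, not by
// the target's default ABI.
struct Big {
  double D[4];  // 32 bytes: too large for registers under either ABI,
                // but the two conventions lower it differently
};

void __attribute__((ms_abi))   win64_fn(struct Big B);  // Win64: indirect, by pointer
void __attribute__((sysv_abi)) sysv_fn(struct Big B);   // SysV: in memory (byval)

void call_both(struct Big B) {
  win64_fn(B);  // handled by WinX86_64ABIInfo
  sysv_fn(B);   // now handled by X86_64ABIInfo via the delegation above
}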


@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -triple x86_64-pc-win32 -emit-llvm -target-cpu skylake-avx512 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -target-cpu skylake-avx512 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+// RUN: %clang_cc1 -triple x86_64-pc-win32 -emit-llvm < %s | FileCheck %s --check-prefixes=CHECK,NOAVX
+// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm < %s | FileCheck %s --check-prefixes=CHECK,NOAVX
+
+#define SYSV_CC __attribute__((sysv_abi))
+
+// Make sure we coerce structs according to the SysV rules instead of passing
+// them indirectly as we would for Win64.
+struct StringRef {
+  char *Str;
+  __SIZE_TYPE__ Size;
+};
+extern volatile char gc;
+void SYSV_CC take_stringref(struct StringRef s);
+void callit() {
+  struct StringRef s = {"asdf", 4};
+  take_stringref(s);
+}
+// CHECK: define {{(dso_local )?}}void @callit()
+// CHECK: call {{(x86_64_sysvcc )?}}void @take_stringref(i8* {{[^,]*}}, i64 {{[^,]*}})
+// CHECK: declare {{(dso_local )?}}{{(x86_64_sysvcc )?}}void @take_stringref(i8*, i64)
+
+// Check that we pass vectors directly if the target feature is enabled, and
+// not otherwise.
+typedef __attribute__((vector_size(32))) float my_m256;
+typedef __attribute__((vector_size(64))) float my_m512;
+my_m256 SYSV_CC get_m256(void);
+void SYSV_CC take_m256(my_m256);
+my_m512 SYSV_CC get_m512(void);
+void SYSV_CC take_m512(my_m512);
+void use_vectors() {
+  my_m256 v1 = get_m256();
+  take_m256(v1);
+  my_m512 v2 = get_m512();
+  take_m512(v2);
+}
+// CHECK: define {{(dso_local )?}}void @use_vectors()
+// AVX: call {{(x86_64_sysvcc )?}}<8 x float> @get_m256()
+// AVX: call {{(x86_64_sysvcc )?}}void @take_m256(<8 x float> %{{.*}})
+// AVX: call {{(x86_64_sysvcc )?}}<16 x float> @get_m512()
+// AVX: call {{(x86_64_sysvcc )?}}void @take_m512(<16 x float> %{{.*}})
+// NOAVX: call {{(x86_64_sysvcc )?}}<8 x float> @get_m256()
+// NOAVX: call {{(x86_64_sysvcc )?}}void @take_m256(<8 x float>* byval(<8 x float>) align 32 %{{.*}})
+// NOAVX: call {{(x86_64_sysvcc )?}}<16 x float> @get_m512()
+// NOAVX: call {{(x86_64_sysvcc )?}}void @take_m512(<16 x float>* byval(<16 x float>) align 64 %{{.*}})
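A footnote on the NOAVX lines: without the avx/avx512f target features the SysV classifier has no YMM/ZMM registers to place the 256- and 512-bit vectors in, so they are passed indirectly as byval copies at the vector's natural alignment. A rough C-level picture of that lowering, as a sketch only (hypothetical names; the real transformation happens in IR):

typedef __attribute__((vector_size(32))) float my_m256;

// Approximately what "byval(<8 x float>) align 32" means at a call site:
void take_m256_lowered(my_m256 *Copy);  // hypothetical lowered callee

void call_take_m256(my_m256 V) {
  my_m256 Tmp = V;          // caller materializes a 32-byte-aligned copy
  take_m256_lowered(&Tmp);  // callee treats the pointee as its own by-value copy
}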