diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 5ae4962378d3..b648b90666e5 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -16,10 +16,18 @@
 #include "X86CallLowering.h"
 #include "X86ISelLowering.h"
 #include "X86InstrInfo.h"
+#include "X86TargetMachine.h"
+#include "X86CallingConv.h"
+
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 
 using namespace llvm;
 
+#include "X86GenCallingConv.inc"
+
 #ifndef LLVM_BUILD_GLOBAL_ISEL
 #error "This shouldn't be built without GISel"
 #endif
@@ -33,14 +41,75 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
   if (Val)
     return false;
 
+  // Silence an unused-function warning; remove once return lowering uses RetCC_X86.
+  (void)RetCC_X86;
+
   MIRBuilder.buildInstr(X86::RET).addImm(0);
   return true;
 }
+
+namespace {
+struct FormalArgHandler : public CallLowering::ValueHandler {
+  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                   CCAssignFn *AssignFn, const DataLayout &DL)
+      : ValueHandler(MIRBuilder, MRI, AssignFn), DL(DL) {}
+
+  unsigned getStackAddress(uint64_t Size, int64_t Offset,
+                           MachinePointerInfo &MPO) override {
+
+    auto &MFI = MIRBuilder.getMF().getFrameInfo();
+    int FI = MFI.CreateFixedObject(Size, Offset, true);
+    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
+
+    unsigned AddrReg =
+        MRI.createGenericVirtualRegister(LLT::pointer(0,
+            DL.getPointerSizeInBits(0)));
+    MIRBuilder.buildFrameIndex(AddrReg, FI);
+    return AddrReg;
+  }
+
+  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+
+    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
+        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
+        0);
+    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
+  }
+
+  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+                        CCValAssign &VA) override {
+    MIRBuilder.getMBB().addLiveIn(PhysReg);
+    MIRBuilder.buildCopy(ValVReg, PhysReg);
+  }
+
+  const DataLayout &DL;
+};
+}
+
 bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {
-  // TODO: handle functions with one or more arguments.
-  return F.arg_empty();
+  if (F.arg_empty())
+    return true;
+
+  // TODO: handle variadic functions.
+  if (F.isVarArg())
+    return false;
+
+  auto DL = MIRBuilder.getMF().getDataLayout();
+
+  SmallVector<ArgInfo, 8> ArgInfos;
+  unsigned Idx = 0;
+  for (auto &Arg : F.getArgumentList()) {
+    ArgInfo AInfo(VRegs[Idx], Arg.getType());
+    setArgFlags(AInfo, Idx + 1, DL, F);
+    ArgInfos.push_back(AInfo);
+    Idx++;
+  }
+
+  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
+                              CC_X86, DL);
+  return handleAssignments(MIRBuilder, ArgInfos, ArgHandler);
 }
diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
new file mode 100644
index 000000000000..e2d1ce6fe3d4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -0,0 +1,128 @@
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+@a1_8bit = external global i8
+@a7_8bit = external global i8
+@a8_8bit = external global i8
+
+define void @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4,
+                            i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
+
+; ALL-LABEL: name: test_i8_args_8
+
+; X64: fixedStack:
+; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 1, alignment: 8, isImmutable: true, isAliased: false
+; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false
+; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
+; X64: [[ARG1:%[0-9]+]](s8) = COPY %edi
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %esi
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %edx
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %ecx
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %r8d
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %r9d
+; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X64-NEXT: [[ARG7:%[0-9]+]](s8) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK0]], align 0)
+; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X64-NEXT: [[ARG8:%[0-9]+]](s8) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0)
+
+; X32: fixedStack:
+; X32: id: [[STACK28:[0-9]+]], offset: 28, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK24:[0-9]+]], offset: 24, size: 1, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK20:[0-9]+]], offset: 20, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK16:[0-9]+]], offset: 16, size: 1, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK12:[0-9]+]], offset: 12, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 1, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false }
+; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1:%[0-9]+]](s8) = G_LOAD [[ARG1_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]]
+; X32-NEXT: [[ARG2:%[0-9]+]](s8) = G_LOAD [[ARG2_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: 
[[ARG3_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]] +; X32-NEXT: [[ARG3:%[0-9]+]](s8) = G_LOAD [[ARG3_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0) +; X32-NEXT: [[ARG4_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK12]] +; X32-NEXT: [[ARG4:%[0-9]+]](s8) = G_LOAD [[ARG4_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK12]], align 0) +; X32-NEXT: [[ARG5_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]] +; X32-NEXT: [[ARG5:%[0-9]+]](s8) = G_LOAD [[ARG5_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK16]], align 0) +; X32-NEXT: [[ARG6_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK20]] +; X32-NEXT: [[ARG6:%[0-9]+]](s8) = G_LOAD [[ARG6_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK20]], align 0) +; X32-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK24]] +; X32-NEXT: [[ARG7:%[0-9]+]](s8) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK24]], align 0) +; X32-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK28]] +; X32-NEXT: [[ARG8:%[0-9]+]](s8) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK28]], align 0) + +; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_8bit +; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_8bit +; ALL-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_8bit +; ALL-NEXT: G_STORE [[ARG1]](s8), [[GADDR_A1]](p0) :: (store 1 into @a1_8bit) +; ALL-NEXT: G_STORE [[ARG7]](s8), [[GADDR_A7]](p0) :: (store 1 into @a7_8bit) +; ALL-NEXT: G_STORE [[ARG8]](s8), [[GADDR_A8]](p0) :: (store 1 into @a8_8bit) +entry: + store i8 %arg1, i8* @a1_8bit + store i8 %arg7, i8* @a7_8bit + store i8 %arg8, i8* @a8_8bit + ret void +} + +@a1_32bit = external global i32 +@a7_32bit = external global i32 +@a8_32bit = external global i32 + +define void @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, + i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) { + +; ALL-LABEL: name: test_i32_args_8 + +; X64: fixedStack: +; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 4, alignment: 8, isImmutable: true, isAliased: false +; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false +; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d +; X64: [[ARG1:%[0-9]+]](s32) = COPY %edi +; X64-NEXT: %{{[0-9]+}}(s32) = COPY %esi +; X64-NEXT: %{{[0-9]+}}(s32) = COPY %edx +; X64-NEXT: %{{[0-9]+}}(s32) = COPY %ecx +; X64-NEXT: %{{[0-9]+}}(s32) = COPY %r8d +; X64-NEXT: %{{[0-9]+}}(s32) = COPY %r9d +; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]] +; X64-NEXT: [[ARG7:%[0-9]+]](s32) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0) +; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]] +; X64-NEXT: [[ARG8:%[0-9]+]](s32) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK8]], align 0) + +; X32: fixedStack: +; X32: id: [[STACK28:[0-9]+]], offset: 28, size: 4, alignment: 4, isImmutable: true, isAliased: false } +; X32: id: [[STACK24:[0-9]+]], offset: 24, size: 4, alignment: 8, isImmutable: true, isAliased: false } +; X32: id: [[STACK20:[0-9]+]], offset: 20, size: 4, alignment: 4, isImmutable: true, isAliased: false } +; X32: id: [[STACK16:[0-9]+]], offset: 16, size: 4, alignment: 16, isImmutable: true, isAliased: false } +; X32: id: [[STACK12:[0-9]+]], offset: 12, size: 4, alignment: 4, isImmutable: true, isAliased: false } +; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 4, alignment: 8, 
isImmutable: true, isAliased: false } +; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false } +; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false } +; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]] +; X32-NEXT: [[ARG1:%[0-9]+]](s32) = G_LOAD [[ARG1_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0) +; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]] +; X32-NEXT: [[ARG2:%[0-9]+]](s32) = G_LOAD [[ARG2_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK4]], align 0) +; X32-NEXT: [[ARG3_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]] +; X32-NEXT: [[ARG3:%[0-9]+]](s32) = G_LOAD [[ARG3_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK8]], align 0) +; X32-NEXT: [[ARG4_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK12]] +; X32-NEXT: [[ARG4:%[0-9]+]](s32) = G_LOAD [[ARG4_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK12]], align 0) +; X32-NEXT: [[ARG5_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]] +; X32-NEXT: [[ARG5:%[0-9]+]](s32) = G_LOAD [[ARG5_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK16]], align 0) +; X32-NEXT: [[ARG6_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK20]] +; X32-NEXT: [[ARG6:%[0-9]+]](s32) = G_LOAD [[ARG6_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK20]], align 0) +; X32-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK24]] +; X32-NEXT: [[ARG7:%[0-9]+]](s32) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK24]], align 0) +; X32-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK28]] +; X32-NEXT: [[ARG8:%[0-9]+]](s32) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK28]], align 0) + +; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_32bit +; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_32bit +; ALL-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_32bit +; ALL-NEXT: G_STORE [[ARG1]](s32), [[GADDR_A1]](p0) :: (store 4 into @a1_32bit) +; ALL-NEXT: G_STORE [[ARG7]](s32), [[GADDR_A7]](p0) :: (store 4 into @a7_32bit) +; ALL-NEXT: G_STORE [[ARG8]](s32), [[GADDR_A8]](p0) :: (store 4 into @a8_32bit) +entry: + store i32 %arg1, i32* @a1_32bit + store i32 %arg7, i32* @a7_32bit + store i32 %arg8, i32* @a8_32bit + ret void +} diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll new file mode 100644 index 000000000000..d9e9615a6940 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll @@ -0,0 +1,35 @@ +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=X64 + +@a1_64bit = external global i64 +@a7_64bit = external global i64 +@a8_64bit = external global i64 + +define void @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, + i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) { +; X64-LABEL: name: test_i64_args_8 +; X64: fixedStack: +; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 8, alignment: 8, isImmutable: true, isAliased: false +; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 8, alignment: 16, isImmutable: true, isAliased: false +; X64: liveins: %rcx, %rdi, %rdx, %rsi, %r8, %r9 +; X64: [[ARG1:%[0-9]+]](s64) = COPY %rdi +; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rsi +; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rdx +; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rcx +; 
X64-NEXT: %{{[0-9]+}}(s64) = COPY %r8 +; X64-NEXT: %{{[0-9]+}}(s64) = COPY %r9 +; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]] +; X64-NEXT: [[ARG7:%[0-9]+]](s64) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK0]], align 0) +; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]] +; X64-NEXT: [[ARG8:%[0-9]+]](s64) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK8]], align 0) +; X64-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_64bit +; X64-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_64bit +; X64-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_64bit +; X64-NEXT: G_STORE [[ARG1]](s64), [[GADDR_A1]](p0) :: (store 8 into @a1_64bit) +; X64-NEXT: G_STORE [[ARG7]](s64), [[GADDR_A7]](p0) :: (store 8 into @a7_64bit) +; X64-NEXT: G_STORE [[ARG8]](s64), [[GADDR_A8]](p0) :: (store 8 into @a8_64bit) +entry: + store i64 %arg1, i64* @a1_64bit + store i64 %arg7, i64* @a7_64bit + store i64 %arg8, i64* @a8_64bit + ret void +}
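
An illustrative sketch, not part of the patch above: a minimal standalone .ll input for experimenting with the new formal-argument lowering. The function name and the @sink global are hypothetical; the llc invocation mirrors the RUN lines in the tests. With seven i32 arguments on x86_64, the SysV convention passes the first six in registers, so %a7 exercises the FormalArgHandler stack path (G_FRAME_INDEX of a fixed stack slot followed by an invariant G_LOAD), while %a1..%a6 are lowered to register copies.

; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o -
@sink = external global i32

define void @sketch_i32_args_7(i32 %a1, i32 %a2, i32 %a3, i32 %a4,
                               i32 %a5, i32 %a6, i32 %a7) {
entry:
  ; The seventh argument arrives on the stack and should be translated into a
  ; G_FRAME_INDEX + G_LOAD pair, matching the checks in the tests above.
  store i32 %a7, i32* @sink
  ret void
}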