diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index 49fbb209ff35..f7659ddafd20 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -265,7 +265,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { llvm::Value *BlockLiteral = LoadBlockStruct(); Loc = Builder.CreateGEP(BlockLiteral, - llvm::ConstantInt::get(llvm::Type::Int64Ty, + llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), offset), "block.literal"); Ty = llvm::PointerType::get(Ty, 0); @@ -406,7 +406,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) { BlockLiteral = Builder.CreateBitCast(BlockLiteral, - llvm::PointerType::getUnqual(llvm::Type::Int8Ty), + llvm::PointerType::getUnqual( + llvm::Type::getInt8Ty(VMContext)), "tmp"); // Add the block literal. @@ -456,7 +457,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) { llvm::Value *BlockLiteral = LoadBlockStruct(); llvm::Value *V = Builder.CreateGEP(BlockLiteral, - llvm::ConstantInt::get(llvm::Type::Int64Ty, + llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), offset), "block.literal"); if (E->isByRef()) { @@ -688,7 +689,7 @@ uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) { uint64_t Pad = BlockOffset - OldOffset; if (Pad) { - llvm::ArrayType::get(llvm::Type::Int8Ty, Pad); + llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad); QualType PadTy = getContext().getConstantArrayType(getContext().CharTy, llvm::APInt(32, Pad), ArrayType::Normal, 0); @@ -777,7 +778,8 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T, llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index); Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty); - llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + llvm::Value *N = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(T->getContext()), flag); llvm::Value *F = getBlockObjectAssign(); Builder.CreateCall3(F, Dstv, Srcv, N); } @@ -928,7 +930,8 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) { flag |= BLOCK_BYREF_CALLER; - llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + llvm::Value *N = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(T->getContext()), flag); llvm::Value *F = getBlockObjectAssign(); Builder.CreateCall3(F, DstObj, SrcObj, N); @@ -1025,9 +1028,9 @@ llvm::Value *BlockFunction::getBlockObjectDispose() { if (CGM.BlockObjectDispose == 0) { const llvm::FunctionType *FTy; std::vector ArgTys; - const llvm::Type *ResultType = llvm::Type::VoidTy; + const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); ArgTys.push_back(PtrToInt8Ty); - ArgTys.push_back(llvm::Type::Int32Ty); + ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); FTy = llvm::FunctionType::get(ResultType, ArgTys, false); CGM.BlockObjectDispose = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose"); @@ -1039,10 +1042,10 @@ llvm::Value *BlockFunction::getBlockObjectAssign() { if (CGM.BlockObjectAssign == 0) { const llvm::FunctionType *FTy; std::vector ArgTys; - const llvm::Type *ResultType = llvm::Type::VoidTy; + const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); ArgTys.push_back(PtrToInt8Ty); ArgTys.push_back(PtrToInt8Ty); - ArgTys.push_back(llvm::Type::Int32Ty); + ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); FTy = llvm::FunctionType::get(ResultType, ArgTys, false); CGM.BlockObjectAssign = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign"); @@ -1054,7 +1057,7 @@ void 
BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) { llvm::Value *F = getBlockObjectDispose(); llvm::Value *N; V = Builder.CreateBitCast(V, PtrToInt8Ty); - N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag); Builder.CreateCall2(F, V, N); } @@ -1063,7 +1066,8 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); } BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B) : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) { - PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + PtrToInt8Ty = llvm::PointerType::getUnqual( + llvm::Type::getInt8Ty(VMContext)); BlockHasCopyDispose = false; } diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h index 5a0af04b24d7..61415adc9b6a 100644 --- a/clang/lib/CodeGen/CGBlocks.h +++ b/clang/lib/CodeGen/CGBlocks.h @@ -112,7 +112,8 @@ public: GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0), BlockObjectAssign(0), BlockObjectDispose(0) { Block.GlobalUniqueCount = 0; - PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + PtrToInt8Ty = llvm::PointerType::getUnqual( + llvm::Type::getInt8Ty(M.getContext())); } }; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 62e5c80666c2..8f5da42b412b 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -78,7 +78,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_va_end: { Value *ArgValue = EmitVAListRef(E->getArg(0)); const llvm::Type *DestType = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); if (ArgValue->getType() != DestType) ArgValue = Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); @@ -92,7 +92,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *SrcPtr = EmitVAListRef(E->getArg(1)); const llvm::Type *Type = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); DstPtr = Builder.CreateBitCast(DstPtr, Type); SrcPtr = Builder.CreateBitCast(SrcPtr, Type); @@ -214,9 +214,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); // FIXME: Technically these constants should of type 'int', yes? RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : - llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : - llvm::ConstantInt::get(llvm::Type::Int32Ty, 3); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3); Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0); return RValue::get(Builder.CreateCall3(F, Address, RW, Locality)); } @@ -275,15 +275,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__builtin_alloca: { // FIXME: LLVM IR Should allow alloca with an i64 size! 
Value *Size = EmitScalarExpr(E->getArg(0)); - Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp"); - return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp")); + Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp"); + return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp")); } case Builtin::BI__builtin_bzero: { Value *Address = EmitScalarExpr(E->getArg(0)); Builder.CreateCall4(CGM.getMemSetFn(), Address, - llvm::ConstantInt::get(llvm::Type::Int8Ty, 0), + llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0), EmitScalarExpr(E->getArg(1)), - llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)); return RValue::get(Address); } case Builtin::BI__builtin_memcpy: { @@ -291,7 +291,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateCall4(CGM.getMemCpyFn(), Address, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), - llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)); return RValue::get(Address); } case Builtin::BI__builtin_memmove: { @@ -299,16 +299,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateCall4(CGM.getMemMoveFn(), Address, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), - llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)); return RValue::get(Address); } case Builtin::BI__builtin_memset: { Value *Address = EmitScalarExpr(E->getArg(0)); Builder.CreateCall4(CGM.getMemSetFn(), Address, Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), - llvm::Type::Int8Ty), + llvm::Type::getInt8Ty(VMContext)), EmitScalarExpr(E->getArg(2)), - llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)); return RValue::get(Address); } case Builtin::BI__builtin_return_address: { @@ -335,12 +335,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0); Value *FrameAddr = Builder.CreateCall(FrameAddrF, - Constant::getNullValue(llvm::Type::Int32Ty)); + Constant::getNullValue(llvm::Type::getInt32Ty(VMContext))); Builder.CreateStore(FrameAddr, Buf); // Call the setjmp intrinsic Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0); const llvm::Type *DestType = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Buf = Builder.CreateBitCast(Buf, DestType); return RValue::get(Builder.CreateCall(F, Buf)); } @@ -348,7 +348,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0); Value *Buf = EmitScalarExpr(E->getArg(0)); const llvm::Type *DestType = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Buf = Builder.CreateBitCast(Buf, DestType); return RValue::get(Builder.CreateCall(F, Buf)); } @@ -514,8 +514,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__sync_synchronize: { Value *C[5]; - C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1); - C[4] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 0); + C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1); + C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0); 
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5); return RValue::get(0); } @@ -584,7 +584,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size()); QualType BuiltinRetType = E->getType(); - const llvm::Type *RetTy = llvm::Type::VoidTy; + const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext); if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType); if (RetTy != V->getType()) { @@ -636,9 +636,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_psrldi128: case X86::BI__builtin_ia32_psrlqi128: case X86::BI__builtin_ia32_psrlwi128: { - Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext"); - const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2); - llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext"); + const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2); + llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty), Ops[1], Zero, "insert"); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast"); @@ -691,8 +691,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_psrldi: case X86::BI__builtin_ia32_psrlqi: case X86::BI__builtin_ia32_psrlwi: { - Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext"); - const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1); + Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext"); + const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1); Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast"); const char *name = 0; Intrinsic::ID ID = Intrinsic::not_intrinsic; @@ -744,17 +744,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss"); } case X86::BI__builtin_ia32_ldmxcsr: { - llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); - Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1); - Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp"); + llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); + Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1); + Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp"); Builder.CreateStore(Ops[0], Tmp); return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), Builder.CreateBitCast(Tmp, PtrTy)); } case X86::BI__builtin_ia32_stmxcsr: { - llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); - Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1); - Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp"); + llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); + Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1); + Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp"); One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), Builder.CreateBitCast(Tmp, PtrTy)); return Builder.CreateLoad(Tmp, "stmxcsr"); @@ -769,7 +769,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, } case X86::BI__builtin_ia32_storehps: case X86::BI__builtin_ia32_storelps: { - const 
llvm::Type *EltTy = llvm::Type::Int64Ty; + const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext); llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy); llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); @@ -778,7 +778,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // extract (0, 1) unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1; - llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index); + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index); Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract"); // cast pointer to i64 & store diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp index d1e855616056..2940f18b4dfc 100644 --- a/clang/lib/CodeGen/CGCXX.cpp +++ b/clang/lib/CodeGen/CGCXX.cpp @@ -31,14 +31,14 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(const CXXDestructorDecl *Dtor, // FIXME: This is ABI dependent and we use the Itanium ABI. const llvm::Type *Int8PtrTy = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); std::vector Params; Params.push_back(Int8PtrTy); // Get the destructor function type const llvm::Type *DtorFnTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Params, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false); DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy); Params.clear(); @@ -96,7 +96,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() { if (CXXGlobalInits.empty()) return; - const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::VoidTy, + const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false); // Create our global initialization function. @@ -140,18 +140,18 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, // Create the guard variable. llvm::GlobalValue *GuardV = - new llvm::GlobalVariable(CGM.getModule(), llvm::Type::Int64Ty, false, + new llvm::GlobalVariable(CGM.getModule(), llvm::Type::getInt64Ty(VMContext), false, GV->getLinkage(), - llvm::Constant::getNullValue(llvm::Type::Int64Ty), + llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)), GuardVName.c_str()); // Load the first byte of the guard variable. - const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy), "tmp"); // Compare it against 0. 
- llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::Int8Ty); + llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)); llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool"); llvm::BasicBlock *InitBlock = createBasicBlock("init"); @@ -164,7 +164,7 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, EmitCXXGlobalVarDeclInit(D, GV); - Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1), + Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1), Builder.CreateBitCast(GuardV, PtrTy)); EmitBlock(EndBlock); @@ -301,7 +301,7 @@ llvm::Value *CodeGenFunction::AddressCXXOfBaseClass(llvm::Value *BaseValue, if (ClassDecl == BaseClassDecl) return BaseValue; - llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); llvm::SmallVector NestedBasePaths; GetNestedPaths(NestedBasePaths, ClassDecl, BaseClassDecl); assert(NestedBasePaths.size() > 0 && @@ -602,7 +602,7 @@ const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D, llvm::Constant *CodeGenFunction::GenerateRtti(const CXXRecordDecl *RD) { llvm::Type *Ptr8Ty; - Ptr8Ty = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); llvm::Constant *Rtti = llvm::Constant::getNullValue(Ptr8Ty); if (!getContext().getLangOptions().Rtti) @@ -674,7 +674,7 @@ void CodeGenFunction::GenerateVtableForBase(const CXXRecordDecl *RD, bool ForVirtualBase, llvm::SmallSet &IndirectPrimary) { llvm::Type *Ptr8Ty; - Ptr8Ty = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); llvm::Constant *m = llvm::Constant::getNullValue(Ptr8Ty); if (RD && !RD->isDynamicClass()) @@ -693,7 +693,7 @@ void CodeGenFunction::GenerateVtableForBase(const CXXRecordDecl *RD, cast(i->getType()->getAs()->getDecl()); int64_t BaseOffset = Layout.getBaseClassOffset(Base) / 8; llvm::Constant *m; - m = llvm::ConstantInt::get(llvm::Type::Int64Ty, BaseOffset); + m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), BaseOffset); m = llvm::ConstantExpr::getIntToPtr(m, Ptr8Ty); methods.push_back(m); } @@ -723,7 +723,7 @@ void CodeGenFunction::GenerateVtableForBase(const CXXRecordDecl *RD, if (TopPrimary) { if (RD) { int64_t BaseOffset = -(Layout.getBaseClassOffset(RD) / 8); - m = llvm::ConstantInt::get(llvm::Type::Int64Ty, BaseOffset); + m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), BaseOffset); m = llvm::ConstantExpr::getIntToPtr(m, Ptr8Ty); } methods.push_back(m); @@ -750,7 +750,7 @@ llvm::Value *CodeGenFunction::GenerateVtable(const CXXRecordDecl *RD) { llvm::GlobalVariable::LinkageTypes linktype; linktype = llvm::GlobalValue::WeakAnyLinkage; std::vector methods; - llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); int64_t Offset = 0; llvm::Constant *rtti = GenerateRtti(RD); @@ -793,7 +793,7 @@ llvm::Value *CodeGenFunction::GenerateVtable(const CXXRecordDecl *RD) { linktype, C, Name); vtable = Builder.CreateBitCast(vtable, Ptr8Ty); vtable = Builder.CreateGEP(vtable, - llvm::ConstantInt::get(llvm::Type::Int64Ty, + llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), Offset/8)); return vtable; } @@ -1100,7 +1100,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD) { LoadOfThis = LoadCXXThis(); llvm::Value 
*VtableField; llvm::Type *Ptr8Ty, *PtrPtr8Ty; - Ptr8Ty = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0); VtableField = Builder.CreateBitCast(LoadOfThis, PtrPtr8Ty); llvm::Value *vtable = GenerateVtable(ClassDecl); diff --git a/clang/lib/CodeGen/CGCXXTemp.cpp b/clang/lib/CodeGen/CGCXXTemp.cpp index c7ac4150b485..04d3842daabe 100644 --- a/clang/lib/CodeGen/CGCXXTemp.cpp +++ b/clang/lib/CodeGen/CGCXXTemp.cpp @@ -24,7 +24,7 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary, // Check if temporaries need to be conditional. If so, we'll create a // condition boolean, initialize it to 0 and if (!ConditionalTempDestructionStack.empty()) { - CondPtr = CreateTempAlloca(llvm::Type::Int1Ty, "cond"); + CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond"); // Initialize it to false. This initialization takes place right after // the alloca insert point. diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 24d82747a406..0a187fca7672 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -321,14 +321,14 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { case ABIArgInfo::Indirect: { assert(!RetAI.getIndirectAlign() && "Align unused on indirect return."); - ResultType = llvm::Type::VoidTy; + ResultType = llvm::Type::getVoidTy(getLLVMContext()); const llvm::Type *STy = ConvertType(RetTy); ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace())); break; } case ABIArgInfo::Ignore: - ResultType = llvm::Type::VoidTy; + ResultType = llvm::Type::getVoidTy(getLLVMContext()); break; case ABIArgInfo::Coerce: @@ -839,7 +839,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, } llvm::Instruction *CI = CS.getInstruction(); - if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy) + if (Builder.isNamePreserving() && + CI->getType() != llvm::Type::getVoidTy(VMContext)) CI->setName("call"); switch (RetAI.getKind()) { diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index d3b7db089bc6..c12f23130ee0 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -224,11 +224,11 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty, bool needsCopyDispose = BlockRequiresCopying(Ty); std::vector Types(needsCopyDispose*2+5); const llvm::PointerType *PtrToInt8Ty - = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Types[0] = PtrToInt8Ty; Types[1] = PtrToInt8Ty; - Types[2] = llvm::Type::Int32Ty; - Types[3] = llvm::Type::Int32Ty; + Types[2] = llvm::Type::getInt32Ty(VMContext); + Types[3] = llvm::Type::getInt32Ty(VMContext); if (needsCopyDispose) { Types[4] = PtrToInt8Ty; Types[5] = PtrToInt8Ty; @@ -282,7 +282,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { if (!DidCallStackSave) { // Save the stack. 
const llvm::Type *LTy = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack"); llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); @@ -310,10 +310,12 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { llvm::Value *VLASize = EmitVLASize(Ty); // Downcast the VLA size expression - VLASize = Builder.CreateIntCast(VLASize, llvm::Type::Int32Ty, false, "tmp"); + VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext), + false, "tmp"); // Allocate memory for the array. - llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::Int8Ty, VLASize, "vla"); + llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), + VLASize, "vla"); DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp"); } @@ -374,7 +376,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { if (isByRef) { const llvm::PointerType *PtrToInt8Ty - = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); EnsureInsertPoint(); llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0); @@ -402,19 +404,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { int isa = 0; if (flag&BLOCK_FIELD_IS_WEAK) isa = 1; - V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa); + V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa); V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa"); Builder.CreateStore(V, isa_field); V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding"); Builder.CreateStore(V, forwarding_field); - V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags); + V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags); Builder.CreateStore(V, flags_field); const llvm::Type *V1; V1 = cast(DeclPtr->getType())->getElementType(); - V = llvm::ConstantInt::get(llvm::Type::Int32Ty, + V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), (CGM.getTargetData().getTypeStoreSizeInBits(V1) / 8)); Builder.CreateStore(V, size_field); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index a97f9ad06308..7ce1354ee078 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -239,8 +239,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, // Bool can have different representation in memory than in registers. if (Ty->isBooleanType()) - if (V->getType() != llvm::Type::Int1Ty) - V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool"); + if (V->getType() != llvm::Type::getInt1Ty(VMContext)) + V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool"); return V; } @@ -340,9 +340,8 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, // Fetch the high bits if necessary. 
if (LowBits < BitfieldSize) { unsigned HighBits = BitfieldSize - LowBits; - llvm::Value *HighPtr = - Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1), - "bf.ptr.hi"); + llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); llvm::Value *HighVal = Builder.CreateLoad(HighPtr, LV.isVolatileQualified(), "tmp"); @@ -397,7 +396,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, const VectorType *ExprVT = ExprType->getAsVectorType(); if (!ExprVT) { unsigned InIdx = getAccessedFieldNo(0, Elts); - llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + llvm::Value *Elt = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), InIdx); return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp")); } @@ -407,7 +407,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, llvm::SmallVector Mask; for (unsigned i = 0; i != NumResultElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); - Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), InIdx)); } llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); @@ -551,9 +552,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, // If the low part doesn't cover the bitfield emit a high part. if (LowBits < BitfieldSize) { unsigned HighBits = BitfieldSize - LowBits; - llvm::Value *HighPtr = - Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1), - "bf.ptr.hi"); + llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); llvm::Value *HighVal = Builder.CreateLoad(HighPtr, Dst.isVolatileQualified(), "bf.prev.hi"); @@ -612,7 +612,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::SmallVector Mask(NumDstElts); for (unsigned i = 0; i != NumSrcElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); - Mask[InIdx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Mask[InIdx] = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), i); } llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); @@ -627,9 +628,11 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::SmallVector ExtMask; unsigned i; for (i = 0; i != NumSrcElts; ++i) - ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i)); + ExtMask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), i)); for (; i != NumDstElts; ++i) - ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty)); + ExtMask.push_back(llvm::UndefValue::get( + llvm::Type::getInt32Ty(VMContext))); llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], ExtMask.size()); llvm::Value *ExtSrcVal = @@ -639,12 +642,14 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, // build identity llvm::SmallVector Mask; for (unsigned i = 0; i != NumDstElts; ++i) { - Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), i)); } // modify when what gets shuffled in for (unsigned i = 0; i != NumSrcElts; ++i) { unsigned Idx = getAccessedFieldNo(i, Elts); - Mask[Idx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts); + Mask[Idx] = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), i+NumDstElts); } llvm::Value *MaskV = 
llvm::ConstantVector::get(&Mask[0], Mask.size()); Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); @@ -655,7 +660,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, } else { // If the Src is a scalar (not a vector) it must be updating one element. unsigned InIdx = getAccessedFieldNo(0, Elts); - llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + llvm::Value *Elt = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), InIdx); Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); } @@ -860,7 +866,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { // Emit the vector as an lvalue to get its address. LValue LHS = EmitLValue(E->getBase()); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); - Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned, "vidx"); + Idx = Builder.CreateIntCast(Idx, + llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx"); return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType().getCVRQualifiers()); } @@ -871,7 +878,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { // Extend or truncate the index type to 32 or 64-bits. unsigned IdxBitwidth = cast(Idx->getType())->getBitWidth(); if (IdxBitwidth != LLVMPointerWidth) - Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth), + Idx = Builder.CreateIntCast(Idx, + llvm::IntegerType::get(VMContext, LLVMPointerWidth), IdxSigned, "idxprom"); // We know that the pointer points to a type of the correct size, @@ -898,7 +906,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { Idx = Builder.CreateMul(Idx, InterfaceSize); - llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Type *i8PTy = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy), Idx, "arrayidx"); Address = Builder.CreateBitCast(Address, Base->getType()); @@ -926,7 +935,8 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, llvm::SmallVector CElts; for (unsigned i = 0, e = Elts.size(); i != e; ++i) - CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i])); + CElts.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), Elts[i])); return llvm::ConstantVector::get(&CElts[0], CElts.size()); } @@ -964,7 +974,8 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { for (unsigned i = 0, e = Indices.size(); i != e; ++i) { if (isa(BaseElts)) - CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + CElts.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), 0)); else CElts.push_back(BaseElts->getOperand(Indices[i])); } @@ -1037,7 +1048,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, "tmp"); llvm::Value *Idx = - llvm::ConstantInt::get(llvm::Type::Int32Ty, Info.FieldNo); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo); llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp"); return LValue::MakeBitfield(V, Info.Start, Info.Size, diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index b0d91aa916dc..0cf9cd3e3a1e 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -541,7 +541,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, // equal, but other compilers do this optimization, and almost every memcpy // implementation handles this case safely. 
If there is a libc that does not // safely handle this, we can add a target hook. - const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BP = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); if (DestPtr->getType() != BP) DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp"); if (SrcPtr->getType() != BP) @@ -551,7 +552,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, std::pair TypeInfo = getContext().getTypeInfo(Ty); // FIXME: Handle variable sized types. - const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth); + const llvm::Type *IntPtr = + llvm::IntegerType::get(VMContext, LLVMPointerWidth); // FIXME: If we have a volatile struct, the optimizer can remove what might // appear to be `extra' memory ops: @@ -569,6 +571,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, DestPtr, SrcPtr, // TypeInfo.first describes size in bits. llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), - llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), TypeInfo.second/8)); } diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index d8fc31468945..73576751f92d 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -81,7 +81,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder { uint64_t NumBytes = AlignedElementOffsetInBytes - ElementOffsetInBytes; - const llvm::Type *Ty = llvm::Type::Int8Ty; + const llvm::Type *Ty = llvm::Type::getInt8Ty(CGF->getLLVMContext()); if (NumBytes > 1) Ty = llvm::ArrayType::get(Ty, NumBytes); @@ -249,7 +249,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder { if (!NumBytes) return; - const llvm::Type *Ty = llvm::Type::Int8Ty; + const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext()); if (NumBytes > 1) Ty = llvm::ArrayType::get(Ty, NumBytes); @@ -399,7 +399,7 @@ public: assert(CurSize <= TotalSize && "Union size mismatch!"); if (unsigned NumPadBytes = TotalSize - CurSize) { - const llvm::Type *Ty = llvm::Type::Int8Ty; + const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext); if (NumPadBytes > 1) Ty = llvm::ArrayType::get(Ty, NumPadBytes); @@ -545,7 +545,8 @@ public: // This must be a string initializing an array in a static initializer. // Don't emit it as the address of the string, emit the string data itself // as an inline array. - return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false); + return llvm::ConstantArray::get(VMContext, + CGM.GetStringForStringLiteral(E), false); } llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) { @@ -559,7 +560,7 @@ public: // Resize the string to the right size, adding zeros at the end, or // truncating as needed. 
Str.resize(CAT->getSize().getZExtValue(), '\0'); - return llvm::ConstantArray::get(Str, false); + return llvm::ConstantArray::get(VMContext, Str, false); } llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) { @@ -626,8 +627,10 @@ public: } case Expr::AddrLabelExprClass: { assert(CGF && "Invalid address of label expression outside function."); - unsigned id = CGF->GetIDForAddrOfLabel(cast(E)->getLabel()); - llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id); + unsigned id = + CGF->GetIDForAddrOfLabel(cast(E)->getLabel()); + llvm::Constant *C = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), id); return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType())); } case Expr::CallExprClass: { @@ -679,7 +682,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, case APValue::LValue: { const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType); llvm::Constant *Offset = - llvm::ConstantInt::get(llvm::Type::Int64Ty, + llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), Result.Val.getLValueOffset()); llvm::Constant *C; @@ -689,7 +692,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, // Apply offset if necessary. if (!Offset->isNullValue()) { const llvm::Type *Type = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type); Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1); C = llvm::ConstantExpr::getBitCast(Casted, C->getType()); @@ -720,7 +723,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, llvm::Constant *C = llvm::ConstantInt::get(VMContext, Result.Val.getInt()); - if (C->getType() == llvm::Type::Int1Ty) { + if (C->getType() == llvm::Type::getInt1Ty(VMContext)) { const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } @@ -765,7 +768,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, } llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast(E)); - if (C && C->getType() == llvm::Type::Int1Ty) { + if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) { const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 564322b4c880..0b8cb8cb761f 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -134,7 +134,7 @@ public: Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E); Value *VisitAddrLabelExpr(const AddrLabelExpr *E) { llvm::Value *V = - llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), CGF.GetIDForAddrOfLabel(E->getLabel())); return Builder.CreateIntToPtr(V, ConvertType(E->getType())); @@ -206,13 +206,15 @@ public: unsigned i; for (i = 0; i < NumInitElements; ++i) { Value *NewV = Visit(E->getInit(i)); - Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Value *Idx = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), i); V = Builder.CreateInsertElement(V, NewV, Idx); } // Emit remaining default initializers for (/* Do not initialize i*/; i < NumVectorElements; ++i) { - Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Value *Idx = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), i); llvm::Value *NewV = llvm::Constant::getNullValue(ElementType); V 
= Builder.CreateInsertElement(V, NewV, Idx); } @@ -406,7 +408,8 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { // then zero extending it to int, then wanting it as a logical value again. // Optimize this common case. if (llvm::ZExtInst *ZI = dyn_cast(Src)) { - if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) { + if (ZI->getOperand(0)->getType() == + llvm::Type::getInt1Ty(CGF.getLLVMContext())) { Value *Result = ZI->getOperand(0); // If there aren't any more uses, zap the instruction to save space. // Note that there can be more uses, for example if this @@ -431,6 +434,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, if (SrcType == DstType) return Src; if (DstType->isVoidType()) return 0; + + llvm::LLVMContext &VMContext = CGF.getLLVMContext(); // Handle conversions to bool first, they are special: comparisons against 0. if (DstType->isBooleanType()) @@ -460,7 +465,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); // First, convert to the correct width so that we control the kind of // extension. - const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth); + const llvm::Type *MiddleTy = + llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); bool InputSigned = SrcType->isSignedIntegerType(); llvm::Value* IntResult = Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); @@ -482,14 +488,16 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, // Insert the element in element zero of an undef vector llvm::Value *UnV = llvm::UndefValue::get(DstTy); - llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Value *Idx = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp"); // Splat the element across to all elements llvm::SmallVector Args; unsigned NumElements = cast(DstTy)->getNumElements(); for (unsigned i = 0; i < NumElements; i++) - Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + Args.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), 0)); llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); @@ -589,7 +597,9 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { Value *Base = Visit(E->getBase()); Value *Idx = Visit(E->getIdx()); bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType(); - Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned, + Idx = Builder.CreateIntCast(Idx, + llvm::Type::getInt32Ty(CGF.getLLVMContext()), + IdxSigned, "vecidxcast"); return Builder.CreateExtractElement(Base, Idx, "vecext"); } @@ -692,6 +702,8 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, LValue LV = EmitLValue(E->getSubExpr()); QualType ValTy = E->getSubExpr()->getType(); Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal(); + + llvm::LLVMContext &VMContext = CGF.getLLVMContext(); int AmountVal = isInc ? 
1 : -1; @@ -705,7 +717,7 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, if (const llvm::PointerType *PT = dyn_cast(InVal->getType())) { llvm::Constant *Inc = - llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal); if (!isa(PT->getElementType())) { QualType PTEE = ValTy->getPointeeType(); if (const ObjCInterfaceType *OIT = @@ -716,7 +728,7 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, size = -size; Inc = llvm::ConstantInt::get(Inc->getType(), size); const llvm::Type *i8Ty = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); InVal = Builder.CreateBitCast(InVal, i8Ty); NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr"); llvm::Value *lhs = LV.getAddress(); @@ -727,12 +739,12 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec"); } else { const llvm::Type *i8Ty = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp"); NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec"); NextVal = Builder.CreateBitCast(NextVal, InVal->getType()); } - } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) { + } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) { // Bool++ is an interesting case, due to promotion rules, we get: // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 -> // Bool = ((int)Bool+1) != 0 @@ -749,11 +761,11 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec"); } else { // Add the inc/dec to the real part. 
- if (InVal->getType() == llvm::Type::FloatTy) + if (InVal->getType() == llvm::Type::getFloatTy(VMContext)) NextVal = llvm::ConstantFP::get(VMContext, llvm::APFloat(static_cast(AmountVal))); - else if (InVal->getType() == llvm::Type::DoubleTy) + else if (InVal->getType() == llvm::Type::getDoubleTy(VMContext)) NextVal = llvm::ConstantFP::get(VMContext, llvm::APFloat(static_cast(AmountVal))); @@ -997,22 +1009,22 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { // long long *__overflow_handler)(long long a, long long b, char op, // char width) std::vector handerArgTypes; - handerArgTypes.push_back(llvm::Type::Int64Ty); - handerArgTypes.push_back(llvm::Type::Int64Ty); - handerArgTypes.push_back(llvm::Type::Int8Ty); - handerArgTypes.push_back(llvm::Type::Int8Ty); - llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty, - handerArgTypes, false); + handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext)); + handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext)); + handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext)); + handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext)); + llvm::FunctionType *handlerTy = llvm::FunctionType::get( + llvm::Type::getInt64Ty(VMContext), handerArgTypes, false); llvm::Value *handlerFunction = CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler", llvm::PointerType::getUnqual(handlerTy)); handlerFunction = Builder.CreateLoad(handlerFunction); llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction, - Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty), - Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty), - llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID), - llvm::ConstantInt::get(llvm::Type::Int8Ty, + Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)), + Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)), + llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID), + llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), cast(opTy)->getBitWidth())); handlerResult = Builder.CreateTrunc(handlerResult, opTy); @@ -1073,7 +1085,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { if (Width < CGF.LLVMPointerWidth) { // Zero or sign extend the pointer value based on whether the index is // signed or not. - const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth); + const llvm::Type *IdxType = + llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); if (IdxExp->getType()->isSignedIntegerType()) Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); else @@ -1087,7 +1100,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { llvm::ConstantInt::get(Idx->getType(), CGF.getContext().getTypeSize(OIT) / 8); Idx = Builder.CreateMul(Idx, InterfaceSize); - const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *i8Ty = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ptr->getType()); @@ -1097,7 +1111,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { // extensions. The GNU void* casts amount to no-ops since our void* // type is i8*, but this is future proof. 
if (ElementType->isVoidType() || ElementType->isFunctionType()) { - const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *i8Ty = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ptr->getType()); @@ -1135,7 +1150,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { if (Width < CGF.LLVMPointerWidth) { // Zero or sign extend the pointer value based on whether the index is // signed or not. - const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth); + const llvm::Type *IdxType = + llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); if (Ops.E->getRHS()->getType()->isSignedIntegerType()) Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); else @@ -1152,7 +1168,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { CGF.getContext().getTypeSize(OIT) / 8); Idx = Builder.CreateMul(Idx, InterfaceSize); const llvm::Type *i8Ty = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ops.LHS->getType()); @@ -1163,7 +1179,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { // void* type is i8*, but this is future proof. if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { const llvm::Type *i8Ty = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr"); return Builder.CreateBitCast(Res, Ops.LHS->getType()); @@ -1334,7 +1350,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be false. Start // setting up the PHI node in the Cont Block for this. - llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock); + llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), + "", ContBlock); PN->reserveOperandSpace(2); // Normal case, two inputs. for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); PI != PE; ++PI) @@ -1381,7 +1398,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be true. Start // setting up the PHI node in the Cont Block for this. - llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock); + llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), + "", ContBlock); PN->reserveOperandSpace(2); // Normal case, two inputs. for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); PI != PE; ++PI) @@ -1609,7 +1627,8 @@ Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) 
{ int n = va_arg(va, int); assert(n >= 0 && n < (int)NumElements * 2 && "Vector shuffle index out of bounds!"); - Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n)); + Args.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), n)); } const char *Name = va_arg(va, const char *); @@ -1627,7 +1646,8 @@ llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals, for (unsigned i = 0, e = NumVals; i != e; ++i) { llvm::Value *Val = isSplat ? Vals[0] : Vals[i]; - llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + llvm::Value *Idx = llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), i); Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp"); } diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index ac03ebb32b36..0a750577ddb6 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -214,10 +214,10 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm) Zeros[0] = llvm::ConstantInt::get(LongTy, 0); Zeros[1] = Zeros[0]; NULLPtr = llvm::ConstantPointerNull::get( - llvm::PointerType::getUnqual(llvm::Type::Int8Ty)); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext))); // C string type. Used in lots of places. PtrToInt8Ty = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); // Get the selector Type. SelectorTy = cast( CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType())); @@ -291,7 +291,7 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str, const std::string &Name) { - llvm::Constant * ConstStr = llvm::ConstantArray::get(Str); + llvm::Constant * ConstStr = llvm::ConstantArray::get(VMContext, Str); ConstStr = new llvm::GlobalVariable(TheModule, ConstStr->getType(), true, llvm::GlobalValue::InternalLinkage, ConstStr, Name); @@ -544,7 +544,7 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName, Methods.clear(); Methods.push_back(llvm::ConstantPointerNull::get( llvm::PointerType::getUnqual(ObjCMethodListTy))); - Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, + Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), MethodTypes.size())); Methods.push_back(MethodArray); @@ -733,7 +733,7 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol( // The isa pointer must be set to a magic number so the runtime knows it's // the correct layout. Elements.push_back(llvm::ConstantExpr::getIntToPtr( - llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), ProtocolVersion), IdTy)); Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name")); Elements.push_back(ProtocolList); Elements.push_back(InstanceMethodList); @@ -788,7 +788,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) { // The isa pointer must be set to a magic number so the runtime knows it's // the correct layout. 
Elements.push_back(llvm::ConstantExpr::getIntToPtr( - llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), ProtocolVersion), IdTy)); Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name")); Elements.push_back(ProtocolList); Elements.push_back(InstanceMethodList); @@ -914,7 +914,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) { Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter); } IvarOffsets.push_back( - llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset)); } // Collect information about instance methods @@ -1064,8 +1064,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { Classes.size() + Categories.size() + 2); llvm::StructType *SymTabTy = llvm::StructType::get(VMContext, LongTy, SelStructPtrTy, - llvm::Type::Int16Ty, - llvm::Type::Int16Ty, + llvm::Type::getInt16Ty(VMContext), + llvm::Type::getInt16Ty(VMContext), ClassListTy, NULL); Elements.clear(); @@ -1107,7 +1107,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end(); iter != iterEnd; ++iter) { llvm::Constant *Idxs[] = {Zeros[0], - llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]}; + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]}; llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy, true, llvm::GlobalValue::InternalLinkage, llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2), @@ -1124,7 +1124,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end(); iter != iterEnd; iter++) { llvm::Constant *Idxs[] = {Zeros[0], - llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]}; + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]}; llvm::Constant *SelPtr = new llvm::GlobalVariable (TheModule, SelStructPtrTy, true, llvm::GlobalValue::InternalLinkage, @@ -1139,10 +1139,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { (*iter).second->setAliasee(SelPtr); } // Number of classes defined. 
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty, + Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext), Classes.size())); // Number of categories defined - Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty, + Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext), Categories.size())); // Create an array of classes, then categories, then static object instances Classes.insert(Classes.end(), Categories.begin(), Categories.end()); @@ -1178,17 +1178,18 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { // Create the load function calling the runtime entry point with the module // structure llvm::Function * LoadFunction = llvm::Function::Create( - llvm::FunctionType::get(llvm::Type::VoidTy, false), + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false), llvm::GlobalValue::InternalLinkage, ".objc_load_function", &TheModule); - llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", LoadFunction); + llvm::BasicBlock *EntryBB = + llvm::BasicBlock::Create(VMContext, "entry", LoadFunction); CGBuilderTy Builder(VMContext); Builder.SetInsertPoint(EntryBB); std::vector Params(1, llvm::PointerType::getUnqual(ModuleTy)); llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get( - llvm::Type::VoidTy, Params, true), "__objc_exec_class"); + llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class"); Builder.CreateCall(Register, Module); Builder.CreateRetVoid(); @@ -1246,7 +1247,7 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() { Params.push_back(BoolTy); // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool) const llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Params, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false); return cast(CGM.CreateRuntimeFunction(FTy, "objc_setProperty")); } @@ -1266,14 +1267,14 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S) { // Pointer to the personality function llvm::Constant *Personality = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), true), "__gnu_objc_personality_v0"); Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy); std::vector Params; Params.push_back(PtrTy); llvm::Value *RethrowFn = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false), "_Unwind_Resume_or_Rethrow"); bool isTry = isa(S); @@ -1289,7 +1290,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, if (!isTry) { std::vector Args(1, IdTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); llvm::Value *SyncArg = CGF.EmitScalarExpr(cast(S).getSynchExpr()); @@ -1377,7 +1378,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // We use a cleanup unless there was already a catch all. 
if (!HasCatchAll) { - ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0)); } @@ -1442,7 +1443,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, ESelArgs.clear(); ESelArgs.push_back(Exc); ESelArgs.push_back(Personality); - ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(), "selector"); CGF.Builder.CreateCall(llvm_eh_typeid_for, @@ -1466,7 +1467,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // @synchronized. std::vector Args(1, IdTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); llvm::Value *SyncArg = CGF.EmitScalarExpr(cast(S).getSynchExpr()); @@ -1496,7 +1497,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, std::vector Args(1, IdTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); llvm::Value *ThrowFn = CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 1e41c00d4ca8..f41b3b64f480 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -101,7 +101,8 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF, unsigned CVRQualifiers, llvm::Value *Offset) { // Compute (type*) ( (char *) BaseValue + Offset) - llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Type *I8Ptr = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext())); QualType IvarTy = Ivar->getType(); const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy); llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr); @@ -160,7 +161,7 @@ private: Params.push_back(ObjectPtrTy); Params.push_back(SelectorPtrTy); return - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, true), "objc_msgSend_stret"); @@ -173,7 +174,8 @@ private: Params.push_back(ObjectPtrTy); Params.push_back(SelectorPtrTy); return - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::DoubleTy, + CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::Type::getDoubleTy(VMContext), Params, true), "objc_msgSend_fpret"); @@ -210,7 +212,7 @@ private: Params.push_back(SuperPtrTy); Params.push_back(SelectorPtrTy); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, true), "objc_msgSendSuper_stret"); } @@ -223,7 +225,7 @@ private: Params.push_back(SuperPtrTy); Params.push_back(SelectorPtrTy); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, true), "objc_msgSendSuper2_stret"); } @@ -395,7 +397,7 @@ public: // void objc_exception_throw(id) std::vector Args(1, ObjectPtrTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, 
false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); } @@ -404,7 +406,7 @@ public: // void objc_sync_enter (id) std::vector Args(1, ObjectPtrTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); } @@ -413,7 +415,7 @@ public: // void objc_sync_exit (id) std::vector Args(1, ObjectPtrTy); llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); } @@ -508,7 +510,7 @@ public: std::vector Params; Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy)); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false), "objc_exception_try_enter"); } @@ -518,7 +520,7 @@ public: std::vector Params; Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy)); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false), "objc_exception_try_exit"); } @@ -539,7 +541,7 @@ public: Params.push_back(ClassPtrTy); Params.push_back(ObjectPtrTy); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::Int32Ty, + llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), Params, false), "objc_exception_match"); @@ -548,9 +550,9 @@ public: /// SetJmpFn - LLVM _setjmp function. llvm::Constant *getSetJmpFn() { std::vector Params; - Params.push_back(llvm::PointerType::getUnqual(llvm::Type::Int32Ty)); + Params.push_back(llvm::PointerType::getUnqual(llvm::Type::getInt32Ty(VMContext))); return - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), Params, false), "_setjmp"); @@ -714,7 +716,7 @@ public: /// exception personality function. 
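The runtime-function getters in this helper class all have the shape shown above: build the parameter list, create a FunctionType whose result type now comes from VMContext, and ask CGM.CreateRuntimeFunction for the declaration. One getter written out in full, assuming the parameter vector's element type is const llvm::Type*, which is what FunctionType::get of this era expects:

    llvm::Constant *getSyncEnterFn() {
      // void objc_sync_enter(id): a single 'id' argument, void result.
      std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
      return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
    }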
llvm::Value *getEHPersonalityPtr() { llvm::Constant *Personality = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), true), "__objc_personality_v0"); return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy); @@ -724,13 +726,13 @@ public: std::vector Params; Params.push_back(Int8PtrTy); return CGM.CreateRuntimeFunction( - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false), "_Unwind_Resume_or_Rethrow"); } llvm::Constant *getObjCEndCatchFn() { - return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false), "objc_end_catch"); @@ -1377,8 +1379,8 @@ static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext, unsigned idx0, unsigned idx1) { llvm::Value *Idxs[] = { - llvm::ConstantInt::get(llvm::Type::Int32Ty, idx0), - llvm::ConstantInt::get(llvm::Type::Int32Ty, idx1) + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0), + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1) }; return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2); } @@ -2471,7 +2473,8 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, "exceptiondata.ptr"); llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy, "_rethrow"); - llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(llvm::Type::Int1Ty, + llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca( + llvm::Type::getInt1Ty(VMContext), "_call_try_exit"); CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CallTryExitPtr); @@ -2849,10 +2852,10 @@ void CGObjCMac::EmitImageInfo() { // Emitted as int[2]; llvm::Constant *values[2] = { - llvm::ConstantInt::get(llvm::Type::Int32Ty, version), - llvm::ConstantInt::get(llvm::Type::Int32Ty, flags) + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version), + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags) }; - llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::Int32Ty, 2); + llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2); const char *Section; if (ObjCABI == 1) @@ -2973,7 +2976,7 @@ llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) { if (!Entry) Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_", - llvm::ConstantArray::get(Ident->getName()), + llvm::ConstantArray::get(VMContext, Ident->getName()), "__TEXT,__cstring,cstring_literals", 1, true); @@ -3191,7 +3194,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout( bool hasUnion = false; unsigned int WordsToScan, WordsToSkip; - const llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) return llvm::Constant::getNullValue(PtrTy); @@ -3357,7 +3360,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout( return llvm::Constant::getNullValue(PtrTy); llvm::GlobalVariable * Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_", - llvm::ConstantArray::get(BitMap.c_str()), + llvm::ConstantArray::get(VMContext, BitMap.c_str()), "__TEXT,__cstring,cstring_literals", 1, true); return getConstantGEP(VMContext, Entry, 0, 0); @@ -3369,7 +3372,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) { // FIXME: Avoid std::string copying. 
if (!Entry) Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_", - llvm::ConstantArray::get(Sel.getAsString()), + llvm::ConstantArray::get(VMContext, Sel.getAsString()), "__TEXT,__cstring,cstring_literals", 1, true); @@ -3394,7 +3397,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) { if (!Entry) Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_", - llvm::ConstantArray::get(TypeStr), + llvm::ConstantArray::get(VMContext, TypeStr), "__TEXT,__cstring,cstring_literals", 1, true); @@ -3410,7 +3413,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) { if (!Entry) Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_", - llvm::ConstantArray::get(TypeStr), + llvm::ConstantArray::get(VMContext, TypeStr), "__TEXT,__cstring,cstring_literals", 1, true); @@ -3423,7 +3426,7 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) { if (!Entry) Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_", - llvm::ConstantArray::get(Ident->getName()), + llvm::ConstantArray::get(VMContext, Ident->getName()), "__TEXT,__cstring,cstring_literals", 1, true); @@ -3525,7 +3528,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) IntTy = Types.ConvertType(Ctx.IntTy); LongTy = Types.ConvertType(Ctx.LongTy); LongLongTy = Types.ConvertType(Ctx.LongLongTy); - Int8PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Int8PtrTy = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType()); PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy); @@ -3814,10 +3817,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) // Exceptions const llvm::Type *StackPtrTy = llvm::ArrayType::get( - llvm::PointerType::getUnqual(llvm::Type::Int8Ty), 4); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)), 4); ExceptionDataTy = - llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::Int32Ty, + llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), SetJmpBufferSize), StackPtrTy, NULL); CGM.getModule().addTypeName("struct._objc_exception_data", @@ -5548,7 +5551,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, llvm::SmallVector Args; Args.push_back(Exc); Args.push_back(ObjCTypes.getEHPersonalityPtr()); - Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, + Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end()); CGF.Builder.CreateStore(Exc, RethrowPtr); @@ -5580,7 +5583,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, Args.clear(); Args.push_back(Exc); Args.push_back(ObjCTypes.getEHPersonalityPtr()); - Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, + Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end()); CGF.Builder.CreateStore(Exc, RethrowPtr); @@ -5693,7 +5696,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID, llvm::GlobalValue::ExternalLinkage, 0, VTableName); - llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 2); + llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2); std::vector Values(3); Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1); diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp 
b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp index 4576b808f02e..508a997b0a08 100644 --- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -282,7 +282,7 @@ void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) { if (NumBytes == 0) return; - const llvm::Type *Ty = llvm::Type::Int8Ty; + const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext()); if (NumBytes > 1) Ty = llvm::ArrayType::get(Ty, NumBytes); diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index 22d9a991ef29..12e3a95373a1 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -282,7 +282,7 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { // EmitIndirectSwitches(). We need a default dest, so we use the // current BB, but this is overwritten. llvm::Value *V = Builder.CreatePtrToInt(EmitScalarExpr(S.getTarget()), - llvm::Type::Int32Ty, + llvm::Type::getInt32Ty(VMContext), "addr"); llvm::SwitchInst *I = Builder.CreateSwitch(V, Builder.GetInsertBlock()); IndirectSwitches.push_back(I); @@ -780,7 +780,7 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S, uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty); if (Size <= 64 && llvm::isPowerOf2_64(Size)) { - Ty = llvm::IntegerType::get(Size); + Ty = llvm::IntegerType::get(VMContext, Size); Ty = llvm::PointerType::getUnqual(Ty); Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty)); @@ -896,7 +896,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { uint64_t InputSize = getContext().getTypeSize(InputTy); if (getContext().getTypeSize(OutputTy) < InputSize) { // Form the asm to return the value as a larger integer type. - ResultRegTypes.back() = llvm::IntegerType::get((unsigned)InputSize); + ResultRegTypes.back() = llvm::IntegerType::get(VMContext, (unsigned)InputSize); } } } else { @@ -954,9 +954,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Use ptrtoint as appropriate so that we can do our extension. if (isa(Arg->getType())) Arg = Builder.CreatePtrToInt(Arg, - llvm::IntegerType::get(LLVMPointerWidth)); + llvm::IntegerType::get(VMContext, LLVMPointerWidth)); unsigned OutputSize = (unsigned)getContext().getTypeSize(OutputTy); - Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(OutputSize)); + Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(VMContext, OutputSize)); } } @@ -998,7 +998,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { const llvm::Type *ResultType; if (ResultRegTypes.empty()) - ResultType = llvm::Type::VoidTy; + ResultType = llvm::Type::getVoidTy(VMContext); else if (ResultRegTypes.size() == 1) ResultType = ResultRegTypes[0]; else @@ -1035,7 +1035,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Truncate the integer result to the right size, note that // ResultTruncRegTypes can be a pointer. uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy); - Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get((unsigned)ResSize)); + Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext, (unsigned)ResSize)); if (Tmp->getType() != TruncTy) { assert(isa(TruncTy)); diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index b7ae7c1f0d29..a977add65fb5 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -156,8 +156,8 @@ void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy, // Create a marker to make it easy to insert allocas into the entryblock // later. 
Don't create this with the builder, because we don't want it // folded. - llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty); - AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "", + llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)); + AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::getInt32Ty(VMContext), "", EntryBB); if (Builder.isNamePreserving()) AllocaInsertPt->setName("allocapt"); @@ -424,7 +424,7 @@ unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) { } void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) { - const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); if (DestPtr->getType() != BP) DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp"); @@ -436,13 +436,14 @@ void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) { return; // FIXME: Handle variable sized types. - const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth); + const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext, + LLVMPointerWidth); Builder.CreateCall4(CGM.getMemSetFn(), DestPtr, - llvm::Constant::getNullValue(llvm::Type::Int8Ty), + llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)), // TypeInfo.first describes size in bits. llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), - llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), TypeInfo.second/8)); } @@ -468,7 +469,7 @@ void CodeGenFunction::EmitIndirectSwitches() { I->setSuccessor(0, Default); for (std::map::iterator LI = LabelIDs.begin(), LE = LabelIDs.end(); LI != LE; ++LI) { - I->addCase(llvm::ConstantInt::get(llvm::Type::Int32Ty, + I->addCase(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LI->second), getBasicBlockForLabel(LI->first)); } @@ -590,7 +591,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() Builder.SetInsertPoint(SwitchBlock); - llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::Int32Ty, + llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext), "cleanup.dst"); llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp"); @@ -604,7 +605,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() // If we had a current basic block, we also need to emit an instruction // to initialize the cleanup destination. - Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::Int32Ty), + Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)), DestCodePtr); } else Builder.ClearInsertionPoint(); @@ -621,13 +622,13 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() // Check if we already have a destination for this block. if (Dest == SI->getDefaultDest()) - ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); else { ID = SI->findCaseDest(Dest); if (!ID) { // No code found, get a new unique one by using the number of // switch successors. - ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, + ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), SI->getNumSuccessors()); SI->addCase(ID, Dest); } @@ -644,7 +645,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn); // Create a unique case ID. 
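EmitMemSetToZero above shows how many distinct types a single call can now pull from the context: the destination pointer is cast to i8*, the fill value is a null i8, the length uses the pointer-width integer type, and the alignment argument is an i32. A condensed sketch of the call as it reads after the patch; TypeInfo and LLVMPointerWidth are as in the surrounding function, and TypeInfo holds size and alignment in bits, hence the divisions by 8:

    const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
    Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
                        // Fill value: a zero byte.
                        llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                        // Length in bytes.
                        llvm::ConstantInt::get(IntPtr, TypeInfo.first / 8),
                        // Alignment in bytes.
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                               TypeInfo.second / 8));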
- llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), SI->getNumSuccessors()); // Store the jump destination before the branch instruction. diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 71861273b896..1ea19c6e13b8 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -424,9 +424,9 @@ public: llvm::Function *Parent=0, llvm::BasicBlock *InsertBefore=0) { #ifdef NDEBUG - return llvm::BasicBlock::Create("", Parent, InsertBefore); + return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore); #else - return llvm::BasicBlock::Create(Name, Parent, InsertBefore); + return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore); #endif } diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index a99bae17995f..c9e7e934fd77 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -199,22 +199,22 @@ void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) { void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) { // Ctor function type is void()*. llvm::FunctionType* CtorFTy = - llvm::FunctionType::get(llvm::Type::VoidTy, + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), std::vector(), false); llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy); // Get the type of a ctor entry, { i32, void ()* }. llvm::StructType* CtorStructTy = - llvm::StructType::get(VMContext, llvm::Type::Int32Ty, + llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext), llvm::PointerType::getUnqual(CtorFTy), NULL); // Construct the constructor and destructor arrays. std::vector Ctors; for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) { std::vector S; - S.push_back( - llvm::ConstantInt::get(llvm::Type::Int32Ty, I->second, false)); + S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), + I->second, false)); S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy)); Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S)); } @@ -421,7 +421,8 @@ void CodeGenModule::EmitLLVMUsed() { if (LLVMUsed.empty()) return; - llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Type *i8PTy = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); // Convert LLVMUsed to what ConstantArray needs. std::vector UsedArray; @@ -486,9 +487,12 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, // get [N x i8] constants for the annotation string, and the filename string // which are the 2nd and 3rd elements of the global annotation structure. 
- const llvm::Type *SBP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); - llvm::Constant *anno = llvm::ConstantArray::get(AA->getAnnotation(), true); - llvm::Constant *unit = llvm::ConstantArray::get(M->getModuleIdentifier(), + const llvm::Type *SBP = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); + llvm::Constant *anno = llvm::ConstantArray::get(VMContext, + AA->getAnnotation(), true); + llvm::Constant *unit = llvm::ConstantArray::get(VMContext, + M->getModuleIdentifier(), true); // Get the two global values corresponding to the ConstantArrays we just @@ -508,7 +512,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, llvm::ConstantExpr::getBitCast(GV, SBP), llvm::ConstantExpr::getBitCast(annoGV, SBP), llvm::ConstantExpr::getBitCast(unitGV, SBP), - llvm::ConstantInt::get(llvm::Type::Int32Ty, LineNo) + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo) }; return llvm::ConstantStruct::get(VMContext, Fields, 4, false); } @@ -663,7 +667,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName, // sure not to try to set attributes. bool IsIncompleteFunction = false; if (!isa(Ty)) { - Ty = llvm::FunctionType::get(llvm::Type::VoidTy, + Ty = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), std::vector(), false); IsIncompleteFunction = true; } @@ -1057,7 +1061,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(), ArgList.end(), "", CI); ArgList.clear(); - if (NewCall->getType() != llvm::Type::VoidTy) + if (NewCall->getType() != llvm::Type::getVoidTy(Old->getContext())) NewCall->takeName(CI); NewCall->setCallingConv(CI->getCallingConv()); NewCall->setAttributes(CI->getAttributes()); @@ -1264,19 +1268,19 @@ llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys, llvm::Function *CodeGenModule::getMemCpyFn() { if (MemCpyFn) return MemCpyFn; - const llvm::Type *IntPtr = TheTargetData.getIntPtrType(); + const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext); return MemCpyFn = getIntrinsic(llvm::Intrinsic::memcpy, &IntPtr, 1); } llvm::Function *CodeGenModule::getMemMoveFn() { if (MemMoveFn) return MemMoveFn; - const llvm::Type *IntPtr = TheTargetData.getIntPtrType(); + const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext); return MemMoveFn = getIntrinsic(llvm::Intrinsic::memmove, &IntPtr, 1); } llvm::Function *CodeGenModule::getMemSetFn() { if (MemSetFn) return MemSetFn; - const llvm::Type *IntPtr = TheTargetData.getIntPtrType(); + const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext); return MemSetFn = getIntrinsic(llvm::Intrinsic::memset, &IntPtr, 1); } @@ -1379,7 +1383,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) { if (llvm::Constant *C = Entry.getValue()) return C; - llvm::Constant *Zero = llvm::Constant::getNullValue(llvm::Type::Int32Ty); + llvm::Constant *Zero = + llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)); llvm::Constant *Zeros[] = { Zero, Zero }; // If we don't already have it, get __CFConstantStringClassReference. @@ -1420,7 +1425,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) { // String pointer. 
CurField = NextField; NextField = *Field++; - llvm::Constant *C = llvm::ConstantArray::get(Entry.getKey().str()); + llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str()); const char *Sect, *Prefix; bool isConstant; @@ -1519,7 +1524,8 @@ static llvm::Constant *GenerateStringLiteral(const std::string &str, CodeGenModule &CGM, const char *GlobalName) { // Create Constant for this string literal. Don't add a '\0'. - llvm::Constant *C = llvm::ConstantArray::get(str, false); + llvm::Constant *C = + llvm::ConstantArray::get(CGM.getLLVMContext(), str, false); // Create a global variable for this string return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant, diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp index 7b1b85c64ca9..5e2ba5a3cf59 100644 --- a/clang/lib/CodeGen/CodeGenTypes.cpp +++ b/clang/lib/CodeGen/CodeGenTypes.cpp @@ -82,8 +82,9 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) { const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) { const llvm::Type *ResultType = ConvertTypeRecursive(T); - if (ResultType == llvm::Type::Int1Ty) - return llvm::IntegerType::get((unsigned)Context.getTypeSize(T)); + if (ResultType == llvm::Type::getInt1Ty(getLLVMContext())) + return llvm::IntegerType::get(getLLVMContext(), + (unsigned)Context.getTypeSize(T)); return ResultType; } @@ -95,11 +96,12 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { const llvm::Type *R = ConvertType(T); // If this is a non-bool type, don't map it. - if (R != llvm::Type::Int1Ty) + if (R != llvm::Type::getInt1Ty(getLLVMContext())) return R; // Otherwise, return an integer of the target-specified size. - return llvm::IntegerType::get((unsigned)Context.getTypeSize(T)); + return llvm::IntegerType::get(getLLVMContext(), + (unsigned)Context.getTypeSize(T)); } @@ -161,17 +163,18 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { } } -static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) { +static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext, + const llvm::fltSemantics &format) { if (&format == &llvm::APFloat::IEEEsingle) - return llvm::Type::FloatTy; + return llvm::Type::getFloatTy(VMContext); if (&format == &llvm::APFloat::IEEEdouble) - return llvm::Type::DoubleTy; + return llvm::Type::getDoubleTy(VMContext); if (&format == &llvm::APFloat::IEEEquad) - return llvm::Type::FP128Ty; + return llvm::Type::getFP128Ty(VMContext); if (&format == &llvm::APFloat::PPCDoubleDouble) - return llvm::Type::PPC_FP128Ty; + return llvm::Type::getPPC_FP128Ty(VMContext); if (&format == &llvm::APFloat::x87DoubleExtended) - return llvm::Type::X86_FP80Ty; + return llvm::Type::getX86_FP80Ty(VMContext); assert(0 && "Unknown float format!"); return 0; } @@ -196,11 +199,11 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::ObjCClass: // LLVM void type can only be used as the result of a function call. Just // map to the same as char. - return llvm::IntegerType::get(8); + return llvm::IntegerType::get(getLLVMContext(), 8); case BuiltinType::Bool: // Note that we always return bool as i1 for use as a scalar type. 
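ConvertTypeForMem, changed just above, is what lets bool stay i1 as a scalar while remaining byte-addressable in memory: the scalar conversion runs first, and only an i1 result is widened to the integer width the target assigns the type. The two branches assembled into one sketch, with names as in CodeGenTypes.cpp:

    const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
      const llvm::Type *R = ConvertType(T);

      // Non-bool types are mapped directly.
      if (R != llvm::Type::getInt1Ty(getLLVMContext()))
        return R;

      // Otherwise widen i1 to the target-specified size (typically i8).
      return llvm::IntegerType::get(getLLVMContext(),
                                    (unsigned)Context.getTypeSize(T));
    }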
- return llvm::Type::Int1Ty; + return llvm::Type::getInt1Ty(getLLVMContext()); case BuiltinType::Char_S: case BuiltinType::Char_U: @@ -217,22 +220,24 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::WChar: case BuiltinType::Char16: case BuiltinType::Char32: - return llvm::IntegerType::get( + return llvm::IntegerType::get(getLLVMContext(), static_cast(Context.getTypeSize(T))); case BuiltinType::Float: case BuiltinType::Double: case BuiltinType::LongDouble: - return getTypeForFormat(Context.getFloatTypeSemantics(T)); + return getTypeForFormat(getLLVMContext(), + Context.getFloatTypeSemantics(T)); case BuiltinType::UInt128: case BuiltinType::Int128: - return llvm::IntegerType::get(128); + return llvm::IntegerType::get(getLLVMContext(), 128); } break; } case Type::FixedWidthInt: - return llvm::IntegerType::get(cast(T)->getWidth()); + return llvm::IntegerType::get(getLLVMContext(), + cast(T)->getWidth()); case Type::Complex: { const llvm::Type *EltTy = ConvertTypeRecursive(cast(Ty).getElementType()); diff --git a/clang/lib/CodeGen/TargetABIInfo.cpp b/clang/lib/CodeGen/TargetABIInfo.cpp index 87f728eb443e..a1c1092ffbff 100644 --- a/clang/lib/CodeGen/TargetABIInfo.cpp +++ b/clang/lib/CodeGen/TargetABIInfo.cpp @@ -307,15 +307,14 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, // registers and we need to make sure to pick a type the LLVM // backend will like. if (Size == 128) - return - ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty, - 2)); + return ABIArgInfo::getCoerce(llvm::VectorType::get( + llvm::Type::getInt64Ty(VMContext), 2)); // Always return in register if it fits in a general purpose // register, or if it is 64 bits and has a single element. if ((Size == 8 || Size == 16 || Size == 32) || (Size == 64 && VT->getNumElements() == 1)) - return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size)); return ABIArgInfo::getIndirect(0); } @@ -340,21 +339,21 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, // element type. uint64_t Size = Context.getTypeSize(RetTy); return ABIArgInfo::getCoerce( - llvm::IntegerType::get((unsigned) Size)); + llvm::IntegerType::get(VMContext, (unsigned) Size)); } else if (BT->getKind() == BuiltinType::Float) { assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && "Unexpect single element structure size!"); - return ABIArgInfo::getCoerce(llvm::Type::FloatTy); + return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext)); } else if (BT->getKind() == BuiltinType::Double) { assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && "Unexpect single element structure size!"); - return ABIArgInfo::getCoerce(llvm::Type::DoubleTy); + return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext)); } } else if (SeltTy->isPointerType()) { // FIXME: It would be really nice if this could come out as the proper // pointer type. llvm::Type *PtrTy = - llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); return ABIArgInfo::getCoerce(PtrTy); } else if (SeltTy->isVectorType()) { // 64- and 128-bit vectors are never returned in a @@ -371,7 +370,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, // in a register. 
if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) { uint64_t Size = Context.getTypeSize(RetTy); - return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size)); } return ABIArgInfo::getIndirect(0); @@ -424,7 +423,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { - const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext())); const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); CGBuilderTy &Builder = CGF.Builder; @@ -438,8 +437,8 @@ llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); llvm::Value *NextAddr = - Builder.CreateGEP(Addr, - llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), + Builder.CreateGEP(Addr, llvm::ConstantInt::get( + llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); @@ -796,13 +795,13 @@ void X86_64ABIInfo::classify(QualType Ty, ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, const llvm::Type *CoerceTo, ASTContext &Context) const { - if (CoerceTo == llvm::Type::Int64Ty) { + if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) { // Integer and pointer types will end up in a general purpose // register. if (Ty->isIntegralType() || Ty->isPointerType()) return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); - } else if (CoerceTo == llvm::Type::DoubleTy) { + } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) { // FIXME: It would probably be better to make CGFunctionInfo only map using // canonical types than to canonize here. QualType CTy = Context.getCanonicalType(Ty); @@ -858,25 +857,25 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next // available register of the sequence %rax, %rdx is used. case Integer: - ResType = llvm::Type::Int64Ty; break; + ResType = llvm::Type::getInt64Ty(VMContext); break; // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next // available SSE register of the sequence %xmm0, %xmm1 is used. case SSE: - ResType = llvm::Type::DoubleTy; break; + ResType = llvm::Type::getDoubleTy(VMContext); break; // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is // returned on the X87 stack in %st0 as 80-bit x87 number. case X87: - ResType = llvm::Type::X86_FP80Ty; break; + ResType = llvm::Type::getX86_FP80Ty(VMContext); break; // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real // part of the value is returned in %st0 and the imaginary part in // %st1. 
case ComplexX87: assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); - ResType = llvm::StructType::get(VMContext, llvm::Type::X86_FP80Ty, - llvm::Type::X86_FP80Ty, + ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext), + llvm::Type::getX86_FP80Ty(VMContext), NULL); break; } @@ -893,11 +892,11 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, case Integer: ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::Int64Ty, NULL); + llvm::Type::getInt64Ty(VMContext), NULL); break; case SSE: ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::DoubleTy, NULL); + llvm::Type::getDoubleTy(VMContext), NULL); break; // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte @@ -906,7 +905,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, // SSEUP should always be preceeded by SSE, just widen. case SSEUp: assert(Lo == SSE && "Unexpected SSEUp classification."); - ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2); break; // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is @@ -918,7 +917,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, // extra bits in an SSE reg. if (Lo != X87) ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::DoubleTy, NULL); + llvm::Type::getDoubleTy(VMContext), NULL); break; } @@ -964,7 +963,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, // and %r9 is used. case Integer: ++neededInt; - ResType = llvm::Type::Int64Ty; + ResType = llvm::Type::getInt64Ty(VMContext); break; // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next @@ -972,7 +971,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, // order from %xmm0 to %xmm7. case SSE: ++neededSSE; - ResType = llvm::Type::DoubleTy; + ResType = llvm::Type::getDoubleTy(VMContext); break; } @@ -989,7 +988,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, case NoClass: break; case Integer: ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::Int64Ty, NULL); + llvm::Type::getInt64Ty(VMContext), NULL); ++neededInt; break; @@ -998,7 +997,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, case X87Up: case SSE: ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::DoubleTy, NULL); + llvm::Type::getDoubleTy(VMContext), NULL); ++neededSSE; break; @@ -1007,7 +1006,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, // register. case SSEUp: assert(Lo == SSE && "Unexpected SSEUp classification."); - ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2); break; } @@ -1065,11 +1064,13 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, // shouldn't ever matter in practice. 
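In the classification code above, the Lo and Hi eightbyte classes are turned into LLVM coercion types one at a time: Integer selects i64, SSE selects double, and a non-empty Hi class wraps the pair in an anonymous struct. A sketch of the Integer/SSE case under the new API, using the same NULL-terminated variadic StructType::get this file already relies on:

    // Lo == Integer picks the first eightbyte's type,
    // and Hi == SSE appends the second, giving { i64, double }.
    const llvm::Type *ResType = llvm::Type::getInt64Ty(VMContext);
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);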
// overflow_arg_area = (overflow_arg_area + 15) & ~15; - llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15); + llvm::Value *Offset = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, - llvm::Type::Int64Ty); - llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL); + llvm::Type::getInt64Ty(CGF.getLLVMContext())); + llvm::Value *Mask = llvm::ConstantInt::get( + llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL); overflow_arg_area = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), overflow_arg_area->getType(), @@ -1088,7 +1089,8 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, // an 8 byte boundary. uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; - llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::Value *Offset = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), (SizeInBytes + 7) & ~7); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, "overflow_arg_area.next"); @@ -1137,7 +1139,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); InRegs = CGF.Builder.CreateICmpULE(gp_offset, - llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 48 - neededInt * 8), "fits_in_gp"); } @@ -1147,7 +1149,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); llvm::Value *FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, - llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 176 - neededSSE * 16), "fits_in_fp"); InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; @@ -1216,12 +1218,12 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); llvm::Value *RegAddrHi = CGF.Builder.CreateGEP(RegAddrLo, - llvm::ConstantInt::get(llvm::Type::Int32Ty, 16)); + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 16)); const llvm::Type *DblPtrTy = - llvm::PointerType::getUnqual(llvm::Type::DoubleTy); + llvm::PointerType::getUnqual(llvm::Type::getDoubleTy(VMContext)); const llvm::StructType *ST = llvm::StructType::get(VMContext, - llvm::Type::DoubleTy, - llvm::Type::DoubleTy, + llvm::Type::getDoubleTy(VMContext), + llvm::Type::getDoubleTy(VMContext), NULL); llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, @@ -1239,13 +1241,13 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, // l->gp_offset = l->gp_offset + num_gp * 8 // l->fp_offset = l->fp_offset + num_fp * 16. 
if (neededInt) { - llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), neededInt * 8); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), gp_offset_p); } if (neededSSE) { - llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), neededSSE * 16); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), fp_offset_p); @@ -1353,10 +1355,10 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, const llvm::Type* ElemTy; unsigned SizeRegs; if (Context.getTypeAlign(Ty) > 32) { - ElemTy = llvm::Type::Int64Ty; + ElemTy = llvm::Type::getInt64Ty(VMContext); SizeRegs = (Context.getTypeSize(Ty) + 63) / 64; } else { - ElemTy = llvm::Type::Int32Ty; + ElemTy = llvm::Type::getInt32Ty(VMContext); SizeRegs = (Context.getTypeSize(Ty) + 31) / 32; } std::vector LLVMFields; @@ -1375,7 +1377,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, // are returned indirectly. uint64_t Size = Context.getTypeSize(RetTy); if (Size <= 32) - return ABIArgInfo::getCoerce(llvm::Type::Int32Ty); + return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext)); return ABIArgInfo::getIndirect(0); } else { return (RetTy->isPromotableIntegerType() ? @@ -1386,7 +1388,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { // FIXME: Need to handle alignment - const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BP = + llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext())); const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); CGBuilderTy &Builder = CGF.Builder; @@ -1400,8 +1403,8 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); llvm::Value *NextAddr = - Builder.CreateGEP(Addr, - llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), + Builder.CreateGEP(Addr, llvm::ConstantInt::get( + llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP);
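The ARM EmitVAArg that closes this patch uses the same simple va_list scheme as the X86-32 version earlier in the file: the va_list slot holds a raw i8* cursor, the current argument is read through a bitcast of that cursor, and the cursor is advanced by the argument size rounded up to 4 bytes. A condensed sketch of the pattern with the types spelled the new way; the load of the current cursor is reconstructed for completeness and is not itself part of the hunks above:

    llvm::LLVMContext &Ctx = CGF.getLLVMContext();
    const llvm::Type *BP  = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(Ctx));
    const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
    CGBuilderTy &Builder = CGF.Builder;

    // Treat the va_list as a slot holding an i8* cursor and read the cursor.
    llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
    llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

    // Advance the cursor by the argument size, rounded up to 4 bytes, and
    // store it back so the next va_arg starts after this argument.
    uint64_t Offset =
        llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
    llvm::Value *NextAddr = Builder.CreateGEP(
        Addr, llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Offset),
        "ap.next");
    Builder.CreateStore(NextAddr, VAListAddrAsBPP);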