Revert "Prevent IR-gen from emitting consteval declarations"
This reverts commit 3bab88b7ba
.
This patch causes test failures:
http://lab.llvm.org:8011/builders/clang-cmake-armv7-quick/builds/17260
This commit is contained in:
parent
ec4e68e667
commit
550c4562d1
|
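For context (an illustrative sketch, not part of this commit, with hypothetical names): a call to a consteval function is an immediate invocation, which Sema wraps in a ConstantExpr carrying the computed APValue; the reverted patch taught IR-gen to emit that stored result and to never emit the consteval function itself.

// Illustrative example only; assumes -std=c++2a. Names are hypothetical.
consteval int square(int n) { return n * n; } // must be evaluated at compile time

int answer() {
  // square(6) is an immediate invocation; under the reverted patch IR-gen
  // emitted the precomputed value 36 here instead of a call to square().
  return square(6) + 6;
}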
@@ -272,7 +272,6 @@ void ConstantExpr::DefaultInit(ResultStorageKind StorageKind) {
  ConstantExprBits.ResultKind = StorageKind;
  ConstantExprBits.APValueKind = APValue::None;
  ConstantExprBits.HasCleanup = false;
  ConstantExprBits.IsImmediateInvocation = false;
  if (StorageKind == ConstantExpr::RSK_APValue)
    ::new (getTrailingObjects<APValue>()) APValue();
}
@@ -9986,6 +9986,8 @@ public:
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitConstantExpr(const ConstantExpr *E);

  bool VisitIntegerLiteral(const IntegerLiteral *E) {
    return Success(E->getValue(), E);
  }
@@ -10767,6 +10769,13 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
  return true;
}

bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
  llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
  if (E->getResultAPValueKind() != APValue::None)
    return Success(E->getAPValueResult(), E);
  return ExprEvaluatorBaseTy::VisitConstantExpr(E);
}

bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (unsigned BuiltinOp = E->getBuiltinCallee())
    return VisitBuiltinCallExpr(E, BuiltinOp);
@@ -1272,17 +1272,18 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
@@ -1333,7 +1334,7 @@ static void CreateCoercedStore(llvm::Value *Src,
  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
@@ -5069,7 +5070,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      EmitAggregateStore(CI, DestPtr, DestIsVolatile);
      BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    case TEK_Scalar: {
@@ -762,8 +762,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
    init = EWC->getSubExpr();
  if (const FullExpr *fe = dyn_cast<FullExpr>(init))
    init = fe->getSubExpr();

  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
@@ -1302,15 +1302,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
  case Expr::ConstantExprClass:
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
@@ -127,11 +127,6 @@ public:
  }

  void VisitConstantExpr(ConstantExpr *E) {
    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.EmitAggregateStore(Result, Dest.getAddress(),
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

@@ -13,7 +13,6 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
@@ -103,9 +102,6 @@ public:
  }
  ComplexPairTy VisitExpr(Expr *S);
  ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
    if (llvm::Constant *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E))
      return ComplexPairTy(Result->getAggregateElement(0U),
                           Result->getAggregateElement(1U));
    return Visit(E->getSubExpr());
  }
  ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
@@ -1011,8 +1011,6 @@ public:
  }

  llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
    if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
      return Result;
    return Visit(CE->getSubExpr(), T);
  }

@@ -1360,20 +1358,6 @@ ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
  return validateAndPopAbstract(C, state);
}

llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
  if (!CE->hasAPValueResult())
    return nullptr;
  const Expr *Inner = CE->getSubExpr()->IgnoreImplicit();
  QualType RetType;
  if (auto *Call = dyn_cast<CallExpr>(Inner))
    RetType = Call->getCallReturnType(CGF->getContext());
  else if (auto *Ctor = dyn_cast<CXXConstructExpr>(Inner))
    RetType = Ctor->getType();
  llvm::Constant *Res =
      emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
  return Res;
}

llvm::Constant *
ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
  auto state = pushAbstract();
@@ -1919,8 +1903,6 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {

ConstantLValue
ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
  if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(E))
    return Result;
  return Visit(E->getSubExpr());
}

@@ -419,12 +419,6 @@ public:
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
@@ -1119,8 +1119,9 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV))
    RV = fe->getSubExpr();

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
@@ -4138,10 +4138,6 @@ public:
  /// aggregate type into a temporary LValue.
  LValue EmitAggExprToLValue(const Expr *E);

  /// Build all the stores needed to initialize an aggregate at Dest with the
  /// value Val.
  void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);

  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
  /// make sure it survives garbage collection until this point.
  void EmitExtendGCLifetime(llvm::Value *object);
@@ -3336,8 +3336,6 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
                                                 bool ForVTable,
                                                 bool DontDefer,
                                                 ForDefinition_t IsForDefinition) {
  assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
         "consteval function should never be emitted");
  // If there was no specific requested type, just convert it now.
  if (!Ty) {
    const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -5332,11 +5330,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
  if (D->isTemplated())
    return;

  // Consteval function shouldn't be emitted.
  if (auto *FD = dyn_cast<FunctionDecl>(D))
    if (FD->isConsteval())
      return;

  switch (D->getKind()) {
  case Decl::CXXConversion:
  case Decl::CXXMethod:
@@ -110,8 +110,6 @@ public:
  llvm::Constant *tryEmitAbstract(const APValue &value, QualType T);
  llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T);

  llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE);

  llvm::Constant *emitNullForMemory(QualType T) {
    return emitNullForMemory(CGM, T);
  }
@@ -16139,7 +16139,7 @@ ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {

  ConstantExpr *Res = ConstantExpr::Create(
      getASTContext(), E.get(),
      ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
      ConstantExpr::getStorageKind(E.get()->getType().getTypePtr(),
                                   getASTContext()),
      /*IsImmediateInvocation*/ true);
  ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
@@ -1,210 +0,0 @@
// NOTE: Assertions have been autogenerated by utils/update_test_checks.py
// RUN: %clang_cc1 -emit-llvm %s -std=c++2a -o %t.ll
// RUN: FileCheck -check-prefix=EVAL -input-file=%t.ll %s
// RUN: FileCheck -check-prefix=EVAL-FN -input-file=%t.ll %s
// RUN: FileCheck -check-prefix=EVAL-STATIC -input-file=%t.ll %s
// RUN: %clang_cc1 -emit-llvm %s -Dconsteval="" -std=c++2a -o %t.ll
// RUN: FileCheck -check-prefix=EXPR -input-file=%t.ll %s

// there is two version of symbol checks to ensure
// that the symbol we are looking for are correct
// EVAL-NOT: @__cxx_global_var_init()
// EXPR: @__cxx_global_var_init()

// EVAL-NOT: @_Z4ret7v()
// EXPR: @_Z4ret7v()
consteval int ret7() {
  return 7;
}

int test_ret7() {
  // EVAL-FN-LABEL: @_Z9test_ret7v(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[I:%.*]] = alloca i32, align 4
  // EVAL-FN-NEXT: store i32 7, i32* [[I]], align 4
  // EVAL-FN-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4
  // EVAL-FN-NEXT: ret i32 [[TMP0]]
  //
  int i = ret7();
  return i;
}

// EVAL-STATIC: @global_i = global i32 7, align 4
int global_i = ret7();

// EVAL-STATIC: @_ZL7i_const = internal constant i32 5, align 4
constexpr int i_const = 5;

// EVAL-NOT: @_Z4retIv()
// EXPR: @_Z4retIv()
consteval const int &retI() {
  return i_const;
}

const int &test_retRefI() {
  // EVAL-FN-LABEL: @_Z12test_retRefIv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: ret i32* @_ZL7i_const
  //
  return retI();
}

int test_retI() {
  // EVAL-FN-LABEL: @_Z9test_retIv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
  // EVAL-FN-NEXT: ret i32 [[TMP0]]
  //
  return retI();
}

// EVAL-NOT: @_Z4retIv()
// EXPR: @_Z4retIv()
consteval const int *retIPtr() {
  return &i_const;
}

int test_retIPtr() {
  // EVAL-FN-LABEL: @_Z12test_retIPtrv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
  // EVAL-FN-NEXT: ret i32 [[TMP0]]
  //
  return *retIPtr();
}

const int *test_retPIPtr() {
  // EVAL-FN-LABEL: @_Z13test_retPIPtrv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: ret i32* @_ZL7i_const
  //
  return retIPtr();
}

// EVAL-NOT: @_Z4retIv()
// EXPR: @_Z4retIv()
consteval const int &&retIRRef() {
  return static_cast<const int &&>(i_const);
}

const int &&test_retIRRef() {
  return static_cast<const int &&>(retIRRef());
}

int test_retIRRefI() {
  // EVAL-FN-LABEL: @_Z14test_retIRRefIv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
  // EVAL-FN-NEXT: ret i32 [[TMP0]]
  //
  return retIRRef();
}

struct Agg {
  int a;
  long b;
};

// EVAL-NOT: @_Z6retAggv()
// EXPR: @_Z6retAggv()
consteval Agg retAgg() {
  return {13, 17};
}

long test_retAgg() {
  // EVAL-FN-LABEL: @_Z11test_retAggv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[B:%.*]] = alloca i64, align 8
  // EVAL-FN-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
  // EVAL-FN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
  // EVAL-FN-NEXT: store i32 13, i32* [[TMP0]], align 8
  // EVAL-FN-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
  // EVAL-FN-NEXT: store i64 17, i64* [[TMP1]], align 8
  // EVAL-FN-NEXT: store i64 17, i64* [[B]], align 8
  // EVAL-FN-NEXT: [[TMP2:%.*]] = load i64, i64* [[B]], align 8
  // EVAL-FN-NEXT: ret i64 [[TMP2]]
  //
  long b = retAgg().b;
  return b;
}

// EVAL-STATIC: @A = global %struct.Agg { i32 13, i64 17 }, align 8
Agg A = retAgg();

// EVAL-NOT: @_Z9retRefAggv()
// EXPR: @_Z9retRefAggv()
consteval const Agg &retRefAgg() {
  const Agg &tmp = A;
  return A;
}

long test_retRefAgg() {
  // EVAL-FN-LABEL: @_Z14test_retRefAggv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[B:%.*]] = alloca i64, align 8
  // EVAL-FN-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
  // EVAL-FN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
  // EVAL-FN-NEXT: store i32 13, i32* [[TMP0]], align 8
  // EVAL-FN-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
  // EVAL-FN-NEXT: store i64 17, i64* [[TMP1]], align 8
  // EVAL-FN-NEXT: store i64 17, i64* [[B]], align 8
  // EVAL-FN-NEXT: [[TMP2:%.*]] = load i64, i64* [[B]], align 8
  // EVAL-FN-NEXT: ret i64 [[TMP2]]
  //
  long b = retAgg().b;
  return b;
}

// EVAL-NOT: @_Z8is_constv()
// EXPR: @_Z8is_constv()
consteval Agg is_const() {
  return {5, 19 * __builtin_is_constant_evaluated()};
}

long test_is_const() {
  // EVAL-FN-LABEL: @_Z13test_is_constv(
  // EVAL-FN-NEXT: entry:
  // EVAL-FN-NEXT: [[B:%.*]] = alloca i64, align 8
  // EVAL-FN-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
  // EVAL-FN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
  // EVAL-FN-NEXT: store i32 5, i32* [[TMP0]], align 8
  // EVAL-FN-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
  // EVAL-FN-NEXT: store i64 19, i64* [[TMP1]], align 8
  // EVAL-FN-NEXT: store i64 19, i64* [[B]], align 8
  // EVAL-FN-NEXT: [[TMP2:%.*]] = load i64, i64* [[B]], align 8
  // EVAL-FN-NEXT: ret i64 [[TMP2]]
  //
  long b = is_const().b;
  return b;
}

// EVAL-NOT: @_ZN7AggCtorC
// EXPR: @_ZN7AggCtorC
struct AggCtor {
  consteval AggCtor(int a = 3, long b = 5) : a(a * a), b(a * b) {}
  int a;
  long b;
};

long test_AggCtor() {
  // CHECK-LABEL: @_Z12test_AggCtorv(
  // CHECK-NEXT: entry:
  // CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
  // CHECK-NEXT: [[C:%.*]] = alloca [[STRUCT_AGGCTOR:%.*]], align 8
  // CHECK-NEXT: store i32 2, i32* [[I]], align 4
  // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 0
  // CHECK-NEXT: store i32 4, i32* [[TMP0]], align 8
  // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 1
  // CHECK-NEXT: store i64 10, i64* [[TMP1]], align 8
  // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 0
  // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 8
  // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64
  // CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 1
  // CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[B]], align 8
  // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV]], [[TMP3]]
  // CHECK-NEXT: ret i64 [[ADD]]
  //
  const int i = 2;
  AggCtor C(i);
  return C.a + C.b;
}
@@ -1,4 +1,4 @@
// RUN: %clang_cc1 -std=c++2a -emit-llvm-only -Wno-unused-value %s -verify
// RUN: %clang_cc1 -std=c++2a -fsyntax-only -Wno-unused-value %s -verify

typedef __SIZE_TYPE__ size_t;