Add support for __builtin_{add,sub,mul}_overflow.

Patch by David Grayson!

llvm-svn: 251651
John McCall 2015-10-29 20:48:01 +00:00
parent b25423525c
commit 03107a4ef0
7 changed files with 382 additions and 3 deletions


@@ -1679,17 +1679,20 @@ an example of their usage:
errorcode_t security_critical_application(...) {
unsigned x, y, result;
...
-if (__builtin_umul_overflow(x, y, &result))
+if (__builtin_mul_overflow(x, y, &result))
return kErrorCodeHackers;
...
use_multiply(result);
...
}
-A complete enumeration of the builtins are:
+Clang provides the following checked arithmetic builtins:
.. code-block:: c
bool __builtin_add_overflow (type1 x, type2 y, type3 *sum);
bool __builtin_sub_overflow (type1 x, type2 y, type3 *diff);
bool __builtin_mul_overflow (type1 x, type2 y, type3 *prod);
bool __builtin_uadd_overflow (unsigned x, unsigned y, unsigned *sum);
bool __builtin_uaddl_overflow (unsigned long x, unsigned long y, unsigned long *sum);
bool __builtin_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *sum);
@@ -1709,6 +1712,21 @@ A complete enumeration of the builtins are:
bool __builtin_smull_overflow (long x, long y, long *prod);
bool __builtin_smulll_overflow(long long x, long long y, long long *prod);
Each builtin performs the specified mathematical operation on the
first two arguments and stores the result in the third argument. If
possible, the result will be equal to the mathematically-correct result
and the builtin will return 0. Otherwise, the builtin will return
1 and the result will be equal to the unique value that is equivalent
to the mathematically-correct result modulo two raised to the *k*-th
power, where *k* is the number of bits in the result type. The
behavior of these builtins is well-defined for all argument values.
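For example, with a result type that is 8 bits wide (so *k* is 8),
adding 200 and 100 yields 300, which is not representable; the stored
result is 44 (300 modulo 256) and the builtin returns 1:
.. code-block:: c
  unsigned char result;
  _Bool overflowed = __builtin_add_overflow(200, 100, &result);
  // overflowed == 1 and result == 44, since 300 mod 256 == 44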
The first three builtins work generically for operands of any integer type,
including boolean types. The operands need not have the same type as each
other, or as the result. The other builtins may implicitly promote or
convert their operands before performing the operation.
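For instance, all three types may differ; a short sketch, where
``get_a``, ``get_b``, and ``handle_overflow`` are placeholders:
.. code-block:: c
  long long a = get_a();
  short b = get_b();
  int result;
  if (__builtin_add_overflow(a, b, &result))
    handle_overflow();  // the true sum of a and b does not fit in an int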
Query for this feature with ``__has_builtin(__builtin_add_overflow)``, etc.
.. _langext-__c11_atomic:


@@ -1217,6 +1217,9 @@ BUILTIN(__builtin_subcl, "ULiULiCULiCULiCULi*", "n")
BUILTIN(__builtin_subcll, "ULLiULLiCULLiCULLiCULLi*", "n")
// Checked Arithmetic Builtins for Security.
BUILTIN(__builtin_add_overflow, "v.", "nt")
BUILTIN(__builtin_sub_overflow, "v.", "nt")
BUILTIN(__builtin_mul_overflow, "v.", "nt")
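// The "v." signature here is a placeholder: the "t" flag (see the
// attribute key at the top of this file) routes these builtins through
// custom type checking in Sema, and "n" marks them nothrow.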
BUILTIN(__builtin_uadd_overflow, "bUiCUiCUi*", "n")
BUILTIN(__builtin_uaddl_overflow, "bULiCULiCULi*", "n")
BUILTIN(__builtin_uaddll_overflow, "bULLiCULLiCULLi*", "n")


@@ -6260,6 +6260,12 @@ def warn_atomic_op_has_invalid_memory_order : Warning<
"memory order argument to atomic operation is invalid">,
InGroup<DiagGroup<"atomic-memory-ordering">>;
def err_overflow_builtin_must_be_int : Error<
"operand argument to overflow builtin must be an integer (%0 invalid)">;
def err_overflow_builtin_must_be_ptr_int : Error<
"result argument to overflow builtin must be a pointer "
"to a non-const integer (%0 invalid)">;
def err_atomic_load_store_uses_lib : Error<
"atomic %select{load|store}0 requires runtime support that is not "
"available for this target">;


@@ -279,6 +279,50 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
namespace {
struct WidthAndSignedness {
unsigned Width;
bool Signed;
};
}
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static WidthAndSignedness
EncompassingIntegerType(ArrayRef<WidthAndSignedness> Types) {
assert(Types.size() > 0 && "Empty list of types.");
// If any of the given types is signed, we must return a signed type.
bool Signed = false;
for (const auto &Type : Types) {
Signed |= Type.Signed;
}
// The encompassing type must have a width greater than or equal to the width
// of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
unsigned Width = 0;
for (const auto &Type : Types) {
unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
if (Width < MinWidth) {
Width = MinWidth;
}
}
return {Width, Signed};
}
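As a worked example, encompassing an unsigned 32-bit type with a signed
32-bit type yields a signed 33-bit type: the result must be signed, and a
signed type needs one extra bit to cover every value of the unsigned
input. A minimal standalone C sketch of the same rule (the names here are
illustrative, not the patch's):
struct ws { unsigned width; int is_signed; };
static struct ws encompass2(struct ws a, struct ws b) {
  // Signed if either input is signed.
  int s = a.is_signed || b.is_signed;
  // An unsigned input needs one extra bit when the result is signed.
  unsigned wa = a.width + (s && !a.is_signed);
  unsigned wb = b.width + (s && !b.is_signed);
  struct ws r = { wa > wb ? wa : wb, s };
  return r;
}
// encompass2((struct ws){32, 0}, (struct ws){32, 1}) -> {33, 1}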
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
@@ -1606,6 +1650,88 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow: {
const clang::Expr *LeftArg = E->getArg(0);
const clang::Expr *RightArg = E->getArg(1);
const clang::Expr *ResultArg = E->getArg(2);
clang::QualType ResultQTy =
ResultArg->getType()->castAs<PointerType>()->getPointeeType();
WidthAndSignedness LeftInfo =
getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
WidthAndSignedness RightInfo =
getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
WidthAndSignedness ResultInfo =
getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
WidthAndSignedness EncompassingInfo =
EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
llvm::Type *EncompassingLLVMTy =
llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_add_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::sadd_with_overflow
: llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_sub_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::ssub_with_overflow
: llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_mul_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::smul_with_overflow
: llvm::Intrinsic::umul_with_overflow;
break;
}
llvm::Value *Left = EmitScalarExpr(LeftArg);
llvm::Value *Right = EmitScalarExpr(RightArg);
Address ResultPtr = EmitPointerWithAlignment(ResultArg);
// Extend each operand to the encompassing type.
Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
// Perform the operation on the extended values.
llvm::Value *Overflow, *Result;
Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
if (EncompassingInfo.Width > ResultInfo.Width) {
// The encompassing type is wider than the result type, so we need to
// truncate it.
llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
// To see if the truncation caused an overflow, we will extend
// the result and then compare it to the original result.
llvm::Value *ResultTruncExt = Builder.CreateIntCast(
ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
llvm::Value *TruncationOverflow =
Builder.CreateICmpNE(Result, ResultTruncExt);
Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
Result = ResultTrunc;
}
// Finally, store the result using the pointer.
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
return RValue::get(Overflow);
}
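Concretely, the truncation check fires in cases like the following trace
(an illustration of the steps above, matching the mixed-sign tests below):
unsigned x = 2147483648u;  /* 2^31: a valid unsigned, too big for int */
int y = 0, r;
_Bool ovf = __builtin_add_overflow(x, y, &r);
/* The operands are widened to the signed 33-bit encompassing type, where
   2^31 + 0 computes without intrinsic overflow.  Truncating 2^31 to i32
   yields INT_MIN's bit pattern; sign-extending that back gives -2^31,
   which differs from 2^31, so the truncation check reports overflow.
   ovf == 1, and r holds the truncated value (-2147483648). */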
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
@@ -1635,7 +1761,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown security overflow builtin id.");
default: llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:


@@ -112,6 +112,39 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 3))
return true;
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
Expr *Arg = TheCall->getArg(I);
QualType Ty = Arg->getType();
if (!Ty->isIntegerType()) {
S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int)
<< Ty << Arg->getSourceRange();
return true;
}
}
// Third argument should be a pointer to a non-const integer.
// IRGen correctly handles volatile, restrict, and address spaces, and
// the other qualifiers aren't possible.
{
Expr *Arg = TheCall->getArg(2);
QualType Ty = Arg->getType();
const auto *PtrTy = Ty->getAs<PointerType>();
if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
!PtrTy->getPointeeType().isConstQualified())) {
S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int)
<< Ty << Arg->getSourceRange();
return true;
}
}
return false;
}
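For instance (mirroring the checks above; the second call is rejected):
void sema_example(void) {
  volatile int out;
  __builtin_add_overflow(1, 2, &out);  /* accepted: a volatile pointee is fine */
  const int ro = 0;
  __builtin_add_overflow(1, 2, &ro);   /* error: result argument must point to a non-const integer */
}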
static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
CallExpr *TheCall, unsigned SizeIdx,
unsigned DstSizeIdx) {
@@ -457,6 +490,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
if (SemaBuiltinOverflow(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
case Builtin::BI__builtin_operator_delete:
if (!getLangOpts().CPlusPlus) {


@@ -11,6 +11,171 @@ extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
void overflowed(void);
unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
// CHECK-LABEL: define i32 @test_add_overflow_uint_uint_uint
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
unsigned r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
int test_add_overflow_int_int_int(int x, int y) {
// CHECK-LABEL: define i32 @test_add_overflow_int_int_int
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
int r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
// CHECK-LABEL: define i32 @test_sub_overflow_uint_uint_uint
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
unsigned r;
if (__builtin_sub_overflow(x, y, &r))
overflowed();
return r;
}
int test_sub_overflow_int_int_int(int x, int y) {
// CHECK-LABEL: define i32 @test_sub_overflow_int_int_int
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
int r;
if (__builtin_sub_overflow(x, y, &r))
overflowed();
return r;
}
unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
// CHECK-LABEL: define i32 @test_mul_overflow_uint_uint_uint
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
unsigned r;
if (__builtin_mul_overflow(x, y, &r))
overflowed();
return r;
}
int test_mul_overflow_int_int_int(int x, int y) {
// CHECK-LABEL: define i32 @test_mul_overflow_int_int_int
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
int r;
if (__builtin_mul_overflow(x, y, &r))
overflowed();
return r;
}
int test_add_overflow_uint_int_int(unsigned x, int y) {
// CHECK-LABEL: define i32 @test_add_overflow_uint_int_int
// CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
// CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
// CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
// CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
// CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
// CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
// CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
// CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
// CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
// CHECK: store i32 [[QT]], i32*
// CHECK: br i1 [[C3]]
int r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
_Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
// CHECK-LABEL: define {{.*}} i1 @test_add_overflow_uint_uint_bool
// CHECK-NOT: ext
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
// CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
// CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
// CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
// CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
// CHECK: store i8 [[QT2]], i8*
// CHECK: br i1 [[C3]]
_Bool r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
// CHECK-LABEL: define i32 @test_add_overflow_bool_bool_uint
// CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: store i32 [[Q]], i32*
// CHECK: br i1 [[C]]
unsigned r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
_Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
// CHECK-LABEL: define {{.*}} i1 @test_add_overflow_bool_bool_bool
// CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
// CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
// CHECK: store i8 [[QT2]], i8*
// CHECK: br i1 [[C]]
_Bool r;
if (__builtin_add_overflow(x, y, &r))
overflowed();
return r;
}
int test_add_overflow_volatile(int x, int y) {
// CHECK-LABEL: define i32 @test_add_overflow_volatile
// CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
// CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
// CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
// CHECK: store volatile i32 [[Q]], i32*
// CHECK: br i1 [[C]]
volatile int result;
if (__builtin_add_overflow(x, y, &result))
overflowed();
return result;
}
unsigned test_uadd_overflow(unsigned x, unsigned y) {
// CHECK: @test_uadd_overflow


@@ -0,0 +1,22 @@
// RUN: %clang_cc1 -fsyntax-only -verify %s
#if __has_builtin(__builtin_add_overflow)
#warning defined as expected
// expected-warning@-1 {{defined as expected}}
#endif
void test(void) {
unsigned r;
const char * c;
float f;
const unsigned q;
__builtin_add_overflow(); // expected-error {{too few arguments to function call, expected 3, have 0}}
__builtin_add_overflow(1, 1, 1, 1); // expected-error {{too many arguments to function call, expected 3, have 4}}
__builtin_add_overflow(c, 1, &r); // expected-error {{operand argument to overflow builtin must be an integer ('const char *' invalid)}}
__builtin_add_overflow(1, c, &r); // expected-error {{operand argument to overflow builtin must be an integer ('const char *' invalid)}}
__builtin_add_overflow(1, 1, 3); // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('int' invalid)}}
__builtin_add_overflow(1, 1, &f); // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('float *' invalid)}}
__builtin_add_overflow(1, 1, &q); // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('const unsigned int *' invalid)}}
}