From bcef4d238d113c61127575bf5dd0328f97bda9e9 Mon Sep 17 00:00:00 2001 From: Martin Sebor Date: Mon, 1 Aug 2022 16:44:53 -0600 Subject: [PATCH] [InstCombine] Correct strtol folding with nonnull endptr Reflect in the pointer's offset the length of the leading part of the consumed string preceding the first converted digit. Reviewed By: efriedma Differential Revision: https://reviews.llvm.org/D130912 --- .../lib/Transforms/Utils/SimplifyLibCalls.cpp | 12 +- llvm/test/Transforms/InstCombine/str-int-4.ll | 139 ++++++++++-------- llvm/test/Transforms/InstCombine/str-int-5.ll | 99 ++++++++----- 3 files changed, 147 insertions(+), 103 deletions(-) diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp index 03087d8370d5..245f2d4e442a 100644 --- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -89,10 +89,12 @@ static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr, // Fail for an invalid base (required by POSIX). return nullptr; + // Current offset into the original string to reflect in EndPtr. + size_t Offset = 0; // Strip leading whitespace. - for (unsigned i = 0; i != Str.size(); ++i) - if (!isSpace((unsigned char)Str[i])) { - Str = Str.substr(i); + for ( ; Offset != Str.size(); ++Offset) + if (!isSpace((unsigned char)Str[Offset])) { + Str = Str.substr(Offset); break; } @@ -108,6 +110,7 @@ static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr, if (Str.empty()) // Fail for a sign with nothing after it. return nullptr; + ++Offset; } // Set Max to the absolute value of the minimum (for signed), or @@ -127,6 +130,7 @@ static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr, return nullptr; Str = Str.drop_front(2); + Offset += 2; Base = 16; } else if (Base == 0) @@ -167,7 +171,7 @@ static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr, if (EndPtr) { // Store the pointer to the end. - Value *Off = B.getInt64(Str.size()); + Value *Off = B.getInt64(Offset + Str.size()); Value *StrBeg = CI->getArgOperand(0); Value *StrEnd = B.CreateInBoundsGEP(B.getInt8Ty(), StrBeg, Off, "endptr"); B.CreateStore(StrEnd, EndPtr); diff --git a/llvm/test/Transforms/InstCombine/str-int-4.ll b/llvm/test/Transforms/InstCombine/str-int-4.ll index 0de3808b3a4f..96ae9210817b 100644 --- a/llvm/test/Transforms/InstCombine/str-int-4.ll +++ b/llvm/test/Transforms/InstCombine/str-int-4.ll @@ -35,107 +35,118 @@ declare i64 @strtoll(i8*, i8**, i32) @x32max = constant [12 x i8] c" 0x7fffffff\00" @i32max_p1 = constant [12 x i8] c" 2147483648\00" -@ui32max = constant [12 x i8] c" 4294967295\00" -@ui32max_p1 = constant [12 x i8] c" 4294967296\00" +@endptr = external global i8* ; Exercise folding calls to 32-bit strtol. 
define void @fold_strtol(i32* %ps) { ; CHECK-LABEL: @fold_strtol( +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: store i32 -123, i32* [[PS:%.*]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 ; CHECK-NEXT: store i32 234, i32* [[PS1]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 2), i8** @endptr, align 8 ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 ; CHECK-NEXT: store i32 0, i32* [[PS2]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([3 x i8], [3 x i8]* @i9, i64 0, i64 2), i8** @endptr, align 8 ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 ; CHECK-NEXT: store i32 9, i32* [[PS3]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([3 x i8], [3 x i8]* @ia, i64 0, i64 2), i8** @endptr, align 8 ; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 ; CHECK-NEXT: store i32 10, i32* [[PS4]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([7 x i8], [7 x i8]* @i19azAZ, i64 0, i64 6), i8** @endptr, align 8 ; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 ; CHECK-NEXT: store i32 76095035, i32* [[PS5]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS6]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @mo32min, i64 0, i64 14), i8** @endptr, align 8 ; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS7]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @mx32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS8]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @mx32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS9]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max, i64 0, i64 11), i8** @endptr, align 8 ; CHECK-NEXT: [[PS10:%.*]] = getelementptr i32, i32* [[PS]], i64 10 ; CHECK-NEXT: store i32 2147483647, i32* [[PS10]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @x32max, i64 0, i64 11), i8** @endptr, align 8 ; CHECK-NEXT: [[PS11:%.*]] = getelementptr i32, i32* [[PS]], i64 11 ; CHECK-NEXT: store i32 2147483647, i32* [[PS11]], align 4 ; CHECK-NEXT: ret void ; ; Fold a valid sequence with leading POSIX whitespace and a minus to -123. %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 - %im123 = call i32 @strtol(i8* %pwsm123, i8** null, i32 10) + %im123 = call i32 @strtol(i8* %pwsm123, i8** @endptr, i32 10) %ps0 = getelementptr i32, i32* %ps, i32 0 store i32 %im123, i32* %ps0 ; Fold a valid sequence with leading POSIX whitespace and a plus to +234. 
%pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 - %ip234 = call i32 @strtol(i8* %pwsp234, i8** null, i32 10) + %ip234 = call i32 @strtol(i8* %pwsp234, i8** @endptr, i32 10) %ps1 = getelementptr i32, i32* %ps, i32 1 store i32 %ip234, i32* %ps1 ; Fold " 0" in base 0 to verify correct base autodetection. %psi0 = getelementptr [3 x i8], [3 x i8]* @i0, i32 0, i32 0 - %i0 = call i32 @strtol(i8* %psi0, i8** null, i32 0) + %i0 = call i32 @strtol(i8* %psi0, i8** @endptr, i32 0) %ps2 = getelementptr i32, i32* %ps, i32 2 store i32 %i0, i32* %ps2 ; Fold " 9" in base 0 to verify correct base autodetection. %psi9 = getelementptr [3 x i8], [3 x i8]* @i9, i32 0, i32 0 - %i9 = call i32 @strtol(i8* %psi9, i8** null, i32 0) + %i9 = call i32 @strtol(i8* %psi9, i8** @endptr, i32 0) %ps3 = getelementptr i32, i32* %ps, i32 3 store i32 %i9, i32* %ps3 ; Fold " a" in base 16 to 10. %psia = getelementptr [3 x i8], [3 x i8]* @ia, i32 0, i32 0 - %ia = call i32 @strtol(i8* %psia, i8** null, i32 16) + %ia = call i32 @strtol(i8* %psia, i8** @endptr, i32 16) %ps4 = getelementptr i32, i32* %ps, i32 4 store i32 %ia, i32* %ps4 ; Fold "19azAZ" in base 36 to 76095035. %psi19azAZ = getelementptr [7 x i8], [7 x i8]* @i19azAZ, i32 0, i32 0 - %i19azAZ = call i32 @strtol(i8* %psi19azAZ, i8** null, i32 36) + %i19azAZ = call i32 @strtol(i8* %psi19azAZ, i8** @endptr, i32 36) %ps5 = getelementptr i32, i32* %ps, i32 5 store i32 %i19azAZ, i32* %ps5 ; Fold INT32_MIN. %psmin = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 - %min = call i32 @strtol(i8* %psmin, i8** null, i32 10) + %min = call i32 @strtol(i8* %psmin, i8** @endptr, i32 10) %ps6 = getelementptr i32, i32* %ps, i32 6 store i32 %min, i32* %ps6 ; Fold -INT32_MIN in octal. %psmo32min = getelementptr [15 x i8], [15 x i8]* @mo32min, i32 0, i32 0 - %mo32min = call i32 @strtol(i8* %psmo32min, i8** null, i32 0) + %mo32min = call i32 @strtol(i8* %psmo32min, i8** @endptr, i32 0) %ps7 = getelementptr i32, i32* %ps, i32 7 store i32 %mo32min, i32* %ps7 ; Fold -INT32_MIN in hex and base 0. %psmx32min = getelementptr [13 x i8], [13 x i8]* @mx32min, i32 0, i32 0 - %mx32min_0 = call i32 @strtol(i8* %psmx32min, i8** null, i32 0) + %mx32min_0 = call i32 @strtol(i8* %psmx32min, i8** @endptr, i32 0) %ps8 = getelementptr i32, i32* %ps, i32 8 store i32 %mx32min_0, i32* %ps8 ; Fold -INT32_MIN in hex and base 16. - %mx32min_16 = call i32 @strtol(i8* %psmx32min, i8** null, i32 16) + %mx32min_16 = call i32 @strtol(i8* %psmx32min, i8** @endptr, i32 16) %ps9 = getelementptr i32, i32* %ps, i32 9 store i32 %mx32min_16, i32* %ps9 ; Fold INT32_MAX. %psmax = getelementptr [12 x i8], [12 x i8]* @i32max, i32 0, i32 0 - %max = call i32 @strtol(i8* %psmax, i8** null, i32 10) + %max = call i32 @strtol(i8* %psmax, i8** @endptr, i32 10) %ps10 = getelementptr i32, i32* %ps, i32 10 store i32 %max, i32* %ps10 ; Fold INT32_MAX in hex. 
%psxmax = getelementptr [12 x i8], [12 x i8]* @x32max, i32 0, i32 0 - %xmax = call i32 @strtol(i8* %psxmax, i8** null, i32 0) + %xmax = call i32 @strtol(i8* %psxmax, i8** @endptr, i32 0) %ps11 = getelementptr i32, i32* %ps, i32 11 store i32 %xmax, i32* %ps11 @@ -147,48 +158,48 @@ define void @fold_strtol(i32* %ps) { define void @call_strtol(i32* %ps) { ; CHECK-LABEL: @call_strtol( -; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([13 x i8], [13 x i8]* @i32min_m1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min_m1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: store i32 [[MINM1]], i32* [[PS:%.*]], align 4 -; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([12 x i8], [12 x i8]* @i32max_p1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 ; CHECK-NEXT: store i32 [[MAXP1]], i32* [[PS1]], align 4 -; CHECK-NEXT: [[IPLUS:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @wsplus, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[IPLUS:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @wsplus, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 ; CHECK-NEXT: store i32 [[IPLUS]], i32* [[PS2]], align 4 -; CHECK-NEXT: [[IA:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @ia, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[IA:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @ia, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 ; CHECK-NEXT: store i32 [[IA]], i32* [[PS3]], align 4 -; CHECK-NEXT: [[I8:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @i8, i64 0, i64 0), i8** null, i32 8) +; CHECK-NEXT: [[I8:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @i8, i64 0, i64 0), i8** nonnull @endptr, i32 8) ; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 ; CHECK-NEXT: store i32 [[I8]], i32* [[PS4]], align 4 -; CHECK-NEXT: [[I0X:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @x0x, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[I0X:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @x0x, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 ; CHECK-NEXT: store i32 [[I0X]], i32* [[PS5]], align 4 -; CHECK-NEXT: [[IWSPWS0:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([5 x i8], [5 x i8]* @wsplusws0, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[IWSPWS0:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @wsplusws0, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 ; CHECK-NEXT: store i32 [[IWSPWS0]], i32* [[PS6]], align 4 -; CHECK-NEXT: [[I19AZAZ:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @i19azAZ, i64 0, i64 0), i8** null, i32 35) +; CHECK-NEXT: [[I19AZAZ:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @i19azAZ, i64 0, i64 0), i8** nonnull @endptr, i32 
35) ; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 ; CHECK-NEXT: store i32 [[I19AZAZ]], i32* [[PS7]], align 4 -; CHECK-NEXT: [[O32MIN:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[O32MIN:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8 ; CHECK-NEXT: store i32 [[O32MIN]], i32* [[PS8]], align 4 -; CHECK-NEXT: [[X32MIN:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 0), i8** null, i32 0) +; CHECK-NEXT: [[X32MIN:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 0), i8** nonnull @endptr, i32 0) ; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9 ; CHECK-NEXT: store i32 [[X32MIN]], i32* [[PS9]], align 4 -; CHECK-NEXT: [[X32MIN_10:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[X32MIN_10:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS10:%.*]] = getelementptr i32, i32* [[PS]], i64 10 ; CHECK-NEXT: store i32 [[X32MIN_10]], i32* [[PS10]], align 4 -; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS11:%.*]] = getelementptr i32, i32* [[PS]], i64 11 ; CHECK-NEXT: store i32 [[NWS]], i32* [[PS11]], align 4 -; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10) +; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS12:%.*]] = getelementptr i32, i32* [[PS]], i64 12 ; CHECK-NEXT: store i32 [[NWSP6]], i32* [[PS12]], align 4 -; CHECK-NEXT: [[I0B1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** null, i32 1) +; CHECK-NEXT: [[I0B1:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** nonnull @endptr, i32 1) ; CHECK-NEXT: [[PS13:%.*]] = getelementptr i32, i32* [[PS]], i64 13 ; CHECK-NEXT: store i32 [[I0B1]], i32* [[PS13]], align 4 -; CHECK-NEXT: [[I0B256:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** null, i32 256) +; CHECK-NEXT: [[I0B256:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @i0, i64 0, i64 0), i8** nonnull @endptr, i32 256) ; CHECK-NEXT: [[PS14:%.*]] = getelementptr i32, i32* [[PS]], i64 14 ; CHECK-NEXT: store i32 [[I0B256]], i32* [[PS14]], align 4 ; CHECK-NEXT: ret void @@ -196,90 +207,90 @@ define void @call_strtol(i32* %ps) { ; Do not fold the result of conversion that's less than INT32_MIN. 
%psminm1 = getelementptr [13 x i8], [13 x i8]* @i32min_m1, i32 0, i32 0 - %minm1 = call i32 @strtol(i8* %psminm1, i8** null, i32 10) + %minm1 = call i32 @strtol(i8* %psminm1, i8** @endptr, i32 10) %ps0 = getelementptr i32, i32* %ps, i32 0 store i32 %minm1, i32* %ps0 ; Do not fold the result of conversion that's greater than INT32_MAX. %psmaxp1 = getelementptr [12 x i8], [12 x i8]* @i32max_p1, i32 0, i32 0 - %maxp1 = call i32 @strtol(i8* %psmaxp1, i8** null, i32 10) + %maxp1 = call i32 @strtol(i8* %psmaxp1, i8** @endptr, i32 10) %ps1 = getelementptr i32, i32* %ps, i32 1 store i32 %maxp1, i32* %ps1 ; Do not fold " +". %psplus = getelementptr [3 x i8], [3 x i8]* @wsplus, i32 0, i32 0 - %iplus = call i32 @strtol(i8* %psplus, i8** null, i32 0) + %iplus = call i32 @strtol(i8* %psplus, i8** @endptr, i32 0) %ps2 = getelementptr i32, i32* %ps, i32 2 store i32 %iplus, i32* %ps2 ; Do not fold " a" in base 0. %psia = getelementptr [3 x i8], [3 x i8]* @ia, i32 0, i32 0 - %ia = call i32 @strtol(i8* %psia, i8** null, i32 0) + %ia = call i32 @strtol(i8* %psia, i8** @endptr, i32 0) %ps3 = getelementptr i32, i32* %ps, i32 3 store i32 %ia, i32* %ps3 ; Do not fold " 8" in base 8. %psi8 = getelementptr [3 x i8], [3 x i8]* @i8, i32 0, i32 0 - %i8 = call i32 @strtol(i8* %psi8, i8** null, i32 8) + %i8 = call i32 @strtol(i8* %psi8, i8** @endptr, i32 8) %ps4 = getelementptr i32, i32* %ps, i32 4 store i32 %i8, i32* %ps4 ; Do not fold the "0x" alone in base 0 that some implementations (e.g., ; BSD and Darwin) set errno to EINVAL for. %psx0x = getelementptr [3 x i8], [3 x i8]* @x0x, i32 0, i32 0 - %i0x = call i32 @strtol(i8* %psx0x, i8** null, i32 0) + %i0x = call i32 @strtol(i8* %psx0x, i8** @endptr, i32 0) %ps5 = getelementptr i32, i32* %ps, i32 5 store i32 %i0x, i32* %ps5 ; Do not fold " + 0". %pwspws0 = getelementptr [5 x i8], [5 x i8]* @wsplusws0, i32 0, i32 0 - %iwspws0 = call i32 @strtol(i8* %pwspws0, i8** null, i32 0) + %iwspws0 = call i32 @strtol(i8* %pwspws0, i8** @endptr, i32 0) %ps6 = getelementptr i32, i32* %ps, i32 6 store i32 %iwspws0, i32* %ps6 ; Do not fold "19azAZ" in base 35. %psi19azAZ = getelementptr [7 x i8], [7 x i8]* @i19azAZ, i32 0, i32 0 - %i19azAZ = call i32 @strtol(i8* %psi19azAZ, i8** null, i32 35) + %i19azAZ = call i32 @strtol(i8* %psi19azAZ, i8** @endptr, i32 35) %ps7 = getelementptr i32, i32* %ps, i32 7 store i32 %i19azAZ, i32* %ps7 ; Do not fold INT32_MIN in octal. %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 - %o32min = call i32 @strtol(i8* %pso32min, i8** null, i32 0) + %o32min = call i32 @strtol(i8* %pso32min, i8** @endptr, i32 0) %ps8 = getelementptr i32, i32* %ps, i32 8 store i32 %o32min, i32* %ps8 ; Do not fold INT32_MIN in hex. %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 - %x32min = call i32 @strtol(i8* %psx32min, i8** null, i32 0) + %x32min = call i32 @strtol(i8* %psx32min, i8** @endptr, i32 0) %ps9 = getelementptr i32, i32* %ps, i32 9 store i32 %x32min, i32* %ps9 ; Do not fold INT32_MIN in hex in base 10. - %x32min_10 = call i32 @strtol(i8* %psx32min, i8** null, i32 10) + %x32min_10 = call i32 @strtol(i8* %psx32min, i8** @endptr, i32 10) %ps10 = getelementptr i32, i32* %ps, i32 10 store i32 %x32min_10, i32* %ps10 ; Do not fold a sequence consisting of just whitespace characters. 
%psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0 - %nws = call i32 @strtol(i8* %psws, i8** null, i32 10) + %nws = call i32 @strtol(i8* %psws, i8** @endptr, i32 10) %ps11 = getelementptr i32, i32* %ps, i32 11 store i32 %nws, i32* %ps11 ; Do not fold an empty sequence. %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 - %nwsp6 = call i32 @strtol(i8* %pswsp6, i8** null, i32 10) + %nwsp6 = call i32 @strtol(i8* %pswsp6, i8** @endptr, i32 10) %ps12 = getelementptr i32, i32* %ps, i32 12 store i32 %nwsp6, i32* %ps12 ; Do not fold the invalid base 1. %psi0 = getelementptr [3 x i8], [3 x i8]* @i0, i32 0, i32 0 - %i0b1 = call i32 @strtol(i8* %psi0, i8** null, i32 1) + %i0b1 = call i32 @strtol(i8* %psi0, i8** @endptr, i32 1) %ps13 = getelementptr i32, i32* %ps, i32 13 store i32 %i0b1, i32* %ps13 ; Do not fold the invalid base 256. - %i0b256 = call i32 @strtol(i8* %psi0, i8** null, i32 256) + %i0b256 = call i32 @strtol(i8* %psi0, i8** @endptr, i32 256) %ps14 = getelementptr i32, i32* %ps, i32 14 store i32 %i0b256, i32* %ps14 @@ -301,36 +312,40 @@ define void @call_strtol(i32* %ps) { define void @fold_strtoll(i64* %ps) { ; CHECK-LABEL: @fold_strtoll( +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: store i64 -123, i64* [[PS:%.*]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 ; CHECK-NEXT: store i64 234, i64* [[PS1]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min, i64 0, i64 21), i8** @endptr, align 8 ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 ; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS2]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max, i64 0, i64 20), i8** @endptr, align 8 ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 ; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS3]], align 4 ; CHECK-NEXT: ret void ; ; Fold a valid sequence with leading POSIX whitespace and a minus to -123. %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 - %im123 = call i64 @strtoll(i8* %pwsm123, i8** null, i32 10) + %im123 = call i64 @strtoll(i8* %pwsm123, i8** @endptr, i32 10) %ps0 = getelementptr i64, i64* %ps, i32 0 store i64 %im123, i64* %ps0 ; Fold a valid sequence with leading POSIX whitespace and a plus to +234. %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 - %ip234 = call i64 @strtoll(i8* %pwsp234, i8** null, i32 10) + %ip234 = call i64 @strtoll(i8* %pwsp234, i8** @endptr, i32 10) %ps1 = getelementptr i64, i64* %ps, i32 1 store i64 %ip234, i64* %ps1 ; Fold INT64_MIN. %psmin = getelementptr [22 x i8], [22 x i8]* @i64min, i32 0, i32 0 - %min = call i64 @strtoll(i8* %psmin, i8** null, i32 10) + %min = call i64 @strtoll(i8* %psmin, i8** @endptr, i32 10) %ps2 = getelementptr i64, i64* %ps, i32 2 store i64 %min, i64* %ps2 ; Fold INT64_MAX. 
%psmax = getelementptr [21 x i8], [21 x i8]* @i64max, i32 0, i32 0 - %max = call i64 @strtoll(i8* %psmax, i8** null, i32 10) + %max = call i64 @strtoll(i8* %psmax, i8** @endptr, i32 10) %ps3 = getelementptr i64, i64* %ps, i32 3 store i64 %max, i64* %ps3 @@ -342,40 +357,40 @@ define void @fold_strtoll(i64* %ps) { define void @call_strtoll(i64* %ps) { ; CHECK-LABEL: @call_strtoll( -; CHECK-NEXT: [[MINM1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MINM1:%.*]] = call i64 @strtoll(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: store i64 [[MINM1]], i64* [[PS:%.*]], align 4 -; CHECK-NEXT: [[MAXP1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([21 x i8], [21 x i8]* @i64max_p1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MAXP1:%.*]] = call i64 @strtoll(i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 ; CHECK-NEXT: store i64 [[MAXP1]], i64* [[PS1]], align 4 -; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoll(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 ; CHECK-NEXT: store i64 [[NWS]], i64* [[PS2]], align 4 -; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10) +; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoll(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 ; CHECK-NEXT: store i64 [[NWSP6]], i64* [[PS3]], align 4 ; CHECK-NEXT: ret void ; ; Do not fold the result of conversion that's less than INT64_MIN. %psminm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0 - %minm1 = call i64 @strtoll(i8* %psminm1, i8** null, i32 10) + %minm1 = call i64 @strtoll(i8* %psminm1, i8** @endptr, i32 10) %ps0 = getelementptr i64, i64* %ps, i32 0 store i64 %minm1, i64* %ps0 ; Do not fold the result of conversion that's greater than INT64_MAX. %psmaxp1 = getelementptr [21 x i8], [21 x i8]* @i64max_p1, i32 0, i32 0 - %maxp1 = call i64 @strtoll(i8* %psmaxp1, i8** null, i32 10) + %maxp1 = call i64 @strtoll(i8* %psmaxp1, i8** @endptr, i32 10) %ps1 = getelementptr i64, i64* %ps, i32 1 store i64 %maxp1, i64* %ps1 ; Do not fold a sequence consisting of just whitespace characters. %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0 - %nws = call i64 @strtoll(i8* %psws, i8** null, i32 10) + %nws = call i64 @strtoll(i8* %psws, i8** @endptr, i32 10) %ps2 = getelementptr i64, i64* %ps, i32 2 store i64 %nws, i64* %ps2 ; Do not fold an empty sequence. 
%pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 - %nwsp6 = call i64 @strtoll(i8* %pswsp6, i8** null, i32 10) + %nwsp6 = call i64 @strtoll(i8* %pswsp6, i8** @endptr, i32 10) %ps3 = getelementptr i64, i64* %ps, i32 3 store i64 %nwsp6, i64* %ps3 @@ -391,29 +406,29 @@ define void @call_strtoll(i64* %ps) { define void @call_strtol_trailing_space(i32* %ps) { ; CHECK-LABEL: @call_strtol_trailing_space( -; CHECK-NEXT: [[N1:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[N1:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS:%.*]], i64 1 ; CHECK-NEXT: store i32 [[N1]], i32* [[PS1]], align 4 -; CHECK-NEXT: [[N2:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 2), i8** null, i32 10) +; CHECK-NEXT: [[N2:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 2), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 ; CHECK-NEXT: store i32 [[N2]], i32* [[PS2]], align 4 -; CHECK-NEXT: [[N3:%.*]] = call i32 @strtol(i8* nocapture getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 4), i8** null, i32 10) +; CHECK-NEXT: [[N3:%.*]] = call i32 @strtol(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @i_1_2_3_, i64 0, i64 4), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 ; CHECK-NEXT: store i32 [[N3]], i32* [[PS3]], align 4 ; CHECK-NEXT: ret void ; %p1 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 0 - %n1 = call i32 @strtol(i8* %p1, i8** null, i32 10) + %n1 = call i32 @strtol(i8* %p1, i8** @endptr, i32 10) %ps1 = getelementptr i32, i32* %ps, i32 1 store i32 %n1, i32* %ps1 %p2 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 2 - %n2 = call i32 @strtol(i8* %p2, i8** null, i32 10) + %n2 = call i32 @strtol(i8* %p2, i8** @endptr, i32 10) %ps2 = getelementptr i32, i32* %ps, i32 2 store i32 %n2, i32* %ps2 %p3 = getelementptr [9 x i8], [9 x i8]* @i_1_2_3_, i32 0, i32 4 - %n3 = call i32 @strtol(i8* %p3, i8** null, i32 10) + %n3 = call i32 @strtol(i8* %p3, i8** @endptr, i32 10) %ps3 = getelementptr i32, i32* %ps, i32 3 store i32 %n3, i32* %ps3 diff --git a/llvm/test/Transforms/InstCombine/str-int-5.ll b/llvm/test/Transforms/InstCombine/str-int-5.ll index 87e3455a7971..140e49d262de 100644 --- a/llvm/test/Transforms/InstCombine/str-int-5.ll +++ b/llvm/test/Transforms/InstCombine/str-int-5.ll @@ -39,32 +39,46 @@ declare i64 @strtoull(i8*, i8**, i32) @x64max = constant [20 x i8] c" 0xffffffffffffffff\00" @ui64max_p1 = constant [22 x i8] c" 18446744073709551616\00" +@endptr = external global i8* + ; Exercise folding calls to 32-bit strtoul. 
define void @fold_strtoul(i32* %ps) { ; CHECK-LABEL: @fold_strtoul( +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: store i32 -123, i32* [[PS:%.*]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 ; CHECK-NEXT: store i32 234, i32* [[PS1]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min_m1, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 ; CHECK-NEXT: store i32 2147483647, i32* [[PS2]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS3]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 14), i8** @endptr, align 8 ; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS4]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @mo32min, i64 0, i64 14), i8** @endptr, align 8 ; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS5]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS6]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @mx32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS7]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max, i64 0, i64 11), i8** @endptr, align 8 ; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8 ; CHECK-NEXT: store i32 2147483647, i32* [[PS8]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @mX01, i64 0, i64 5), i8** @endptr, align 8 ; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9 ; CHECK-NEXT: store i32 -1, i32* [[PS9]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max_p1, i64 0, i64 11), i8** @endptr, align 8 ; CHECK-NEXT: [[PS10:%.*]] = getelementptr i32, i32* [[PS]], i64 10 ; CHECK-NEXT: store i32 -2147483648, i32* [[PS10]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max, i64 0, i64 11), i8** @endptr, align 8 ; CHECK-NEXT: [[PS11:%.*]] = getelementptr i32, i32* [[PS]], i64 11 ; CHECK-NEXT: store i32 -1, i32* [[PS11]], align 4 ; CHECK-NEXT: ret void @@ -72,73 +86,73 @@ define void @fold_strtoul(i32* %ps) { ; Fold a valid sequence with leading POSIX whitespace and a minus to ; (uint32_t)-123. %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 - %im123 = call i32 @strtoul(i8* %pwsm123, i8** null, i32 10) + %im123 = call i32 @strtoul(i8* %pwsm123, i8** @endptr, i32 10) %ps0 = getelementptr i32, i32* %ps, i32 0 store i32 %im123, i32* %ps0 ; Fold a valid sequence with leading POSIX whitespace and a plus to +234. 
%pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 - %ip234 = call i32 @strtoul(i8* %pwsp234, i8** null, i32 10) + %ip234 = call i32 @strtoul(i8* %pwsp234, i8** @endptr, i32 10) %ps1 = getelementptr i32, i32* %ps, i32 1 store i32 %ip234, i32* %ps1 ; Fold the result of conversion that's equal to INT32_MIN - 1. %psi32minm1 = getelementptr [13 x i8], [13 x i8]* @i32min_m1, i32 0, i32 0 - %i32min32m1 = call i32 @strtoul(i8* %psi32minm1, i8** null, i32 10) + %i32min32m1 = call i32 @strtoul(i8* %psi32minm1, i8** @endptr, i32 10) %ps2 = getelementptr i32, i32* %ps, i32 2 store i32 %i32min32m1, i32* %ps2 ; Fold INT32_MIN. %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 - %i32min = call i32 @strtoul(i8* %psi32min, i8** null, i32 10) + %i32min = call i32 @strtoul(i8* %psi32min, i8** @endptr, i32 10) %ps3 = getelementptr i32, i32* %ps, i32 3 store i32 %i32min, i32* %ps3 ; Fold INT32_MIN in octal. %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 - %o32min = call i32 @strtoul(i8* %pso32min, i8** null, i32 0) + %o32min = call i32 @strtoul(i8* %pso32min, i8** @endptr, i32 0) %ps4 = getelementptr i32, i32* %ps, i32 4 store i32 %o32min, i32* %ps4 ; Fold -INT32_MIN in octal. %psmo32min = getelementptr [15 x i8], [15 x i8]* @mo32min, i32 0, i32 0 - %mo32min = call i32 @strtoul(i8* %psmo32min, i8** null, i32 0) + %mo32min = call i32 @strtoul(i8* %psmo32min, i8** @endptr, i32 0) %ps5 = getelementptr i32, i32* %ps, i32 5 store i32 %mo32min, i32* %ps5 ; Fold INT32_MIN in hex. %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 - %x32min = call i32 @strtoul(i8* %psx32min, i8** null, i32 0) + %x32min = call i32 @strtoul(i8* %psx32min, i8** @endptr, i32 0) %ps6 = getelementptr i32, i32* %ps, i32 6 store i32 %x32min, i32* %ps6 ; Fold -INT32_MIN in hex. %psmx32min = getelementptr [13 x i8], [13 x i8]* @mx32min, i32 0, i32 0 - %mx32min = call i32 @strtoul(i8* %psmx32min, i8** null, i32 0) + %mx32min = call i32 @strtoul(i8* %psmx32min, i8** @endptr, i32 0) %ps7 = getelementptr i32, i32* %ps, i32 7 store i32 %x32min, i32* %ps7 ; Fold INT32_MAX. %psi32max = getelementptr [12 x i8], [12 x i8]* @i32max, i32 0, i32 0 - %i32max = call i32 @strtoul(i8* %psi32max, i8** null, i32 10) + %i32max = call i32 @strtoul(i8* %psi32max, i8** @endptr, i32 10) %ps8 = getelementptr i32, i32* %ps, i32 8 store i32 %i32max, i32* %ps8 ; Fold -0x01. %psmX01 = getelementptr [6 x i8], [6 x i8]* @mX01, i32 0, i32 0 - %mX01 = call i32 @strtoul(i8* %psmX01, i8** null, i32 0) + %mX01 = call i32 @strtoul(i8* %psmX01, i8** @endptr, i32 0) %ps9 = getelementptr i32, i32* %ps, i32 9 store i32 %mX01, i32* %ps9 ; Fold the result of conversion that's equal to INT32_MAX + 1. %psmax32p1 = getelementptr [12 x i8], [12 x i8]* @i32max_p1, i32 0, i32 0 - %i32max32p1 = call i32 @strtoul(i8* %psmax32p1, i8** null, i32 10) + %i32max32p1 = call i32 @strtoul(i8* %psmax32p1, i8** @endptr, i32 10) %ps10 = getelementptr i32, i32* %ps, i32 10 store i32 %i32max32p1, i32* %ps10 ; Fold UINT32_MAX. 
%psmax = getelementptr [12 x i8], [12 x i8]* @ui32max, i32 0, i32 0 - %ui32max = call i32 @strtoul(i8* %psmax, i8** null, i32 10) + %ui32max = call i32 @strtoul(i8* %psmax, i8** @endptr, i32 10) %ps11 = getelementptr i32, i32* %ps, i32 11 store i32 %ui32max, i32* %ps11 @@ -150,15 +164,15 @@ define void @fold_strtoul(i32* %ps) { define void @call_strtoul(i32* %ps) { ; CHECK-LABEL: @call_strtoul( -; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: store i32 [[MINM1]], i32* [[PS:%.*]], align 4 -; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max_p1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1 ; CHECK-NEXT: store i32 [[MAXP1]], i32* [[PS1]], align 4 -; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2 ; CHECK-NEXT: store i32 [[NWS]], i32* [[PS2]], align 4 -; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtoul(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10) +; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3 ; CHECK-NEXT: store i32 [[NWSP6]], i32* [[PS3]], align 4 ; CHECK-NEXT: ret void @@ -166,27 +180,27 @@ define void @call_strtoul(i32* %ps) { ; Do not fold the result of conversion that overflows uint32_t. This ; could be folded into a constant provided errnor were set to ERANGE. %psminm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0 - %minm1 = call i32 @strtoul(i8* %psminm1, i8** null, i32 10) + %minm1 = call i32 @strtoul(i8* %psminm1, i8** @endptr, i32 10) %ps0 = getelementptr i32, i32* %ps, i32 0 store i32 %minm1, i32* %ps0 ; Do not fold the result of conversion that's greater than UINT32_MAX ; (same logic as above applies here). %psui32maxp1 = getelementptr [12 x i8], [12 x i8]* @ui32max_p1, i32 0, i32 0 - %maxp1 = call i32 @strtoul(i8* %psui32maxp1, i8** null, i32 10) + %maxp1 = call i32 @strtoul(i8* %psui32maxp1, i8** @endptr, i32 10) %ps1 = getelementptr i32, i32* %ps, i32 1 store i32 %maxp1, i32* %ps1 ; Do not fold a sequence consisting of just whitespace characters. %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0 - %nws = call i32 @strtoul(i8* %psws, i8** null, i32 10) + %nws = call i32 @strtoul(i8* %psws, i8** @endptr, i32 10) %ps2 = getelementptr i32, i32* %ps, i32 2 store i32 %nws, i32* %ps2 ; Do not fold an empty sequence. The library call may or may not end up ; storing EINVAL in errno. 
%pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 - %nwsp6 = call i32 @strtoul(i8* %pswsp6, i8** null, i32 10) + %nwsp6 = call i32 @strtoul(i8* %pswsp6, i8** @endptr, i32 10) %ps3 = getelementptr i32, i32* %ps, i32 3 store i32 %nwsp6, i32* %ps3 @@ -198,25 +212,36 @@ define void @call_strtoul(i32* %ps) { define void @fold_strtoull(i64* %ps) { ; CHECK-LABEL: @fold_strtoull( +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: store i64 -123, i64* [[PS:%.*]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8 ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1 ; CHECK-NEXT: store i64 234, i64* [[PS1]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 21), i8** @endptr, align 8 ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 ; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS2]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 ; CHECK-NEXT: store i64 -2147483648, i64* [[PS3]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 14), i8** @endptr, align 8 ; CHECK-NEXT: [[PS4:%.*]] = getelementptr i64, i64* [[PS]], i64 4 ; CHECK-NEXT: store i64 2147483648, i64* [[PS4]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 12), i8** @endptr, align 8 ; CHECK-NEXT: [[PS5:%.*]] = getelementptr i64, i64* [[PS]], i64 5 ; CHECK-NEXT: store i64 2147483648, i64* [[PS5]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min, i64 0, i64 21), i8** @endptr, align 8 ; CHECK-NEXT: [[PS6:%.*]] = getelementptr i64, i64* [[PS]], i64 6 ; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS6]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max, i64 0, i64 20), i8** @endptr, align 8 ; CHECK-NEXT: [[PS7:%.*]] = getelementptr i64, i64* [[PS]], i64 7 ; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS7]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max_p1, i64 0, i64 20), i8** @endptr, align 8 ; CHECK-NEXT: [[PS8:%.*]] = getelementptr i64, i64* [[PS]], i64 8 ; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS8]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max, i64 0, i64 21), i8** @endptr, align 8 ; CHECK-NEXT: [[PS9:%.*]] = getelementptr i64, i64* [[PS]], i64 9 ; CHECK-NEXT: store i64 -1, i64* [[PS9]], align 4 +; CHECK-NEXT: store i8* getelementptr inbounds ([20 x i8], [20 x i8]* @x64max, i64 0, i64 19), i8** @endptr, align 8 ; CHECK-NEXT: [[PS10:%.*]] = getelementptr i64, i64* [[PS]], i64 10 ; CHECK-NEXT: store i64 -1, i64* [[PS10]], align 4 ; CHECK-NEXT: ret void @@ -224,67 +249,67 @@ define void @fold_strtoull(i64* %ps) { ; Fold a valid sequence with leading POSIX whitespace and a minus to ; (uint64_t)-123. %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0 - %im123 = call i64 @strtoull(i8* %pwsm123, i8** null, i32 10) + %im123 = call i64 @strtoull(i8* %pwsm123, i8** @endptr, i32 10) %ps0 = getelementptr i64, i64* %ps, i32 0 store i64 %im123, i64* %ps0 ; Fold a valid sequence with leading POSIX whitespace and a plus to +234. 
%pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0 - %ip234 = call i64 @strtoull(i8* %pwsp234, i8** null, i32 10) + %ip234 = call i64 @strtoull(i8* %pwsp234, i8** @endptr, i32 10) %ps1 = getelementptr i64, i64* %ps, i32 1 store i64 %ip234, i64* %ps1 ; Fold the result of conversion that's equal to INT64_MIN - 1. %psi64minm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0 - %i64min32m1 = call i64 @strtoull(i8* %psi64minm1, i8** null, i32 10) + %i64min32m1 = call i64 @strtoull(i8* %psi64minm1, i8** @endptr, i32 10) %ps2 = getelementptr i64, i64* %ps, i32 2 store i64 %i64min32m1, i64* %ps2 ; Fold INT32_MIN. %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0 - %i32min = call i64 @strtoull(i8* %psi32min, i8** null, i32 10) + %i32min = call i64 @strtoull(i8* %psi32min, i8** @endptr, i32 10) %ps3 = getelementptr i64, i64* %ps, i32 3 store i64 %i32min, i64* %ps3 ; Fold INT32_MIN in octal. %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0 - %o32min = call i64 @strtoull(i8* %pso32min, i8** null, i32 0) + %o32min = call i64 @strtoull(i8* %pso32min, i8** @endptr, i32 0) %ps4 = getelementptr i64, i64* %ps, i32 4 store i64 %o32min, i64* %ps4 ; Fold INT32_MIN in hex. %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0 - %x32min = call i64 @strtoull(i8* %psx32min, i8** null, i32 0) + %x32min = call i64 @strtoull(i8* %psx32min, i8** @endptr, i32 0) %ps5 = getelementptr i64, i64* %ps, i32 5 store i64 %x32min, i64* %ps5 ; Fold INT64_MIN. %psi64min = getelementptr [22 x i8], [22 x i8]* @i64min, i32 0, i32 0 - %i64min = call i64 @strtoull(i8* %psi64min, i8** null, i32 10) + %i64min = call i64 @strtoull(i8* %psi64min, i8** @endptr, i32 10) %ps6 = getelementptr i64, i64* %ps, i32 6 store i64 %i64min, i64* %ps6 ; Fold INT64_MAX. %psi64max = getelementptr [21 x i8], [21 x i8]* @i64max, i32 0, i32 0 - %i64max = call i64 @strtoull(i8* %psi64max, i8** null, i32 10) + %i64max = call i64 @strtoull(i8* %psi64max, i8** @endptr, i32 10) %ps7 = getelementptr i64, i64* %ps, i32 7 store i64 %i64max, i64* %ps7 ; Fold the result of conversion that's equal to INT64_MAX + 1 to INT64_MIN. %psmax32p1 = getelementptr [21 x i8], [21 x i8]* @i64max_p1, i32 0, i32 0 - %i64max32p1 = call i64 @strtoull(i8* %psmax32p1, i8** null, i32 10) + %i64max32p1 = call i64 @strtoull(i8* %psmax32p1, i8** @endptr, i32 10) %ps8 = getelementptr i64, i64* %ps, i32 8 store i64 %i64max32p1, i64* %ps8 ; Fold UINT64_MAX. %psmax = getelementptr [22 x i8], [22 x i8]* @ui64max, i32 0, i32 0 - %ui64max = call i64 @strtoull(i8* %psmax, i8** null, i32 10) + %ui64max = call i64 @strtoull(i8* %psmax, i8** @endptr, i32 10) %ps9 = getelementptr i64, i64* %ps, i32 9 store i64 %ui64max, i64* %ps9 ; Fold UINT64_MAX in hex. 
%psxmax = getelementptr [20 x i8], [20 x i8]* @x64max, i32 0, i32 0 - %x64max = call i64 @strtoull(i8* %psxmax, i8** null, i32 0) + %x64max = call i64 @strtoull(i8* %psxmax, i8** @endptr, i32 0) %ps10 = getelementptr i64, i64* %ps, i32 10 store i64 %x64max, i64* %ps10 @@ -296,13 +321,13 @@ define void @fold_strtoull(i64* %ps) { define void @call_strtoull(i64* %ps) { ; CHECK-LABEL: @call_strtoull( -; CHECK-NEXT: [[MAXP1:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max_p1, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[MAXP1:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS:%.*]], i64 1 ; CHECK-NEXT: store i64 [[MAXP1]], i64* [[PS1]], align 4 -; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** null, i32 10) +; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2 ; CHECK-NEXT: store i64 [[NWS]], i64* [[PS2]], align 4 -; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoull(i8* nocapture getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** null, i32 10) +; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10) ; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3 ; CHECK-NEXT: store i64 [[NWSP6]], i64* [[PS3]], align 4 ; CHECK-NEXT: ret void @@ -310,20 +335,20 @@ define void @call_strtoull(i64* %ps) { ; Do not fold the result of conversion that overflows uint64_t. This ; could be folded into a constant provided errnor were set to ERANGE. %psui64maxp1 = getelementptr [22 x i8], [22 x i8]* @ui64max_p1, i32 0, i32 0 - %maxp1 = call i64 @strtoull(i8* %psui64maxp1, i8** null, i32 10) + %maxp1 = call i64 @strtoull(i8* %psui64maxp1, i8** @endptr, i32 10) %ps1 = getelementptr i64, i64* %ps, i32 1 store i64 %maxp1, i64* %ps1 ; Do not fold a sequence consisting of just whitespace characters. %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0 - %nws = call i64 @strtoull(i8* %psws, i8** null, i32 10) + %nws = call i64 @strtoull(i8* %psws, i8** @endptr, i32 10) %ps2 = getelementptr i64, i64* %ps, i32 2 store i64 %nws, i64* %ps2 ; Do not fold an empty sequence. The library call may or may not end up ; storing EINVAL in errno. %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6 - %nwsp6 = call i64 @strtoull(i8* %pswsp6, i8** null, i32 10) + %nwsp6 = call i64 @strtoull(i8* %pswsp6, i8** @endptr, i32 10) %ps3 = getelementptr i64, i64* %ps, i32 3 store i64 %nwsp6, i64* %ps3
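
For reference, a minimal standalone C++ sketch (not part of the patch) of the libc strtol/endptr semantics the corrected offset computation mirrors: the stored end pointer lands one past the last converted digit, so its offset from the start of the string must include any leading whitespace and sign consumed before the first digit. The sample strings below are arbitrary stand-ins and are not the constants used in the test files.

// Standalone sketch of the strtol/endptr behavior the fold reproduces.
#include <cassert>
#include <cstdlib>
#include <cstring>

int main() {
  // Arbitrary sample: leading whitespace, a sign, then digits.  strtol
  // consumes all of it, so endptr points at the terminating NUL.
  const char *s = " \t\n-123";
  char *end = nullptr;
  long v = strtol(s, &end, 10);
  assert(v == -123);
  assert(end == s + strlen(s));   // offset = whitespace + sign + digits

  // With no digits, nothing is converted and endptr is reset to the start
  // of the string; the tests above leave such calls unfolded.
  const char *ws = " \t ";        // arbitrary whitespace-only sample
  v = strtol(ws, &end, 10);
  assert(v == 0);
  assert(end == ws);
  return 0;
}

This matches the CHECK lines in the folded tests, where each stored end pointer is a GEP to the terminating NUL of a fully converted constant (for example, offset 10 into the 11-byte @ws_im123).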