; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s

; Due to a bug in resolveFrameIndex we ended up with invalid addresses
; containing a base register 0. Verify that this no longer happens.
; CHECK-NOT: (0)

target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; Aggregate layouts exercised by the test; the large fixed-size arrays force
; stack/frame-index traffic that reproduced the original bad-base-register bug.
%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
%struct.S1998 = type { [2 x i32*], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
%struct.anon = type { [16 x double], i32, i16, i32, [3 x i8], [6 x i8], [4 x i32], i8 }

; Globals read/written by @test1998 below.
@info = global %struct.Info zeroinitializer, align 8
@fails = global i32 0, align 4
@intarray = global [256 x i32] zeroinitializer, align 4
@s1998 = global %struct.S1998 zeroinitializer, align 16
@a1998 = external global [5 x %struct.S1998]
define void @test1998() {
|
|
|
|
entry:
|
|
|
|
%i = alloca i32, align 4
|
|
|
|
%j = alloca i32, align 4
|
|
|
|
%tmp = alloca i32, align 4
|
|
|
|
%agg.tmp = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp111 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp112 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp113 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp114 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp115 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp116 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp117 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp118 = alloca %struct.S1998, align 16
|
|
|
|
%agg.tmp119 = alloca %struct.S1998, align 16
|
call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i1 false)
|
|
|
|
call void @llvm.memset.p0i8.i64(i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i1 false)
|
|
|
|
call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i1 false)
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 2), align 8
|
|
|
|
store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 3), align 8
|
|
|
|
store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 4), align 8
|
|
|
|
store i64 5168, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 6), align 8
|
|
|
|
store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
|
|
|
|
store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 9), align 8
|
|
|
|
store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 10), align 8
|
|
|
|
%0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
|
2014-06-27 21:04:12 +08:00
|
|
|
%sub = sub i64 %0, 1
|
2015-03-14 02:20:45 +08:00
|
|
|
%and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
|
2014-06-27 21:04:12 +08:00
|
|
|
%tobool = icmp ne i64 %and, 0
|
|
|
|
br i1 %tobool, label %if.then, label %if.end
|
|
|
|
|
|
|
|
if.then: ; preds = %entry
|
2015-02-28 05:17:42 +08:00
|
|
|
%1 = load i32, i32* @fails, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc = add nsw i32 %1, 1
|
|
|
|
store i32 %inc, i32* @fails, align 4
|
|
|
|
br label %if.end
|
|
|
|
|
|
|
|
if.end: ; preds = %if.then, %entry
|
|
|
|
store i32 0, i32* %i, align 4
|
|
|
|
store i32 0, i32* %j, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%2 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom = sext i32 %2 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
|
|
|
|
store i8* bitcast (i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%3 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom1 = sext i32 %3 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx2, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%4 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom3 = sext i32 %4 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx4, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
|
|
|
|
store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%5 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc5 = add nsw i32 %5, 1
|
|
|
|
store i32 %inc5, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%6 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom6 = sext i32 %6 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
|
|
|
|
store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%7 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom8 = sext i32 %7 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx9, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%8 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom10 = sext i32 %8 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx11, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1), align 8
|
|
|
|
store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%9 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc12 = add nsw i32 %9, 1
|
|
|
|
store i32 %inc12, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%10 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom13 = sext i32 %10 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
|
|
|
|
store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%11 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom15 = sext i32 %11 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx16, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%12 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom17 = sext i32 %12 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx18, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2), align 8
|
|
|
|
store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%13 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc19 = add nsw i32 %13, 1
|
|
|
|
store i32 %inc19, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%14 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom20 = sext i32 %14 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
|
|
|
|
store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%15 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom22 = sext i32 %15 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx23, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%16 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom24 = sext i32 %16 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 16, i64* %arrayidx25, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3), align 16
|
|
|
|
store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
|
2015-02-28 05:17:42 +08:00
|
|
|
%17 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc26 = add nsw i32 %17, 1
|
|
|
|
store i32 %inc26, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%18 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom27 = sext i32 %18 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
|
|
|
|
store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%19 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom29 = sext i32 %19 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 2, i64* %arrayidx30, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%20 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom31 = sext i32 %20 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 2, i64* %arrayidx32, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i16 -15897, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4), align 2
|
|
|
|
store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
|
2015-02-28 05:17:42 +08:00
|
|
|
%21 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc33 = add nsw i32 %21, 1
|
|
|
|
store i32 %inc33, i32* %i, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 -419541644, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 5), align 4
|
|
|
|
store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%22 = load i32, i32* %j, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc34 = add nsw i32 %22, 1
|
|
|
|
store i32 %inc34, i32* %j, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%23 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom35 = sext i32 %23 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
|
|
|
|
store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%24 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom37 = sext i32 %24 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx38, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%25 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom39 = sext i32 %25 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 8, i64* %arrayidx40, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
|
|
|
|
store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%26 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc41 = add nsw i32 %26, 1
|
|
|
|
store i32 %inc41, i32* %i, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
%bf.load = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear = and i32 %bf.load, 7
|
|
|
|
%bf.set = or i32 %bf.clear, 16
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
|
|
|
|
%bf.load42 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear43 = and i32 %bf.load42, 7
|
|
|
|
%bf.set44 = or i32 %bf.clear43, 24
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%27 = load i32, i32* %j, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc45 = add nsw i32 %27, 1
|
|
|
|
store i32 %inc45, i32* %j, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
%bf.load46 = load i16, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear47 = and i16 %bf.load46, 127
|
2015-03-14 02:20:45 +08:00
|
|
|
store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
|
|
|
|
%bf.load48 = load i16, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear49 = and i16 %bf.load48, 127
|
2015-03-14 02:20:45 +08:00
|
|
|
store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%28 = load i32, i32* %j, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc50 = add nsw i32 %28, 1
|
|
|
|
store i32 %inc50, i32* %j, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
%bf.load51 = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear52 = and i32 %bf.load51, 63
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
|
|
|
|
%bf.load53 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear54 = and i32 %bf.load53, 63
|
|
|
|
%bf.set55 = or i32 %bf.clear54, 64
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%29 = load i32, i32* %j, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc56 = add nsw i32 %29, 1
|
|
|
|
store i32 %inc56, i32* %j, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
%bf.load57 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear58 = and i24 %bf.load57, 63
|
2015-03-14 02:20:45 +08:00
|
|
|
store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
|
|
|
|
%bf.load59 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%bf.clear60 = and i24 %bf.load59, 63
|
2015-03-14 02:20:45 +08:00
|
|
|
store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%30 = load i32, i32* %j, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc61 = add nsw i32 %30, 1
|
|
|
|
store i32 %inc61, i32* %j, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%31 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom62 = sext i32 %31 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
|
|
|
|
store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%32 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom64 = sext i32 %32 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx65, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%33 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom66 = sext i32 %33 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx67, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8 -83, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
|
|
|
|
store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
|
2015-02-28 05:17:42 +08:00
|
|
|
%34 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc68 = add nsw i32 %34, 1
|
|
|
|
store i32 %inc68, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%35 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom69 = sext i32 %35 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
|
|
|
|
store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%36 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom71 = sext i32 %36 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx72, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%37 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom73 = sext i32 %37 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx74, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8 34, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
|
|
|
|
store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
|
2015-02-28 05:17:42 +08:00
|
|
|
%38 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc75 = add nsw i32 %38, 1
|
|
|
|
store i32 %inc75, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%39 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom76 = sext i32 %39 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
|
|
|
|
store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%40 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom78 = sext i32 %40 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 4, i64* %arrayidx79, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%41 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom80 = sext i32 %41 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 4, i64* %arrayidx81, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 -3, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
|
|
|
|
store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%42 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc82 = add nsw i32 %42, 1
|
|
|
|
store i32 %inc82, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%43 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom83 = sext i32 %43 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
|
|
|
|
store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%44 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom85 = sext i32 %44 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx86, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%45 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom87 = sext i32 %45 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx88, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8 106, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
|
|
|
|
store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
|
2015-02-28 05:17:42 +08:00
|
|
|
%46 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc89 = add nsw i32 %46, 1
|
|
|
|
store i32 %inc89, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%47 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom90 = sext i32 %47 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
|
|
|
|
store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%48 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom92 = sext i32 %48 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 2, i64* %arrayidx93, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%49 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom94 = sext i32 %49 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 2, i64* %arrayidx95, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i16 29665, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7), align 2
|
|
|
|
store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
|
2015-02-28 05:17:42 +08:00
|
|
|
%50 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc96 = add nsw i32 %50, 1
|
|
|
|
store i32 %inc96, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%51 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom97 = sext i32 %51 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
|
|
|
|
store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%52 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom99 = sext i32 %52 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx100, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%53 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom101 = sext i32 %53 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 1, i64* %arrayidx102, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i8 52, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), align 1
|
|
|
|
store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
|
2015-02-28 05:17:42 +08:00
|
|
|
%54 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc103 = add nsw i32 %54, 1
|
|
|
|
store i32 %inc103, i32* %i, align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%55 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom104 = sext i32 %55 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
|
|
|
|
store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%56 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom106 = sext i32 %56 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 4, i64* %arrayidx107, align 8
|
2015-02-28 05:17:42 +08:00
|
|
|
%57 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%idxprom108 = sext i32 %57 to i64
|
2015-03-14 02:20:45 +08:00
|
|
|
%arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
|
2014-06-27 21:04:12 +08:00
|
|
|
store i64 4, i64* %arrayidx109, align 8
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 -54118453, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9), align 4
|
|
|
|
store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%58 = load i32, i32* %i, align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%inc110 = add nsw i32 %58, 1
|
|
|
|
store i32 %inc110, i32* %i, align 4
|
|
|
|
store i32 %inc110, i32* %tmp
|
2015-02-28 05:17:42 +08:00
|
|
|
%59 = load i32, i32* %tmp
|
|
|
|
%60 = load i32, i32* %i, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %60, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 0), align 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%61 = load i32, i32* %j, align 4
|
2015-03-14 02:20:45 +08:00
|
|
|
store i32 %61, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 1), align 4
|
2014-06-27 21:04:12 +08:00
|
|
|
%62 = bitcast %struct.S1998* %agg.tmp111 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%63 = bitcast %struct.S1998* %agg.tmp112 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
|
2015-03-14 02:20:45 +08:00
|
|
|
call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
|
2014-06-27 21:04:12 +08:00
|
|
|
call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp)
|
|
|
|
%64 = bitcast %struct.S1998* %agg.tmp113 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%65 = bitcast %struct.S1998* %agg.tmp114 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%66 = bitcast %struct.S1998* %agg.tmp115 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
|
[opaque pointer type] Add textual IR support for explicit type parameter to the call instruction
See r230786 and r230794 for similar changes to gep and load
respectively.
Call is a bit different because it often doesn't have a single explicit
type - usually the type is deduced from the arguments, and just the
return type is explicit. In those cases there's no need to change the
IR.
When that's not the case, the IR usually contains the pointer type of
the first operand - but since typed pointers are going away, that
representation is insufficient so I'm just stripping the "pointerness"
of the explicit type away.
This does make the IR a bit weird - it /sort of/ reads like the type of
the first operand: "call void () %x(" but %x is actually of type "void
()*" and will eventually be just of type "ptr". But this seems not too
bad and I don't think it would benefit from repeating the type
("void (), void () * %x(" and then eventually "void (), ptr %x(") as has
been done with gep and load.
This also has a side benefit: since the explicit type is no longer a
pointer, there's no ambiguity between an explicit type and a function
that returns a function pointer. Previously this case needed an explicit
type (eg: a function returning a void() function was written as
"call void () () * @x(" rather than "call void () * @x(" because of the
ambiguity between a function returning a pointer to a void() function
and a function returning void).
No ambiguity means even function pointer return types can just be
written alone, without writing the whole function's type.
This leaves /only/ the varargs case where the explicit type is required.
Given the special type syntax in call instructions, the regex-fu used
for migration was a bit more involved in its own unique way (as every
one of these is) so here it is. Use it in conjunction with the apply.sh
script and associated find/xargs commands I've provided in rr230786 to
migrate your out of tree tests. Do let me know if any of this doesn't
cover your cases & we can iterate on a more general script/regexes to
help others with out of tree tests.
About 9 test cases couldn't be automatically migrated - half of those
were functions returning function pointers, where I just had to manually
delete the function argument types now that we didn't need an explicit
function type there. The other half were typedefs of function types used
in calls - just had to manually drop the * from those.
import fileinput
import sys
import re
pat = re.compile(r'((?:=|:|^|\s)call\s(?:[^@]*?))(\s*$|\s*(?:(?:\[\[[a-zA-Z0-9_]+\]\]|[@%](?:(")?[\\\?@a-zA-Z0-9_.]*?(?(3)"|)|{{.*}}))(?:\(|$)|undef|inttoptr|bitcast|null|asm).*$)')
addrspace_end = re.compile(r"addrspace\(\d+\)\s*\*$")
func_end = re.compile("(?:void.*|\)\s*)\*$")
def conv(match, line):
if not match or re.search(addrspace_end, match.group(1)) or not re.search(func_end, match.group(1)):
return line
return line[:match.start()] + match.group(1)[:match.group(1).rfind('*')].rstrip() + match.group(2) + line[match.end():]
for line in sys.stdin:
sys.stdout.write(conv(re.search(pat, line), line))
llvm-svn: 235145
2015-04-17 07:24:18 +08:00
|
|
|
call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
|
2014-06-27 21:04:12 +08:00
|
|
|
%67 = bitcast %struct.S1998* %agg.tmp116 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%68 = bitcast %struct.S1998* %agg.tmp117 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %68, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%69 = bitcast %struct.S1998* %agg.tmp118 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
|
2014-06-27 21:04:12 +08:00
|
|
|
%70 = bitcast %struct.S1998* %agg.tmp119 to i8*
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
|
[opaque pointer type] Add textual IR support for explicit type parameter to the call instruction
See r230786 and r230794 for similar changes to gep and load
respectively.
Call is a bit different because it often doesn't have a single explicit
type - usually the type is deduced from the arguments, and just the
return type is explicit. In those cases there's no need to change the
IR.
When that's not the case, the IR usually contains the pointer type of
the first operand - but since typed pointers are going away, that
representation is insufficient so I'm just stripping the "pointerness"
of the explicit type away.
This does make the IR a bit weird - it /sort of/ reads like the type of
the first operand: "call void () %x(" but %x is actually of type "void
()*" and will eventually be just of type "ptr". But this seems not too
bad and I don't think it would benefit from repeating the type
("void (), void () * %x(" and then eventually "void (), ptr %x(") as has
been done with gep and load.
This also has a side benefit: since the explicit type is no longer a
pointer, there's no ambiguity between an explicit type and a function
that returns a function pointer. Previously this case needed an explicit
type (eg: a function returning a void() function was written as
"call void () () * @x(" rather than "call void () * @x(" because of the
ambiguity between a function returning a pointer to a void() function
and a function returning void).
No ambiguity means even function pointer return types can just be
written alone, without writing the whole function's type.
This leaves /only/ the varargs case where the explicit type is required.
Given the special type syntax in call instructions, the regex-fu used
for migration was a bit more involved in its own unique way (as every
one of these is) so here it is. Use it in conjunction with the apply.sh
script and associated find/xargs commands I've provided in rr230786 to
migrate your out of tree tests. Do let me know if any of this doesn't
cover your cases & we can iterate on a more general script/regexes to
help others with out of tree tests.
About 9 test cases couldn't be automatically migrated - half of those
were functions returning function pointers, where I just had to manually
delete the function argument types now that we didn't need an explicit
function type there. The other half were typedefs of function types used
in calls - just had to manually drop the * from those.
# One-shot migration script (quoted from the commit message above): strips the
# now-redundant "*" from explicit function-pointer types on textual-IR `call`
# lines, reading from stdin and writing the rewritten text to stdout.
# NOTE(review): `fileinput` is imported but never used; only `sys` is read.
import fileinput
import sys
import re
# Captures (1) everything from `call` up to the callee type text and (2) the
# callee/remainder of the line, so group(1) can be rewritten in isolation.
pat = re.compile(r'((?:=|:|^|\s)call\s(?:[^@]*?))(\s*$|\s*(?:(?:\[\[[a-zA-Z0-9_]+\]\]|[@%](?:(")?[\\\?@a-zA-Z0-9_.]*?(?(3)"|)|{{.*}}))(?:\(|$)|undef|inttoptr|bitcast|null|asm).*$)')
# Pointer-to-addrspace types are left untouched; only types that end in a
# function-pointer "*" (matched by func_end) are rewritten.
addrspace_end = re.compile(r"addrspace\(\d+\)\s*\*$")
func_end = re.compile("(?:void.*|\)\s*)\*$")
def conv(match, line):
# Pass the line through unchanged unless it matched and the captured type
# text ends in a function-pointer "*" outside an addrspace qualifier.
if not match or re.search(addrspace_end, match.group(1)) or not re.search(func_end, match.group(1)):
return line
# Drop the trailing "*" (and surrounding whitespace) from the type text,
# splicing the rest of the line back together around it.
return line[:match.start()] + match.group(1)[:match.group(1).rfind('*')].rstrip() + match.group(2) + line[match.end():]
for line in sys.stdin:
sys.stdout.write(conv(re.search(pat, line), line))
llvm-svn: 235145
2015-04-17 07:24:18 +08:00
|
|
|
call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
|
2014-06-27 21:04:12 +08:00
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
Change memcpy/memset/memmove to have dest and source alignments.
Note, this was reviewed (and more details are in) http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
These intrinsics currently have an explicit alignment argument which is
required to be a constant integer. It represents the alignment of the
source and dest, and so must be the minimum of those.
This change allows source and dest to each have their own alignments
by using the alignment attribute on their arguments. The alignment
argument itself is removed.
There are a few places in the code for which the code needs to be
checked by an expert as to whether using only src/dest alignment is
safe. For those places, they currently take the minimum of src/dest
alignments which matches the current behaviour.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false)
For out of tree owners, I was able to strip alignment from calls using sed by replacing:
(call.*llvm\.memset.*)i32\ [0-9]*\,\ i1 false\)
with:
$1i1 false)
and similarly for memmove and memcpy.
I then added back in alignment to test cases which needed it.
A similar commit will be made to clang which actually has many differences in alignment as now
IRBuilder can generate different source/dest alignments on calls.
In IRBuilder itself, a new argument was added. Instead of calling:
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, /* isVolatile */ false)
you now call
CreateMemCpy(Dst, Src, getInt64(Size), DstAlign, SrcAlign, /* isVolatile */ false)
There is a temporary class (IntegerAlignment) which takes the source alignment and rejects
implicit conversion from bool. This is to prevent isVolatile here from passing its default
parameter to the source alignment.
Note, changes in future can now be made to codegen. I didn't change anything here, but this
change should enable better memcpy code sequences.
Reviewed by Hal Finkel.
llvm-svn: 253511
2015-11-19 06:17:24 +08:00
|
|
|
; memset intrinsic, post-r253511 signature: (dest, value, len, isVolatile).
; Alignment is expressed via parameter attributes, not a trailing i32 argument
; (see the alignment-migration commit message above).
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
|
|
|
|
; memcpy intrinsic, post-r253511 signature: (dest, src, len, isVolatile) --
; the explicit alignment argument was removed in favor of align attributes.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
|
2014-06-27 21:04:12 +08:00
|
|
|
|
|
|
|
; External callee taking an sret out-parameter and two byval %struct.S1998
; aggregates (16-byte aligned); the byval argument copies produce the stack
; frame indices whose addressing this test verifies (CHECK-NOT: (0) above).
declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16)
|
|
|
|
; Variadic variant; invoked above with byval %struct.S1998 aggregates and a
; ppc_fp128 among the varargs.
declare void @check1998va(i32 signext, ...)
|
|
|
|
; External callee taking a single 16-byte-aligned byval aggregate argument.
declare void @checkx1998(%struct.S1998* byval align 16 %arg)
|
|
|
|
|