Automatically generate aix32-cc-abi-vaarg.ll. NFC

This commit is contained in:
Amaury Séchet 2022-05-07 13:04:40 +00:00
parent 8650f05c97
commit f4183441d4
1 changed file with 225 additions and 207 deletions

View File

@ -1,45 +1,57 @@
; RUN: llc -O2 -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -verify-machineinstrs < %s | \
; RUN: FileCheck --check-prefix=32BIT %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O2 -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=ASM32 %s
; RUN: llc -O2 -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -verify-machineinstrs < %s | FileCheck --check-prefix=32BIT %s
; RUN: llc -O2 -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec \
; RUN: -mtriple powerpc-ibm-aix-xcoff < %s | \
; RUN: FileCheck --check-prefix=ASM32 %s
; int_va_arg: returns %a + v1 + 2*v2, where v1 is the first variadic i32
; read via va_start and v2 is read via a va_copy of the same list. Because
; the copy is made before either list is advanced, both reads fetch the
; same (first) variadic slot.
define i32 @int_va_arg(i32 %a, ...) local_unnamed_addr {
entry:
; Two va_list slots (an AIX32 va_list is a single i8* cursor, 4 bytes).
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
; %arg2 becomes an independent cursor positioned where %arg1 currently is.
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; Inlined va_arg from %arg1: load cursor, bump by 4 (sizeof i32), read i32.
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to i32*
%3 = load i32, i32* %2, align 4
%add = add nsw i32 %3, %a
; Inlined va_arg from the copied list %arg2 (reads the same slot as above).
%argp.cur2 = load i8*, i8** %arg2, align 4
%argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 4
store i8* %argp.next3, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur2 to i32*
%5 = load i32, i32* %4, align 4
; shl by 1 == multiply by 2.
%mul = shl i32 %5, 1
%add4 = add nsw i32 %add, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret i32 %add4
}
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.va_start(i8*)
declare void @llvm.va_copy(i8*, i8*)
declare void @llvm.va_end(i8*)
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
; int_va_arg: returns %a + v1 + 2*v2; v1 comes from the va_start'ed list,
; v2 from a va_copy taken before any advance, so both reads hit the first
; variadic slot. The check block below was produced by
; utils/update_llc_test_checks.py — do not hand-edit it.
define i32 @int_va_arg(i32 %a, ...) local_unnamed_addr {
; ASM32-LABEL: int_va_arg:
; ASM32:       # %bb.0: # %entry
; ASM32-NEXT:    addi 11, 1, 28
; ASM32-NEXT:    stw 4, 28(1)
; ASM32-NEXT:    addi 4, 1, 32
; ASM32-NEXT:    stw 6, 36(1)
; ASM32-NEXT:    stw 11, -4(1)
; ASM32-NEXT:    stw 11, -8(1)
; ASM32-NEXT:    stw 4, -4(1)
; ASM32-NEXT:    lwz 6, 28(1)
; ASM32-NEXT:    stw 4, -8(1)
; ASM32-NEXT:    add 3, 6, 3
; ASM32-NEXT:    lwz 4, 28(1)
; ASM32-NEXT:    slwi 4, 4, 1
; ASM32-NEXT:    stw 7, 40(1)
; ASM32-NEXT:    add 3, 3, 4
; ASM32-NEXT:    stw 8, 44(1)
; ASM32-NEXT:    stw 9, 48(1)
; ASM32-NEXT:    stw 10, 52(1)
; ASM32-NEXT:    stw 5, 32(1)
; ASM32-NEXT:    blr
entry:
; Two 4-byte va_list cursors: one for va_start, one for the va_copy.
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; va_arg #1 (from %arg1): load cursor, advance by 4, read the i32.
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to i32*
%3 = load i32, i32* %2, align 4
%add = add nsw i32 %3, %a
; va_arg #1 from the copied list (%arg2) — same slot as the read above.
%argp.cur2 = load i8*, i8** %arg2, align 4
%argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 4
store i8* %argp.next3, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur2 to i32*
%5 = load i32, i32* %4, align 4
; Doubling expressed as a left shift.
%mul = shl i32 %5, 1
%add4 = add nsw i32 %add, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret i32 %add4
}
; 32BIT-LABEL: name: int_va_arg
; 32BIT-LABEL: liveins:
@ -80,57 +92,66 @@
; 32BIT-DAG: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
; 32BIT-DAG: BLR implicit $lr, implicit $rm, implicit $r3
; ASM32-LABEL: .int_va_arg:
; set up fixed stack frame for incoming va_args r4->r10
; ASM32-DAG: stw 4, 28(1)
; ASM32-DAG: stw 5, 32(1)
; ASM32-DAG: stw 6, 36(1)
; ASM32-DAG: stw 7, 40(1)
; ASM32-DAG: stw 8, 44(1)
; ASM32-DAG: stw 9, 48(1)
; ASM32-DAG: stw 10, 52(1)
; load of arg1 from fixed stack offset
; ASM32-DAG: lwz [[ARG1:[0-9]+]], 28(1)
; va_copy load of arg2 from fixed stack offset
; ASM32-DAG: lwz [[ARG2:[0-9]+]], 28(1)
; ASM32-DAG: blr
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.va_start(i8*)
declare void @llvm.va_copy(i8*, i8*)
declare void @llvm.va_end(i8*)
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
; int_stack_va_arg: eight fixed i32 arguments exhaust the GPR argument
; registers (the checks store r4-r10 and load the first variadic word from
; 56(1)), so the variadic data is read from the caller's stack. Returns
; one+...+eight + v1 + 2*v2, with v2 read through a va_copy'd list.
define i32 @int_stack_va_arg(i32 %one, i32 %two, i32 %three, i32 %four, i32 %five, i32 %six, i32 %seven, i32 %eight, ...) local_unnamed_addr {
entry:
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
; Copy taken before any advance: both lists point at the first variadic slot.
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; Sum of the eight fixed arguments.
%add = add nsw i32 %two, %one
%add2 = add nsw i32 %add, %three
%add3 = add nsw i32 %add2, %four
%add4 = add nsw i32 %add3, %five
%add5 = add nsw i32 %add4, %six
%add6 = add nsw i32 %add5, %seven
%add7 = add nsw i32 %add6, %eight
; Inlined va_arg of an i32 from %arg1 (advance cursor by 4).
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to i32*
%3 = load i32, i32* %2, align 4
%add8 = add nsw i32 %add7, %3
; Inlined va_arg from the copied list %arg2 (same first slot).
%argp.cur9 = load i8*, i8** %arg2, align 4
%argp.next10 = getelementptr inbounds i8, i8* %argp.cur9, i32 4
store i8* %argp.next10, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur9 to i32*
%5 = load i32, i32* %4, align 4
; shl by 1 == multiply by 2.
%mul = shl i32 %5, 1
%add11 = add nsw i32 %add8, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret i32 %add11
}
; int_stack_va_arg: with eight fixed i32 args the variadic area starts on
; the stack (the generated code loads the first variadic word from 56(1)).
; Returns the sum of the fixed args plus v1 + 2*v2 (v2 via va_copy).
; The check block below is autogenerated by update_llc_test_checks.py.
define i32 @int_stack_va_arg(i32 %one, i32 %two, i32 %three, i32 %four, i32 %five, i32 %six, i32 %seven, i32 %eight, ...) local_unnamed_addr {
; ASM32-LABEL: int_stack_va_arg:
; ASM32:       # %bb.0: # %entry
; ASM32-NEXT:    add 3, 4, 3
; ASM32-NEXT:    lwz 4, 56(1)
; ASM32-NEXT:    li 11, 4
; ASM32-NEXT:    add 3, 3, 5
; ASM32-NEXT:    addi 12, 1, 56
; ASM32-NEXT:    add 3, 3, 6
; ASM32-NEXT:    rlwimi 12, 11, 0, 29, 29
; ASM32-NEXT:    stw 12, -4(1)
; ASM32-NEXT:    add 3, 3, 7
; ASM32-NEXT:    add 3, 3, 8
; ASM32-NEXT:    add 3, 3, 9
; ASM32-NEXT:    add 3, 3, 10
; ASM32-NEXT:    add 3, 3, 4
; ASM32-NEXT:    slwi 4, 4, 1
; ASM32-NEXT:    add 3, 3, 4
; ASM32-NEXT:    blr
entry:
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; Accumulate the eight fixed arguments.
%add = add nsw i32 %two, %one
%add2 = add nsw i32 %add, %three
%add3 = add nsw i32 %add2, %four
%add4 = add nsw i32 %add3, %five
%add5 = add nsw i32 %add4, %six
%add6 = add nsw i32 %add5, %seven
%add7 = add nsw i32 %add6, %eight
; va_arg of an i32 from %arg1: load cursor, bump by 4, read.
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to i32*
%3 = load i32, i32* %2, align 4
%add8 = add nsw i32 %add7, %3
; Same read through the copied list; both copies see the first slot.
%argp.cur9 = load i8*, i8** %arg2, align 4
%argp.next10 = getelementptr inbounds i8, i8* %argp.cur9, i32 4
store i8* %argp.next10, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur9 to i32*
%5 = load i32, i32* %4, align 4
%mul = shl i32 %5, 1
%add11 = add nsw i32 %add8, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret i32 %add11
}
; 32BIT-LABEL: name: int_stack_va_arg
; 32BIT-LABEL: liveins:
@ -167,49 +188,56 @@
; 32BIT-DAG: renamable $r11 = LI 4
; 32BIT-DAG: BLR implicit $lr, implicit $rm, implicit $r3
; ASM32-LABEL: .int_stack_va_arg:
; ASM32-DAG: add 3, 4, 3
; ASM32-DAG: add 3, 3, 5
; ASM32-DAG: add 3, 3, 6
; ASM32-DAG: add 3, 3, 7
; ASM32-DAG: add 3, 3, 8
; ASM32-DAG: add 3, 3, 9
; ASM32-DAG: add 3, 3, 10
; ASM32-DAG: lwz [[ARG1:[0-9]+]], 56(1)
; ASM32-DAG: li [[ARG2:[0-9]+]], [[ARG1]]
; ASM32-DAG: add 3, 3, [[ARG1:[0-9]+]]
; ASM32-DAG: add 3, 3, [[ARG2:[0-9]+]]
; ASM32-DAG: blr
; double_va_arg: returns %a + v1 + 2.0*v2 for variadic doubles; v2 is read
; through a va_copy of the list taken before any advance, so v1 and v2 are
; the same (first) variadic slot. Each va_arg advances the cursor by 8
; (sizeof double); note the loads are only 4-byte aligned.
define double @double_va_arg(double %a, ...) local_unnamed_addr {
entry:
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; va_arg of a double from %arg1 (8-byte advance, 4-byte-aligned load).
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 8
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to double*
%3 = load double, double* %2, align 4
%add = fadd double %3, %a
; va_arg of a double from the copied list %arg2 (same slot).
%argp.cur2 = load i8*, i8** %arg2, align 4
%argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 8
store i8* %argp.next3, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur2 to double*
%5 = load double, double* %4, align 4
%mul = fmul double %5, 2.000000e+00
%add4 = fadd double %add, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret double %add4
}
; double_va_arg: returns %a + v1 + 2.0*v2; both variadic doubles are the
; same first slot because the va_copy happens before any advance. Check
; lines below are autogenerated by update_llc_test_checks.py.
define double @double_va_arg(double %a, ...) local_unnamed_addr {
; ASM32-LABEL: double_va_arg:
; ASM32:       # %bb.0: # %entry
; ASM32-NEXT:    stw 6, -12(1)
; ASM32-NEXT:    addi 3, 1, 32
; ASM32-NEXT:    stw 5, -16(1)
; ASM32-NEXT:    lfd 0, -16(1)
; ASM32-NEXT:    stw 6, -20(1)
; ASM32-NEXT:    fadd 0, 0, 1
; ASM32-NEXT:    stw 5, -24(1)
; ASM32-NEXT:    lfd 1, -24(1)
; ASM32-NEXT:    fadd 1, 1, 1
; ASM32-NEXT:    stw 7, 40(1)
; ASM32-NEXT:    fadd 1, 0, 1
; ASM32-NEXT:    stw 5, 32(1)
; ASM32-NEXT:    stw 6, 36(1)
; ASM32-NEXT:    stw 8, 44(1)
; ASM32-NEXT:    stw 9, 48(1)
; ASM32-NEXT:    stw 10, 52(1)
; ASM32-NEXT:    stw 3, -4(1)
; ASM32-NEXT:    stw 3, -8(1)
; ASM32-NEXT:    blr
entry:
; Two 4-byte va_list cursors (va_start + va_copy).
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; va_arg of a double: advance the cursor by 8; load is only 4-byte aligned.
%argp.cur = load i8*, i8** %arg1, align 4
%argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 8
store i8* %argp.next, i8** %arg1, align 4
%2 = bitcast i8* %argp.cur to double*
%3 = load double, double* %2, align 4
%add = fadd double %3, %a
; Same read via the copied list.
%argp.cur2 = load i8*, i8** %arg2, align 4
%argp.next3 = getelementptr inbounds i8, i8* %argp.cur2, i32 8
store i8* %argp.next3, i8** %arg2, align 4
%4 = bitcast i8* %argp.cur2 to double*
%5 = load double, double* %4, align 4
%mul = fmul double %5, 2.000000e+00
%add4 = fadd double %add, %mul
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.va_end(i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret double %add4
}
; 32BIT-LABEL: name: double_va_arg
; 32BIT-LABEL: liveins:
@ -241,59 +269,69 @@
; 32BIT-DAG: STW killed renamable $r3, 0, %stack.1.arg2 :: (store (s32) into %ir.1)
; 32BIT-DAG: BLR implicit $lr, implicit $rm, implicit $f1
; ASM32-LABEL: .double_va_arg:
; ASM32-DAG: stw 5, 32(1)
; ASM32-DAG: stw 6, 36(1)
; ASM32-DAG: stw 7, 40(1)
; ASM32-DAG: stw 8, 44(1)
; ASM32-DAG: stw 9, 48(1)
; ASM32-DAG: stw 10, 52(1)
; ASM32-DAG: stw [[ARG1A:[0-9]+]], -12(1)
; ASM32-DAG: stw [[ARG1B:[0-9]+]], -16(1)
; ASM32-DAG: stw [[ARG2A:[0-9]+]], -20(1)
; ASM32-DAG: stw [[ARG2B:[0-9]+]], -24(1)
; ASM32-DAG: lfd [[ARG1:[0-9]+]], -16(1)
; ASM32-DAG: fadd 0, [[ARG1]], 1
; ASM32-DAG: fadd 1, 1, 1
; ASM32-DAG: lfd [[ARG2:[0-9]+]], -24(1)
; ASM32-DAG: fadd 1, 0, [[ARG2]]
; ASM32-DAG: blr
; double_stack_va_arg: thirteen fixed doubles, so the variadic data is read
; from the caller's stack (the checks load it from 128(1) — presumably all
; FP argument registers are consumed; verify against the AIX ABI). Returns
; the sum of the fixed args plus v1 + 2.0*v2. Unlike the other tests, this
; one dereferences each va_list cursor directly (no 8-byte advance stored
; back) and calls no va_end — only the lifetime markers are emitted.
define double @double_stack_va_arg(double %one, double %two, double %three, double %four, double %five, double %six, double %seven, double %eight, double %nine, double %ten, double %eleven, double %twelve, double %thirteen, ...) local_unnamed_addr {
entry:
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; Accumulate the thirteen fixed arguments.
%add = fadd double %one, %two
%add2 = fadd double %add, %three
%add3 = fadd double %add2, %four
%add4 = fadd double %add3, %five
%add5 = fadd double %add4, %six
%add6 = fadd double %add5, %seven
%add7 = fadd double %add6, %eight
%add8 = fadd double %add7, %nine
%add9 = fadd double %add8, %ten
%add10 = fadd double %add9, %eleven
%add11 = fadd double %add10, %twelve
%add12 = fadd double %add11, %thirteen
; Read the first variadic double through %arg1 without advancing the cursor.
%2 = bitcast i8** %arg1 to double**
%argp.cur1 = load double*, double** %2, align 4
%3 = load double, double* %argp.cur1, align 4
%add13 = fadd double %add12, %3
; Same slot read through the copied list %arg2.
%4 = bitcast i8** %arg2 to double**
%argp.cur142 = load double*, double** %4, align 4
%5 = load double, double* %argp.cur142, align 4
%mul = fmul double %5, 2.000000e+00
%add16 = fadd double %add13, %mul
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret double %add16
}
; double_stack_va_arg: thirteen fixed doubles; the generated code loads the
; variadic double from stack offset 128(1). Returns sum(fixed) + v1 + 2*v2,
; where both variadic reads dereference their va_list cursor directly (no
; stored advance) and no va_end is emitted. Check block below is
; autogenerated by update_llc_test_checks.py.
define double @double_stack_va_arg(double %one, double %two, double %three, double %four, double %five, double %six, double %seven, double %eight, double %nine, double %ten, double %eleven, double %twelve, double %thirteen, ...) local_unnamed_addr {
; ASM32-LABEL: double_stack_va_arg:
; ASM32:       # %bb.0: # %entry
; ASM32-NEXT:    fadd 0, 1, 2
; ASM32-NEXT:    addi 3, 1, 128
; ASM32-NEXT:    lwz 4, 132(1)
; ASM32-NEXT:    fadd 0, 0, 3
; ASM32-NEXT:    stw 3, -4(1)
; ASM32-NEXT:    fadd 0, 0, 4
; ASM32-NEXT:    lwz 3, 128(1)
; ASM32-NEXT:    fadd 0, 0, 5
; ASM32-NEXT:    stw 3, -16(1)
; ASM32-NEXT:    fadd 0, 0, 6
; ASM32-NEXT:    stw 4, -12(1)
; ASM32-NEXT:    fadd 0, 0, 7
; ASM32-NEXT:    lfd 1, -16(1)
; ASM32-NEXT:    fadd 0, 0, 8
; ASM32-NEXT:    stw 3, -24(1)
; ASM32-NEXT:    fadd 0, 0, 9
; ASM32-NEXT:    stw 4, -20(1)
; ASM32-NEXT:    fadd 0, 0, 10
; ASM32-NEXT:    fadd 0, 0, 11
; ASM32-NEXT:    fadd 0, 0, 12
; ASM32-NEXT:    fadd 0, 0, 13
; ASM32-NEXT:    fadd 0, 0, 1
; ASM32-NEXT:    lfd 1, -24(1)
; ASM32-NEXT:    fadd 1, 1, 1
; ASM32-NEXT:    fadd 1, 0, 1
; ASM32-NEXT:    blr
entry:
%arg1 = alloca i8*, align 4
%arg2 = alloca i8*, align 4
%0 = bitcast i8** %arg1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
%1 = bitcast i8** %arg2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
call void @llvm.va_start(i8* nonnull %0)
call void @llvm.va_copy(i8* nonnull %1, i8* nonnull %0)
; Accumulate the thirteen fixed arguments.
%add = fadd double %one, %two
%add2 = fadd double %add, %three
%add3 = fadd double %add2, %four
%add4 = fadd double %add3, %five
%add5 = fadd double %add4, %six
%add6 = fadd double %add5, %seven
%add7 = fadd double %add6, %eight
%add8 = fadd double %add7, %nine
%add9 = fadd double %add8, %ten
%add10 = fadd double %add9, %eleven
%add11 = fadd double %add10, %twelve
%add12 = fadd double %add11, %thirteen
; Direct dereference of the va_list cursor — no advance is stored back.
%2 = bitcast i8** %arg1 to double**
%argp.cur1 = load double*, double** %2, align 4
%3 = load double, double* %argp.cur1, align 4
%add13 = fadd double %add12, %3
; Same (first) variadic slot read through the va_copy'd list.
%4 = bitcast i8** %arg2 to double**
%argp.cur142 = load double*, double** %4, align 4
%5 = load double, double* %argp.cur142, align 4
%mul = fmul double %5, 2.000000e+00
%add16 = fadd double %add13, %mul
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret double %add16
}
; 32BIT-LABEL: name: double_stack_va_arg
; 32BIT-LABEL: liveins:
@ -349,23 +387,3 @@
; 32BIT-DAG: renamable $f1 = nofpexcept FADD killed renamable $f2, renamable $f2, implicit $rm
; 32BIT-DAG: BLR implicit $lr, implicit $rm, implicit $f1
; ASM32-LABEL: .double_stack_va_arg:
; ASM32-DAG: fadd 0, 1, 2
; ASM32-DAG: fadd 0, 0, 3
; ASM32-DAG: fadd 0, 0, 4
; ASM32-DAG: fadd 0, 0, 5
; ASM32-DAG: fadd 0, 0, 6
; ASM32-DAG: fadd 0, 0, 7
; ASM32-DAG: fadd 0, 0, 8
; ASM32-DAG: fadd 0, 0, 9
; ASM32-DAG: fadd 0, 0, 10
; ASM32-DAG: fadd 0, 0, 11
; ASM32-DAG: fadd 0, 0, 12
; ASM32-DAG: fadd 0, 0, 13
; ASM32-DAG: lwz [[ARG1:[0-9]+]], 128(1)
; ASM32-DAG: lwz [[ARG2:[0-9]+]], 132(1)
; ASM32-DAG: lfd [[ARG1:[0-9]+]], -16(1)
; ASM32-DAG: lfd [[ARG2:[0-9]+]], -24(1)
; ASM32-DAG: fadd 0, 0, [[ARG1]]
; ASM32-DAG: fadd [[ARG1]], 0, [[ARG2]]
; ASM32-DAG: blr