[AArch64] Simplify BTI/PAC-RET module flags
These module flags use the Min merge behavior with a default value of zero, so we don't need to emit them if zero.

Reviewed By: danielkiss

Differential Revision: https://reviews.llvm.org/D130145
parent 9891bb2302
commit de1b5c9145
@@ -795,18 +795,17 @@ void CodeGenModule::Release() {
       Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
       Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
       Arch == llvm::Triple::aarch64_be) {
-    getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
-                              LangOpts.BranchTargetEnforcement);
-
-    getModule().addModuleFlag(llvm::Module::Min, "sign-return-address",
-                              LangOpts.hasSignReturnAddress());
-
-    getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
-                              LangOpts.isSignReturnAddressScopeAll());
-
-    getModule().addModuleFlag(llvm::Module::Min,
-                              "sign-return-address-with-bkey",
-                              !LangOpts.isSignReturnAddressWithAKey());
+    if (LangOpts.BranchTargetEnforcement)
+      getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
+                                1);
+    if (LangOpts.hasSignReturnAddress())
+      getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+    if (LangOpts.isSignReturnAddressScopeAll())
+      getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
+                                1);
+    if (!LangOpts.isSignReturnAddressWithAKey())
+      getModule().addModuleFlag(llvm::Module::Min,
+                                "sign-return-address-with-bkey", 1);
   }
 
   if (!CodeGenOpts.MemoryProfileOutput.empty()) {
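The rewrite relies on the Min merge semantics these flags already use (behavior ID 8 in the metadata below): per the commit message, a module that omits one of these flags contributes the default value zero when modules are linked, so dropping a zero-valued flag changes nothing observable. A minimal, illustrative IR sketch (not part of the patch):

; TU A enables BTI and therefore still emits the flag
!llvm.module.flags = !{!0}
!0 = !{i32 8, !"branch-target-enforcement", i32 1}   ; i32 8 == llvm::Module::Min

; TU B is built without BTI and, after this patch, emits no such flag at all;
; when A and B are linked, the Min rule combines 1 with the default 0, so the
; linked module ends up with "branch-target-enforcement" = 0 -- exactly as if
; B had still emitted the flag explicitly with value 0.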
@@ -5,7 +5,7 @@ struct foo { unsigned long long x[8]; };
 
 // CHECK-LABEL: @load(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(i8* [[ADDR:%.*]]) #[[ATTR1:[0-9]+]], !srcloc !6
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(i8* [[ADDR:%.*]]) #[[ATTR1:[0-9]+]], !srcloc !2
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast %struct.foo* [[OUTPUT:%.*]] to i512*
 // CHECK-NEXT: store i512 [[TMP0]], i512* [[TMP1]], align 8
 // CHECK-NEXT: ret void
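The remaining hunks in this commit only renumber metadata nodes in existing FileCheck lines: because the four branch-protection flags are no longer emitted by modules that do not enable them, every later numbered node shifts down by exactly four. A hypothetical sketch of the flag entries that used to pad the numbering (node indices are illustrative, not taken from the test):

!0 = !{i32 8, !"branch-target-enforcement", i32 0}
!1 = !{i32 8, !"sign-return-address", i32 0}
!2 = !{i32 8, !"sign-return-address-all", i32 0}
!3 = !{i32 8, !"sign-return-address-with-bkey", i32 0}
; after this change these four nodes are gone, so later numbered nodes drop by
; four, e.g. !srcloc !6 becomes !srcloc !2 and !tbaa !8 becomes !tbaa !4 below.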
@@ -19,7 +19,7 @@ void load(struct foo *output, void *addr)
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast %struct.foo* [[INPUT:%.*]] to i512*
 // CHECK-NEXT: [[TMP1:%.*]] = load i512, i512* [[TMP0]], align 8
-// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[TMP1]], i8* [[ADDR:%.*]]) #[[ATTR1]], !srcloc !7
+// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[TMP1]], i8* [[ADDR:%.*]]) #[[ATTR1]], !srcloc !3
 // CHECK-NEXT: ret void
 //
 void store(const struct foo *input, void *addr)
@ -29,28 +29,28 @@ void store(const struct foo *input, void *addr)
|
|||
|
||||
// CHECK-LABEL: @store2(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[IN:%.*]], align 4, !tbaa [[TBAA8:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[IN:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]]
|
||||
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 1
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[TMP1]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 4
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV5:%.*]] = sext i32 [[TMP2]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 16
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV8:%.*]] = sext i32 [[TMP3]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 25
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[TMP4]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 36
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV14:%.*]] = sext i32 [[TMP5]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 49
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV17:%.*]] = sext i32 [[TMP6]] to i64
|
||||
// CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 64
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX19]], align 4, !tbaa [[TBAA8]]
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX19]], align 4, !tbaa [[TBAA4]]
|
||||
// CHECK-NEXT: [[CONV20:%.*]] = sext i32 [[TMP7]] to i64
|
||||
// CHECK-NEXT: [[S_SROA_10_0_INSERT_EXT:%.*]] = zext i64 [[CONV20]] to i512
|
||||
// CHECK-NEXT: [[S_SROA_10_0_INSERT_SHIFT:%.*]] = shl nuw i512 [[S_SROA_10_0_INSERT_EXT]], 448
|
||||
|
@ -74,7 +74,7 @@ void store(const struct foo *input, void *addr)
|
|||
// CHECK-NEXT: [[S_SROA_0_0_INSERT_EXT:%.*]] = zext i64 [[CONV]] to i512
|
||||
// CHECK-NEXT: [[S_SROA_0_0_INSERT_MASK:%.*]] = or i512 [[S_SROA_4_0_INSERT_MASK]], [[S_SROA_4_0_INSERT_SHIFT]]
|
||||
// CHECK-NEXT: [[S_SROA_0_0_INSERT_INSERT:%.*]] = or i512 [[S_SROA_0_0_INSERT_MASK]], [[S_SROA_0_0_INSERT_EXT]]
|
||||
// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[S_SROA_0_0_INSERT_INSERT]], i8* [[ADDR:%.*]]) #[[ATTR1]], !srcloc !12
|
||||
// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[S_SROA_0_0_INSERT_INSERT]], i8* [[ADDR:%.*]]) #[[ATTR1]], !srcloc !8
|
||||
// CHECK-NEXT: ret void
|
||||
//
|
||||
void store2(int *in, void *addr)
|
||||
|
|
|
@ -21,35 +21,35 @@ uint64_t status;
|
|||
// CHECK-C-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8
|
||||
// CHECK-C-NEXT: [[TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8
|
||||
// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8
|
||||
// CHECK-C-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
|
||||
// CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !6
|
||||
// CHECK-C-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !6
|
||||
// CHECK-C-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META2:![0-9]+]])
|
||||
// CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !2
|
||||
// CHECK-C-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !2
|
||||
// CHECK-C-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[TMP]], i32 0, i32 0
|
||||
// CHECK-C-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0
|
||||
// CHECK-C-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !6
|
||||
// CHECK-C-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !2
|
||||
// CHECK-C-NEXT: [[TMP3:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 0
|
||||
// CHECK-C-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 1
|
||||
// CHECK-C-NEXT: [[TMP5:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 1
|
||||
// CHECK-C-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 2
|
||||
// CHECK-C-NEXT: [[TMP7:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 2
|
||||
// CHECK-C-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 3
|
||||
// CHECK-C-NEXT: [[TMP9:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 3
|
||||
// CHECK-C-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP10:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 4
|
||||
// CHECK-C-NEXT: [[TMP11:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 4
|
||||
// CHECK-C-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 5
|
||||
// CHECK-C-NEXT: [[TMP13:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 5
|
||||
// CHECK-C-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP14:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 6
|
||||
// CHECK-C-NEXT: [[TMP15:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 6
|
||||
// CHECK-C-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP16:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 7
|
||||
// CHECK-C-NEXT: [[TMP17:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 7
|
||||
// CHECK-C-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !6
|
||||
// CHECK-C-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !2
|
||||
// CHECK-C-NEXT: [[TMP18:%.*]] = bitcast %struct.data512_t* [[TMP]] to i8*
|
||||
// CHECK-C-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast (%struct.data512_t* @val to i8*), i8* align 8 [[TMP18]], i64 64, i1 false)
|
||||
// CHECK-C-NEXT: ret void
|
||||
|
@ -59,35 +59,35 @@ uint64_t status;
|
|||
// CHECK-CXX-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8
|
||||
// CHECK-CXX-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8
|
||||
// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8
|
||||
// CHECK-CXX-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
|
||||
// CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !6
|
||||
// CHECK-CXX-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !6
|
||||
// CHECK-CXX-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META2:![0-9]+]])
|
||||
// CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !2
|
||||
// CHECK-CXX-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !2
|
||||
// CHECK-CXX-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[REF_TMP]], i32 0, i32 0
|
||||
// CHECK-CXX-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0
|
||||
// CHECK-CXX-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !6
|
||||
// CHECK-CXX-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !2
|
||||
// CHECK-CXX-NEXT: [[TMP3:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 0
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 1
|
||||
// CHECK-CXX-NEXT: [[TMP5:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 1
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 2
|
||||
// CHECK-CXX-NEXT: [[TMP7:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 2
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 3
|
||||
// CHECK-CXX-NEXT: [[TMP9:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 3
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP10:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 4
|
||||
// CHECK-CXX-NEXT: [[TMP11:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 4
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 5
|
||||
// CHECK-CXX-NEXT: [[TMP13:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 5
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP14:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 6
|
||||
// CHECK-CXX-NEXT: [[TMP15:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP16:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 7
|
||||
// CHECK-CXX-NEXT: [[TMP17:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 7
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !6
|
||||
// CHECK-CXX-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !2
|
||||
// CHECK-CXX-NEXT: [[TMP18:%.*]] = bitcast %struct.data512_t* [[REF_TMP]] to i8*
|
||||
// CHECK-CXX-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast (%struct.data512_t* @val to i8*), i8* align 8 [[TMP18]], i64 64, i1 false)
|
||||
// CHECK-CXX-NEXT: ret void
|
||||
|
|
|
@@ -19,28 +19,28 @@
 
 // Check module attributes
 
-// NONE: !{i32 8, !"branch-target-enforcement", i32 0}
-// ALL: !{i32 8, !"branch-target-enforcement", i32 0}
-// PART: !{i32 8, !"branch-target-enforcement", i32 0}
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
-// B-KEY: !{i32 8, !"branch-target-enforcement", i32 0}
+// NONE-NOT: !"branch-target-enforcement"
+// ALL-NOT: !"branch-target-enforcement"
+// PART-NOT: !"branch-target-enforcement"
+// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// B-KEY-NOT: !"branch-target-enforcement"
 
-// NONE: !{i32 8, !"sign-return-address", i32 0}
+// NONE-NOT: !"sign-return-address"
 // ALL: !{i32 8, !"sign-return-address", i32 1}
 // PART: !{i32 8, !"sign-return-address", i32 1}
-// BTE: !{i32 8, !"sign-return-address", i32 0}
+// BTE-NOT: !"sign-return-address"
 // B-KEY: !{i32 8, !"sign-return-address", i32 1}
 
-// NONE: !{i32 8, !"sign-return-address-all", i32 0}
+// NONE-NOT: !"sign-return-address-all"
 // ALL: !{i32 8, !"sign-return-address-all", i32 1}
-// PART: !{i32 8, !"sign-return-address-all", i32 0}
-// BTE: !{i32 8, !"sign-return-address-all", i32 0}
-// B-KEY: !{i32 8, !"sign-return-address-all", i32 0}
+// PART-NOT: !"sign-return-address-all"
+// BTE-NOT: !"sign-return-address-all"
+// B-KEY-NOT: !"sign-return-address-all"
 
-// NONE: !{i32 8, !"sign-return-address-with-bkey", i32 0}
-// ALL: !{i32 8, !"sign-return-address-with-bkey", i32 0}
-// PART: !{i32 8, !"sign-return-address-with-bkey", i32 0}
-// BTE: !{i32 8, !"sign-return-address-with-bkey", i32 0}
+// NONE-NOT: !"sign-return-address-with-bkey"
+// ALL-NOT: !"sign-return-address-with-bkey"
+// PART-NOT: !"sign-return-address-with-bkey"
+// BTE-NOT: !"sign-return-address-with-bkey"
 // B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 1}
 
 void foo() {}
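After the change, a test configuration only sees flags for the properties it actually enables, which is why the zero-value checks above become *-NOT checks. For example, the BTE configuration is now expected to produce only the BTI flag; a rough sketch of the relevant metadata (other module flags omitted, and the driver spelling -mbranch-protection=bti is an assumption, not quoted from the test's RUN lines):

; built with something like: clang -mbranch-protection=bti   (assumed spelling)
!llvm.module.flags = !{!0}
!0 = !{i32 8, !"branch-target-enforcement", i32 1}
; no "sign-return-address*" flags are present at all, so the new BTE-NOT lines
; pass, while the old checks for an explicit "i32 0" value would no longer match.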
@ -17,13 +17,13 @@
|
|||
// CHECK-LABEL: @test_svld1_s8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z13test_svld1_s8u10__SVBool_tPKa(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
svint8_t test_svld1_s8(svbool_t pg, const int8_t *base)
|
||||
|
@ -35,14 +35,14 @@ svint8_t test_svld1_s8(svbool_t pg, const int8_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_s16u10__SVBool_tPKs(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
|
||||
//
|
||||
svint16_t test_svld1_s16(svbool_t pg, const int16_t *base)
|
||||
|
@ -54,14 +54,14 @@ svint16_t test_svld1_s16(svbool_t pg, const int16_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_s32u10__SVBool_tPKi(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
|
||||
//
|
||||
svint32_t test_svld1_s32(svbool_t pg, const int32_t *base)
|
||||
|
@ -73,14 +73,14 @@ svint32_t test_svld1_s32(svbool_t pg, const int32_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_s64u10__SVBool_tPKl(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
|
||||
//
|
||||
svint64_t test_svld1_s64(svbool_t pg, const int64_t *base)
|
||||
|
@ -91,13 +91,13 @@ svint64_t test_svld1_s64(svbool_t pg, const int64_t *base)
|
|||
// CHECK-LABEL: @test_svld1_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z13test_svld1_u8u10__SVBool_tPKh(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP0]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base)
|
||||
|
@ -109,14 +109,14 @@ svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_u16u10__SVBool_tPKt(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
|
||||
//
|
||||
svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base)
|
||||
|
@ -128,14 +128,14 @@ svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_u32u10__SVBool_tPKj(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
|
||||
//
|
||||
svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base)
|
||||
|
@ -147,14 +147,14 @@ svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_u64u10__SVBool_tPKm(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
|
||||
//
|
||||
svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base)
|
||||
|
@ -166,14 +166,14 @@ svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA15:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA11:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_f16u10__SVBool_tPKDh(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA15:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA11:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP2]]
|
||||
//
|
||||
svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base)
|
||||
|
@ -185,14 +185,14 @@ svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA17:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA13:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_f32u10__SVBool_tPKf(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA17:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA13:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP2]]
|
||||
//
|
||||
svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base)
|
||||
|
@ -204,14 +204,14 @@ svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA19:![0-9]+]]
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA15:![0-9]+]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z14test_svld1_f64u10__SVBool_tPKd(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA19:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA15:![0-9]+]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
|
||||
//
|
||||
svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base)
|
||||
|
@ -224,7 +224,7 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base)
|
|||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* [[TMP0]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_s8u10__SVBool_tPKal(
|
||||
|
@ -232,7 +232,7 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base)
|
|||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* [[TMP0]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
|
||||
//
|
||||
svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
|
||||
|
@ -246,7 +246,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i16* [[TMP2]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s16u10__SVBool_tPKsl(
|
||||
|
@ -255,7 +255,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i16* [[TMP2]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
|
||||
//
|
||||
svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
|
||||
|
@ -269,7 +269,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s32u10__SVBool_tPKil(
|
||||
|
@ -278,7 +278,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
|
||||
//
|
||||
svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
|
||||
|
@ -292,7 +292,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s64u10__SVBool_tPKll(
|
||||
|
@ -301,7 +301,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
|
||||
//
|
||||
svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
|
||||
|
@ -314,7 +314,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* [[TMP0]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <vscale x 16 x i8>*
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_u8u10__SVBool_tPKhl(
|
||||
|
@ -322,7 +322,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* [[TMP0]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <vscale x 16 x i8>*
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA6]]
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer), !tbaa [[TBAA2]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
|
||||
//
|
||||
svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
|
||||
|
@ -336,7 +336,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i16* [[TMP2]] to <vscale x 8 x i16>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u16u10__SVBool_tPKtl(
|
||||
|
@ -345,7 +345,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i16* [[TMP2]] to <vscale x 8 x i16>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer), !tbaa [[TBAA5]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
|
||||
//
|
||||
svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
|
||||
|
@ -359,7 +359,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <vscale x 4 x i32>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u32u10__SVBool_tPKjl(
|
||||
|
@ -368,7 +368,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <vscale x 4 x i32>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer), !tbaa [[TBAA7]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
|
||||
//
|
||||
svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
|
||||
|
@ -382,7 +382,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to <vscale x 2 x i64>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u64u10__SVBool_tPKml(
|
||||
|
@ -391,7 +391,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to <vscale x 2 x i64>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer), !tbaa [[TBAA9]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
|
||||
//
|
||||
svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
|
||||
|
@ -405,7 +405,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast half* [[TMP2]] to <vscale x 8 x half>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA15]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f16u10__SVBool_tPKDhl(
|
||||
|
@ -414,7 +414,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast half* [[TMP2]] to <vscale x 8 x half>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA15]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer), !tbaa [[TBAA11]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
|
||||
//
|
||||
svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum)
|
||||
|
@ -428,7 +428,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <vscale x 4 x float>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA17]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f32u10__SVBool_tPKfl(
|
||||
|
@ -437,7 +437,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <vscale x 4 x float>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA17]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer), !tbaa [[TBAA13]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
|
||||
//
|
||||
svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum)
|
||||
|
@ -451,7 +451,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[TMP2]] to <vscale x 2 x double>*
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA19]]
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA15]]
|
||||
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f64u10__SVBool_tPKdl(
|
||||
|
@ -460,7 +460,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], i64 [[VNUM:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[TMP2]] to <vscale x 2 x double>*
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA19]]
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer), !tbaa [[TBAA15]]
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
|
||||
//
|
||||
svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum)
|
||||
|
|
|
@@ -15,19 +15,19 @@
 
 // Check module attributes
 
-// NONE: !{i32 8, !"branch-target-enforcement", i32 0}
-// PART: !{i32 8, !"branch-target-enforcement", i32 0}
-// ALL: !{i32 8, !"branch-target-enforcement", i32 0}
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// NONE-NOT: !"branch-target-enforcement"
+// PART-NOT: !"branch-target-enforcement"
+// ALL-NOT: !"branch-target-enforcement"
+// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
 
-// NONE: !{i32 8, !"sign-return-address", i32 0}
-// PART: !{i32 8, !"sign-return-address", i32 1}
-// ALL: !{i32 8, !"sign-return-address", i32 1}
-// BTE: !{i32 8, !"sign-return-address", i32 0}
+// NONE-NOT: !"sign-return-address"
+// PART: !{i32 8, !"sign-return-address", i32 1}
+// ALL: !{i32 8, !"sign-return-address", i32 1}
+// BTE-NOT: !"sign-return-address"
 
-// NONE: !{i32 8, !"sign-return-address-all", i32 0}
-// PART: !{i32 8, !"sign-return-address-all", i32 0}
-// ALL: !{i32 8, !"sign-return-address-all", i32 1}
-// BTE: !{i32 8, !"sign-return-address-all", i32 0}
+// NONE-NOT: !"sign-return-address-all", i32 0}
+// PART-NOT: !"sign-return-address-all", i32 0}
+// ALL: !{i32 8, !"sign-return-address-all", i32 1}
+// BTE-NOT: !"sign-return-address-all", i32 0}
 
 void foo() {}
(Diffs of four additional files are suppressed because they are too large.)