Forked from OSchip/llvm-project

[VE] Add vmul and vdiv intrinsic instructions

Add vmul and vdiv intrinsic instructions and regression tests.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D92377

parent 00f4269cef
commit 10b164d2f7
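For orientation, here is a minimal IR-level sketch (not part of this commit; the function and value names are illustrative) of how one of the newly added intrinsics is invoked. The declaration matches the int_ve_vl_vmulul_vvvl definition added below, and the trailing i32 operand is the vector length, as in the regression tests.

; Sketch only: unmasked 64-bit unsigned vector multiply through the new intrinsic.
declare <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double>, <256 x double>, i32)

define <256 x double> @example_vmulul(<256 x double> %vy, <256 x double> %vz) {
  ; Multiply the two vector registers element-wise with vector length 256.
  %r = call <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double> %vy, <256 x double> %vz, i32 256)
  ret <256 x double> %r
}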
@@ -190,3 +190,82 @@ let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvl : GCCBuiltin<"__builtin_ve_
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
|
||||
@@ -304,3 +304,129 @@ def : Pat<(int_ve_vl_vsubsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl),
def : Pat<(int_ve_vl_vsubsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VSUBSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vsubsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VSUBSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vsubsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VSUBSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULULvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULULvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VMULULrvl i64:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULULrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULULivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULULivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULULvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULULrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulul_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULULivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULUWvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULUWvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMULUWrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULUWrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULUWivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULUWivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULUWvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULUWrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmuluw_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULUWivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULSWSXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWSXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMULSWSXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWSXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULSWSXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWSXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWSXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWSXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswsx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWSXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULSWZXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWZXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMULSWZXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWZXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULSWZXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSWZXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWZXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWZXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulswzx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSWZXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULSLvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VMULSLrvl i64:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULSLivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMULSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMULSLWvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLWvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMULSLWrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLWrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMULSLWivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vmulslw_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMULSLWivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VDIVULvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVULvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VDIVULrvl i64:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVULrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VDIVULivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVULivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVULvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVULrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVULivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VDIVUWvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVUWvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VDIVUWrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVUWrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VDIVUWivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVUWivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVUWvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVUWrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVUWivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsl v256f64:$vy, i64:$sy, i32:$vl), (VDIVULvrl v256f64:$vy, i64:$sy, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsvl v256f64:$vy, i64:$sy, v256f64:$pt, i32:$vl), (VDIVULvrl_v v256f64:$vy, i64:$sy, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsl v256f64:$vy, simm7:$I, i32:$vl), (VDIVULvil v256f64:$vy, (LO7 $I), i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVULvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsmvl v256f64:$vy, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVULvrml_v v256f64:$vy, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivul_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVULviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsl v256f64:$vy, i32:$sy, i32:$vl), (VDIVUWvrl v256f64:$vy, i32:$sy, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsvl v256f64:$vy, i32:$sy, v256f64:$pt, i32:$vl), (VDIVUWvrl_v v256f64:$vy, i32:$sy, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsl v256f64:$vy, simm7:$I, i32:$vl), (VDIVUWvil v256f64:$vy, (LO7 $I), i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVUWvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsmvl v256f64:$vy, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVUWvrml_v v256f64:$vy, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivuw_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVUWviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VDIVSWSXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWSXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VDIVSWSXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWSXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VDIVSWSXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWSXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWSXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWSXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWSXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VDIVSWZXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWZXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VDIVSWZXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWZXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VDIVSWZXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSWZXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWZXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWZXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWZXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsl v256f64:$vy, i32:$sy, i32:$vl), (VDIVSWSXvrl v256f64:$vy, i32:$sy, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsvl v256f64:$vy, i32:$sy, v256f64:$pt, i32:$vl), (VDIVSWSXvrl_v v256f64:$vy, i32:$sy, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsl v256f64:$vy, simm7:$I, i32:$vl), (VDIVSWSXvil v256f64:$vy, (LO7 $I), i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVSWSXvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsmvl v256f64:$vy, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWSXvrml_v v256f64:$vy, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswsx_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWSXviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsl v256f64:$vy, i32:$sy, i32:$vl), (VDIVSWZXvrl v256f64:$vy, i32:$sy, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsvl v256f64:$vy, i32:$sy, v256f64:$pt, i32:$vl), (VDIVSWZXvrl_v v256f64:$vy, i32:$sy, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsl v256f64:$vy, simm7:$I, i32:$vl), (VDIVSWZXvil v256f64:$vy, (LO7 $I), i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVSWZXvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsmvl v256f64:$vy, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWZXvrml_v v256f64:$vy, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivswzx_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSWZXviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VDIVSLvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSLvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VDIVSLrvl i64:$sy, v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSLrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VDIVSLivl (LO7 $I), v256f64:$vz, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VDIVSLivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsl v256f64:$vy, i64:$sy, i32:$vl), (VDIVSLvrl v256f64:$vy, i64:$sy, i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsvl v256f64:$vy, i64:$sy, v256f64:$pt, i32:$vl), (VDIVSLvrl_v v256f64:$vy, i64:$sy, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsl v256f64:$vy, simm7:$I, i32:$vl), (VDIVSLvil v256f64:$vy, (LO7 $I), i32:$vl)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVSLvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLvrml_v v256f64:$vy, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
|
||||
def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
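Before the generated tests, a companion sketch (again illustrative, not taken from the commit) of the masked form: it adds a <256 x i1> mask and a pass-through vector ahead of the vector-length operand, mirroring the int_ve_vl_vmulul_vvvmvl intrinsic and the VMULULvvml_v pattern defined above.

; Sketch only: masked multiply; %vm is the mask, %pt the pass-through vector, 128 the vector length.
declare <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

define <256 x double> @example_vmulul_masked(<256 x double> %vy, <256 x double> %vz, <256 x i1> %vm, <256 x double> %pt) {
  %r = call <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double> %vy, <256 x double> %vz, <256 x i1> %vm, <256 x double> %pt, i32 128)
  ret <256 x double> %r
}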
File diff suppressed because it is too large
@@ -0,0 +1,831 @@
; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector multiply intrinsic instructions
;;;
;;; Note:
;;; We test VMUL*vvl, VMUL*vvl_v, VMUL*rvl, VMUL*rvl_v, VMUL*ivl, VMUL*ivl_v,
;;; VMUL*vvml_v, VMUL*rvml_v, and VMUL*ivml_v instructions.

; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vvvl(<256 x double> %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmulul_vvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 256
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v0, %v0, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmulul_vvvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v2, %v0, %v1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v2
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvl(i64 %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmulul_vsvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s1, 256
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.l %v0, %s0, %v0
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvl(i64 %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vsvl(i64, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmulul_vsvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s1, 128
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.l %v1, %s0, %v0
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64, <256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvl_imm(<256 x double> %0) {
|
||||
; CHECK-LABEL: vmulul_vsvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 256
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v0, 8, %v0
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%2 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvl(i64 8, <256 x double> %0, i32 256)
|
||||
ret <256 x double> %2
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmulul_vsvvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v1, 8, %v0
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
|
||||
; CHECK-LABEL: vmulul_vvvmvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v2, %v0, %v1, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v2
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%5 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
|
||||
ret <256 x double> %5
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
|
||||
; CHECK-LABEL: vmulul_vsvmvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s1, 128
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.l %v1, %s0, %v0, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%5 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
|
||||
ret <256 x double> %5
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulul_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmulul_vsvmvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.l %v1, 8, %v0, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vvvl(<256 x double> %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmuluw_vvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 256
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v0, %v0, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmuluw_vvvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v2, %v0, %v1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v2
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvl(i32 signext %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmuluw_vsvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea %s1, 256
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.w %v0, %s0, %v0
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32 %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmuluw_vsvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea %s1, 128
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.w %v1, %s0, %v0
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32, <256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvl_imm(<256 x double> %0) {
|
||||
; CHECK-LABEL: vmuluw_vsvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 256
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v0, 8, %v0
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%2 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32 8, <256 x double> %0, i32 256)
|
||||
ret <256 x double> %2
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmuluw_vsvvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v1, 8, %v0
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
|
||||
; CHECK-LABEL: vmuluw_vvvmvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v2, %v0, %v1, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v2
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%5 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
|
||||
ret <256 x double> %5
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
|
||||
; CHECK-LABEL: vmuluw_vsvmvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea %s1, 128
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmulu.w %v1, %s0, %v0, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%5 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
|
||||
ret <256 x double> %5
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmuluw_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmuluw_vsvmvl_imm:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmulu.w %v1, 8, %v0, %vm1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulswsx_vvvl(<256 x double> %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmulswsx_vvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 256
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmuls.w.sx %v0, %v0, %v1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmulswsx_vvvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: lea %s0, 128
|
||||
; CHECK-NEXT: lvl %s0
|
||||
; CHECK-NEXT: vmuls.w.sx %v2, %v0, %v1
|
||||
; CHECK-NEXT: lea %s16, 256
|
||||
; CHECK-NEXT: lvl %s16
|
||||
; CHECK-NEXT: vor %v0, (0)1, %v2
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
|
||||
ret <256 x double> %4
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulswsx_vsvl(i32 signext %0, <256 x double> %1) {
|
||||
; CHECK-LABEL: vmulswsx_vsvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea %s1, 256
|
||||
; CHECK-NEXT: lvl %s1
|
||||
; CHECK-NEXT: vmuls.w.sx %v0, %s0, %v0
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
%3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
|
||||
ret <256 x double> %3
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32, <256 x double>, i32)
|
||||
|
||||
; Function Attrs: nounwind readnone
|
||||
define fastcc <256 x double> @vmulswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
|
||||
; CHECK-LABEL: vmulswsx_vsvvl:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.w.sx %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulswsx_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.sx %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswsx_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.sx %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswsx_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.sx %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswsx_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.w.sx %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswsx_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.sx %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.w.zx %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.w.zx %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulswzx_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswzx_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswzx_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.w.zx %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.w.zx %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.l %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.l %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulsl_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulsl_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulsl_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.l %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l.w %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulslw_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l.w %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.l.w %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulslw_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vmuls.l.w %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulslw_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l.w %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vmuls.l.w %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}