// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
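// The lines above are the unmasked vamomin checks for 32-bit elements across
// index widths ei8/ei16/ei32/ei64. A minimal sketch of one source function
// that would produce the nxv1i32.nxv1i32 check, assuming <riscv_vector.h>
// and the v0.10-era Zvamo intrinsic naming (the exact names are an
// assumption, not taken from this file; later sketches assume the same
// header):
#include <riscv_vector.h>

vint32mf2_t test_vamominei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
                                      vint32mf2_t value, size_t vl) {
  // Indexed atomic signed min: old = *(base + bindex[i]);
  // *(base + bindex[i]) = min(old, value[i]); the old values are returned.
  return vamominei32_v_i32mf2(base, bindex, value, vl);
}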
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
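// Same pattern for 64-bit elements; a sketch for the nxv4i64.nxv4i16 check
// (intrinsic name assumed as above):
vint64m4_t test_vamominei16_v_i64m4(int64_t *base, vuint16m1_t bindex,
                                    vint64m4_t value, size_t vl) {
  // The index vector has the same element count as the data vector, so
  // EEW=16 indices against m4 data (SEW=64) occupy an m1 register group.
  return vamominei16_v_i64m4(base, bindex, value, vl);
}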
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
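// vamominu is the unsigned counterpart; the unsigned intrinsics take
// uint*_t pointers and vuint*_t values. A sketch for the nxv2i32.nxv2i8
// check (naming assumed as above):
vuint32m1_t test_vamominuei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex,
                                     vuint32m1_t value, size_t vl) {
  return vamominuei8_v_u32m1(base, bindex, value, vl);
}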
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
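// A sketch for the widest unsigned 64-bit case, nxv8i64.nxv8i64 (naming
// assumed as above):
vuint64m8_t test_vamominuei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
                                      vuint64m8_t value, size_t vl) {
  return vamominuei64_v_u64m8(base, bindex, value, vl);
}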
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
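// The .mask intrinsics above take an extra <vscale x N x i1> mask operand
// before vl; only active elements perform the AMO. A sketch for the masked
// nxv1i32.nxv1i8 check, assuming the _m suffix convention for masked
// intrinsics:
vint32mf2_t test_vamominei8_v_i32mf2_m(vbool64_t mask, int32_t *base,
                                       vuint8mf8_t bindex, vint32mf2_t value,
                                       size_t vl) {
  // vbool64_t corresponds to <vscale x 1 x i1>: one mask bit per element
  // of the mf2 32-bit data vector.
  return vamominei8_v_i32mf2_m(mask, base, bindex, value, vl);
}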
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
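// And the masked unsigned 64-bit case, nxv8i64.nxv8i64 (vbool8_t maps to
// <vscale x 8 x i1>; naming assumed as above):
vuint64m8_t test_vamominuei64_v_u64m8_m(vbool8_t mask, uint64_t *base,
                                        vuint64m8_t bindex, vuint64m8_t value,
                                        size_t vl) {
  return vamominuei64_v_u64m8_m(mask, base, bindex, value, vl);
}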