// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_s16'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_s32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_s64'}}
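// Sketch, not the original test bodies: minimal callers that would produce the smlslt IR
// checked above, assuming `#include <arm_sve.h>`, the standard ACLE signatures, and
// hypothetical test_* function names; the overload-warning lines correspond to the
// generic overloaded form svmlslt(op1, op2, op3).
svint16_t test_svmlslt_s16(svint16_t op1, svint8_t op2, svint8_t op3) { return svmlslt_s16(op1, op2, op3); }
svint32_t test_svmlslt_s32(svint32_t op1, svint16_t op2, svint16_t op3) { return svmlslt_s32(op1, op2, op3); }
svint64_t test_svmlslt_s64(svint64_t op1, svint32_t op2, svint32_t op3) { return svmlslt_s64(op1, op2, op3); }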
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_u16'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_u32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_u64'}}
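// Sketch (hypothetical test_* names, ACLE signatures assumed): unsigned variants that
// lower to the umlslt intrinsics checked above.
svuint16_t test_svmlslt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3) { return svmlslt_u16(op1, op2, op3); }
svuint32_t test_svmlslt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3) { return svmlslt_u32(op1, op2, op3); }
svuint64_t test_svmlslt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3) { return svmlslt_u64(op1, op2, op3); }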
// CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s16'}}
// CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s32'}}
// CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s64'}}
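// Sketch (hypothetical test_* names, ACLE signatures assumed): scalar-operand (_n) signed
// variants; the scalar op3 is splatted via sve.dup.x before the smlslt call, as checked above.
svint16_t test_svmlslt_n_s16(svint16_t op1, svint8_t op2, int8_t op3) { return svmlslt_n_s16(op1, op2, op3); }
svint32_t test_svmlslt_n_s32(svint32_t op1, svint16_t op2, int16_t op3) { return svmlslt_n_s32(op1, op2, op3); }
svint64_t test_svmlslt_n_s64(svint64_t op1, svint32_t op2, int32_t op3) { return svmlslt_n_s64(op1, op2, op3); }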
// CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u16'}}
// CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u32'}}
// CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u64'}}
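// Sketch (hypothetical test_* names, ACLE signatures assumed): scalar-operand (_n)
// unsigned variants, again splatting op3 before the umlslt call.
svuint16_t test_svmlslt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3) { return svmlslt_n_u16(op1, op2, op3); }
svuint32_t test_svmlslt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3) { return svmlslt_n_u32(op1, op2, op3); }
svuint64_t test_svmlslt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3) { return svmlslt_n_u64(op1, op2, op3); }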
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
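// Sketch (hypothetical test_* names, ACLE signatures assumed): signed lane variants,
// exercising both ends of the immediate range (0..7 for the s32 form, 0..3 for the s64
// form), matching the lane immediates in the checks above.
svint32_t test_svmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3) { return svmlslt_lane_s32(op1, op2, op3, 0); }
svint32_t test_svmlslt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3) { return svmlslt_lane_s32(op1, op2, op3, 7); }
svint64_t test_svmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3) { return svmlslt_lane_s64(op1, op2, op3, 0); }
svint64_t test_svmlslt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3) { return svmlslt_lane_s64(op1, op2, op3, 3); }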
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
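// Sketch (hypothetical test_* names, ACLE signatures assumed): unsigned lane variants
// with the same immediate ranges as the signed forms.
svuint32_t test_svmlslt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3) { return svmlslt_lane_u32(op1, op2, op3, 0); }
svuint32_t test_svmlslt_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3) { return svmlslt_lane_u32(op1, op2, op3, 7); }
svuint64_t test_svmlslt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3) { return svmlslt_lane_u64(op1, op2, op3, 0); }
svuint64_t test_svmlslt_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3) { return svmlslt_lane_u64(op1, op2, op3, 3); }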
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmlslt.nxv4f32(<vscale x 4 x float> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %op3)
// CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_f32'}}
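// Sketch (hypothetical test_* name, ACLE signature assumed): half-to-single widening
// form that lowers to the fmlslt intrinsic checked above.
svfloat32_t test_svmlslt_f32(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) { return svmlslt_f32(op1, op2, op3); }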
// CHECK: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op3)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmlslt.nxv4f32(<vscale x 4 x float> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %[[DUP]])
// CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_n_f32'}}
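// Sketch (hypothetical test_* name, ACLE signature assumed): the scalar float16_t
// operand is splatted via sve.dup.x before the fmlslt call, as checked above.
svfloat32_t test_svmlslt_n_f32(svfloat32_t op1, svfloat16_t op2, float16_t op3) { return svmlslt_n_f32(op1, op2, op3); }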
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmlslt.lane.nxv4f32(<vscale x 4 x float> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %op3, i32 0)
// CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_f32'}}
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmlslt.lane.nxv4f32(<vscale x 4 x float> %op1, <vscale x 8 x half> %op2, <vscale x 8 x half> %op3, i32 7)
// CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
// overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
// expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_f32'}}
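// Sketch (hypothetical test_* names, ACLE signature assumed): floating-point lane
// variant, exercising lane immediates 0 and 7 as in the checks above.
svfloat32_t test_svmlslt_lane_f32(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) { return svmlslt_lane_f32(op1, op2, op3, 0); }
svfloat32_t test_svmlslt_lane_f32_1(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) { return svmlslt_lane_f32(op1, op2, op3, 7); }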