// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8> [[X0:%.*]], <vscale x 16 x i8> [[X1:%.*]], <vscale x 16 x i8> [[X2:%.*]], <vscale x 16 x i8> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8> [[X0:%.*]], <vscale x 16 x i8> [[X1:%.*]], <vscale x 16 x i8> [[X2:%.*]], <vscale x 16 x i8> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16(<vscale x 8 x i16> [[X0:%.*]], <vscale x 8 x i16> [[X1:%.*]], <vscale x 8 x i16> [[X2:%.*]], <vscale x 8 x i16> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16(<vscale x 8 x i16> [[X0:%.*]], <vscale x 8 x i16> [[X1:%.*]], <vscale x 8 x i16> [[X2:%.*]], <vscale x 8 x i16> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32(<vscale x 4 x i32> [[X0:%.*]], <vscale x 4 x i32> [[X1:%.*]], <vscale x 4 x i32> [[X2:%.*]], <vscale x 4 x i32> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32(<vscale x 4 x i32> [[X0:%.*]], <vscale x 4 x i32> [[X1:%.*]], <vscale x 4 x i32> [[X2:%.*]], <vscale x 4 x i32> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64(<vscale x 2 x i64> [[X0:%.*]], <vscale x 2 x i64> [[X1:%.*]], <vscale x 2 x i64> [[X2:%.*]], <vscale x 2 x i64> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64(<vscale x 2 x i64> [[X0:%.*]], <vscale x 2 x i64> [[X1:%.*]], <vscale x 2 x i64> [[X2:%.*]], <vscale x 2 x i64> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
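//
// The four blocks above cover the signed-element forms of svcreate4. Below is
// a minimal sketch of the kind of C function that produces this IR; it is a
// reconstruction inferred from the captured arguments (the function names and
// bodies are assumptions, not part of this excerpt, and the original test may
// spell the calls through an overload macro instead).

#include <arm_sve.h> // assumed: compiled with SVE enabled; harmless if already included at the top of the file

// Assumed sketch: packs four svint8_t values into an svint8x4_t tuple, which
// is what the tuple.create4.nxv64i8.nxv16i8 CHECK lines above match.
svint8x4_t sketch_svcreate4_s8(svint8_t x0, svint8_t x1, svint8_t x2, svint8_t x4) {
  return svcreate4_s8(x0, x1, x2, x4);
}

// Assumed sketch for the widest signed element type, matching the
// tuple.create4.nxv8i64.nxv2i64 CHECK lines; s16/s32 follow the same pattern.
svint64x4_t sketch_svcreate4_s64(svint64_t x0, svint64_t x1, svint64_t x2, svint64_t x4) {
  return svcreate4_s64(x0, x1, x2, x4);
}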
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8> [[X0:%.*]], <vscale x 16 x i8> [[X1:%.*]], <vscale x 16 x i8> [[X2:%.*]], <vscale x 16 x i8> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8> [[X0:%.*]], <vscale x 16 x i8> [[X1:%.*]], <vscale x 16 x i8> [[X2:%.*]], <vscale x 16 x i8> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16(<vscale x 8 x i16> [[X0:%.*]], <vscale x 8 x i16> [[X1:%.*]], <vscale x 8 x i16> [[X2:%.*]], <vscale x 8 x i16> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16(<vscale x 8 x i16> [[X0:%.*]], <vscale x 8 x i16> [[X1:%.*]], <vscale x 8 x i16> [[X2:%.*]], <vscale x 8 x i16> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32(<vscale x 4 x i32> [[X0:%.*]], <vscale x 4 x i32> [[X1:%.*]], <vscale x 4 x i32> [[X2:%.*]], <vscale x 4 x i32> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32(<vscale x 4 x i32> [[X0:%.*]], <vscale x 4 x i32> [[X1:%.*]], <vscale x 4 x i32> [[X2:%.*]], <vscale x 4 x i32> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64(<vscale x 2 x i64> [[X0:%.*]], <vscale x 2 x i64> [[X1:%.*]], <vscale x 2 x i64> [[X2:%.*]], <vscale x 2 x i64> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64(<vscale x 2 x i64> [[X0:%.*]], <vscale x 2 x i64> [[X1:%.*]], <vscale x 2 x i64> [[X2:%.*]], <vscale x 2 x i64> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
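//
// The four blocks above repeat the same checks for the unsigned-element
// forms, which lower to identical IR. A hedged sketch of one such function
// (name and body assumed, not taken from this excerpt; it relies on the
// <arm_sve.h> include introduced earlier):
svuint16x4_t sketch_svcreate4_u16(svuint16_t x0, svuint16_t x1, svuint16_t x2, svuint16_t x4) {
  // Builds an svuint16x4_t tuple; matches the same tuple.create4.nxv32i16
  // intrinsic checked for the signed i16 case above.
  return svcreate4_u16(x0, x1, x2, x4);
}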
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.aarch64.sve.tuple.create4.nxv32f16.nxv8f16(<vscale x 8 x half> [[X0:%.*]], <vscale x 8 x half> [[X1:%.*]], <vscale x 8 x half> [[X2:%.*]], <vscale x 8 x half> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 32 x half> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.aarch64.sve.tuple.create4.nxv32f16.nxv8f16(<vscale x 8 x half> [[X0:%.*]], <vscale x 8 x half> [[X1:%.*]], <vscale x 8 x half> [[X2:%.*]], <vscale x 8 x half> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 32 x half> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.aarch64.sve.tuple.create4.nxv16f32.nxv4f32(<vscale x 4 x float> [[X0:%.*]], <vscale x 4 x float> [[X1:%.*]], <vscale x 4 x float> [[X2:%.*]], <vscale x 4 x float> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 16 x float> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.aarch64.sve.tuple.create4.nxv16f32.nxv4f32(<vscale x 4 x float> [[X0:%.*]], <vscale x 4 x float> [[X1:%.*]], <vscale x 4 x float> [[X2:%.*]], <vscale x 4 x float> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x float> [[TMP0]]
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.aarch64.sve.tuple.create4.nxv8f64.nxv2f64(<vscale x 2 x double> [[X0:%.*]], <vscale x 2 x double> [[X1:%.*]], <vscale x 2 x double> [[X2:%.*]], <vscale x 2 x double> [[X4:%.*]])
// CHECK-NEXT: ret <vscale x 8 x double> [[TMP0]]
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.aarch64.sve.tuple.create4.nxv8f64.nxv2f64(<vscale x 2 x double> [[X0:%.*]], <vscale x 2 x double> [[X1:%.*]], <vscale x 2 x double> [[X2:%.*]], <vscale x 2 x double> [[X4:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x double> [[TMP0]]
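//
// The last three blocks check the floating-point forms. A hedged sketch of
// the corresponding functions (names and bodies assumed, not taken from this
// excerpt); in C++ mode the overloaded svcreate4(...) spelling of the same
// calls would produce the CPP-CHECK lines:
svfloat16x4_t sketch_svcreate4_f16(svfloat16_t x0, svfloat16_t x1, svfloat16_t x2, svfloat16_t x4) {
  return svcreate4_f16(x0, x1, x2, x4); // matches tuple.create4.nxv32f16.nxv8f16
}

svfloat64x4_t sketch_svcreate4_f64(svfloat64_t x0, svfloat64_t x1, svfloat64_t x2, svfloat64_t x4) {
  return svcreate4_f64(x0, x1, x2, x4); // matches tuple.create4.nxv8f64.nxv2f64
}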