// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 0)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 0)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
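// The two check groups above cover the vector-base form with an implicit
// zero offset: one unsigned byte is gathered per active lane and
// zero-extended to the element width. A minimal sketch of source code that
// would produce this IR, assuming the ACLE svldff1ub gather intrinsics from
// <arm_sve.h> (the wrapper names here are illustrative, not necessarily the
// original test functions):
#include <arm_sve.h>

svint32_t test_svldff1ub_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  // 32-bit vector of addresses; each loaded byte is zero-extended to i32,
  // matching the zext checked above.
  return svldff1ub_gather_u32base_s32(pg, bases);
}

svint64_t test_svldff1ub_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  // Same pattern with 64-bit addresses and zero-extension to i64.
  return svldff1ub_gather_u64base_s64(pg, bases);
}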
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 0)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 0)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
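// Unsigned-result counterparts of the base-form gathers; ldff1ub always
// zero-extends, so the IR is identical to the signed variants above. Sketch
// under the same assumptions:
svuint32_t test_svldff1ub_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  return svldff1ub_gather_u32base_u32(pg, bases);
}

svuint64_t test_svldff1ub_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  return svldff1ub_gather_u64base_u64(pg, bases);
}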
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
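// The sxtw form above sign-extends each 32-bit offset before adding it to
// the scalar base; the 64-bit form uses the offsets directly. A sketch of
// calls that would lower to these intrinsics, assuming the ACLE
// signed-offset variants (wrapper names illustrative):
svint32_t test_svldff1ub_gather_s32offset_s32(svbool_t pg, const uint8_t *base,
                                              svint32_t offsets) {
  // base + sign-extended 32-bit byte offsets; loaded bytes zext to i32.
  return svldff1ub_gather_s32offset_s32(pg, base, offsets);
}

svint64_t test_svldff1ub_gather_s64offset_s64(svbool_t pg, const uint8_t *base,
                                              svint64_t offsets) {
  // base + 64-bit byte offsets; loaded bytes zext to i64.
  return svldff1ub_gather_s64offset_s64(pg, base, offsets);
}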
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
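// Unsigned-result versions of the signed-offset gathers; again the IR is
// unchanged. Sketch:
svuint32_t test_svldff1ub_gather_s32offset_u32(svbool_t pg,
                                               const uint8_t *base,
                                               svint32_t offsets) {
  return svldff1ub_gather_s32offset_u32(pg, base, offsets);
}

svuint64_t test_svldff1ub_gather_s64offset_u64(svbool_t pg,
                                               const uint8_t *base,
                                               svint64_t offsets) {
  return svldff1ub_gather_s64offset_u64(pg, base, offsets);
}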
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
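// The uxtw form above zero-extends each 32-bit offset instead; at 64 bits
// no extension is needed, so the same plain gather intrinsic is used.
// Sketch, assuming the ACLE unsigned-offset variants:
svint32_t test_svldff1ub_gather_u32offset_s32(svbool_t pg,
                                              const uint8_t *base,
                                              svuint32_t offsets) {
  // base + zero-extended 32-bit byte offsets; loaded bytes zext to i32.
  return svldff1ub_gather_u32offset_s32(pg, base, offsets);
}

svint64_t test_svldff1ub_gather_u64offset_s64(svbool_t pg,
                                              const uint8_t *base,
                                              svuint64_t offsets) {
  return svldff1ub_gather_u64offset_s64(pg, base, offsets);
}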
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> [[PG]], i8* %base, <vscale x 4 x i32> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> [[PG]], i8* %base, <vscale x 2 x i64> %offsets)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
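// Unsigned-result versions of the unsigned-offset gathers. Sketch:
svuint32_t test_svldff1ub_gather_u32offset_u32(svbool_t pg,
                                               const uint8_t *base,
                                               svuint32_t offsets) {
  return svldff1ub_gather_u32offset_u32(pg, base, offsets);
}

svuint64_t test_svldff1ub_gather_u64offset_u64(svbool_t pg,
                                               const uint8_t *base,
                                               svuint64_t offsets) {
  return svldff1ub_gather_u64offset_u64(pg, base, offsets);
}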
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 %offset)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 %offset)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
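// The scalar.offset calls above now take a runtime %offset rather than a
// constant 0: a vector of base addresses plus one scalar byte offset.
// Sketch, assuming the ACLE _offset variants (wrapper names illustrative):
svint32_t test_svldff1ub_gather_u32base_offset_s32(svbool_t pg,
                                                   svuint32_t bases,
                                                   int64_t offset) {
  // Each active lane loads the byte at bases[i] + offset, zext to i32.
  return svldff1ub_gather_u32base_offset_s32(pg, bases, offset);
}

svint64_t test_svldff1ub_gather_u64base_offset_s64(svbool_t pg,
                                                   svuint64_t bases,
                                                   int64_t offset) {
  return svldff1ub_gather_u64base_offset_s64(pg, bases, offset);
}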
// CHECK: [[PG:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> %bases, i64 %offset)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 4 x i8> [[LOAD]] to <vscale x 4 x i32>
// CHECK: [[PG:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// CHECK: [[LOAD:%.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> %bases, i64 %offset)
// CHECK: [[ZEXT:%.*]] = zext <vscale x 2 x i8> [[LOAD]] to <vscale x 2 x i64>
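// Unsigned-result versions of the base-plus-scalar-offset gathers. Sketch:
svuint32_t test_svldff1ub_gather_u32base_offset_u32(svbool_t pg,
                                                    svuint32_t bases,
                                                    int64_t offset) {
  return svldff1ub_gather_u32base_offset_u32(pg, bases, offset);
}

svuint64_t test_svldff1ub_gather_u64base_offset_u64(svbool_t pg,
                                                    svuint64_t bases,
                                                    int64_t offset) {
  return svldff1ub_gather_u64base_offset_u64(pg, bases, offset);
}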