[clang][NFC][SVE] Add tests for operators on VLS vectors
This patch adds codegen tests for operators on SVE VLS vector types.
parent 2a156f6058
commit d261d3e4a7
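For context, the construct these tests exercise looks like the minimal sketch below. It is illustrative only and not part of the patch: the example_and name and the -msve-vector-bits=512 driver flag are assumptions here, while the committed tests invoke cc1 directly with -mvscale-min/-mvscale-max as shown in the RUN lines that follow.

#include <arm_sve.h>

// Hypothetical example, not from the patch: a 512-bit VLS (fixed-length) SVE
// vector type and a bitwise operator applied to it element-wise.
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

fixed_int32_t example_and(fixed_int32_t a, fixed_int32_t b) {
  return a & b; // lowers to a plain 'and <16 x i32>' on the extracted fixed-width vector
}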
@@ -0,0 +1,468 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN: -emit-llvm -o - %s | opt -S -sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N 512

typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

// AND

// CHECK-LABEL: @and_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[AND]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t and_bool(fixed_bool_t a, fixed_bool_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a & b;
}

// OR

// CHECK-LABEL: @or_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[OR]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t or_bool(fixed_bool_t a, fixed_bool_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a | b;
}

// XOR

// CHECK-LABEL: @xor_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[XOR]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t xor_bool(fixed_bool_t a, fixed_bool_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a ^ b;
}

// NEG

// CHECK-LABEL: @neg_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[NEG]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
fixed_bool_t neg_bool(fixed_bool_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t neg_i8(fixed_int8_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t neg_i16(fixed_int16_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t neg_i32(fixed_int32_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t neg_i64(fixed_int64_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t neg_u8(fixed_uint8_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t neg_u16(fixed_uint16_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t neg_u32(fixed_uint32_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t neg_u64(fixed_uint64_t a) {
  return ~a;
}

@@ -0,0 +1,650 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN: -emit-llvm -o - %s | opt -S -sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N 512

typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

// CHECK-LABEL: @lshift_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a << b;
}

// CHECK-LABEL: @rshift_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a >> b;
}

// CHECK-LABEL: @lshift_i8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u8_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u8_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_i16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_i16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_i16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_i16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_u16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
  return a << b;
}

// CHECK-LABEL: @lshift_u16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
  return b << a;
}

// CHECK-LABEL: @rshift_u16_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
  return a >> b;
}

// CHECK-LABEL: @rshift_u16_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
  return b >> a;
}

// CHECK-LABEL: @lshift_i32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) {
return a << b;
}

// CHECK-LABEL: @lshift_i32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) {
return b << a;
}

// CHECK-LABEL: @rshift_i32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) {
return a >> b;
}

// CHECK-LABEL: @rshift_i32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) {
return b >> a;
}

// CHECK-LABEL: @lshift_u32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
return a << b;
}

// CHECK-LABEL: @lshift_u32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
return b << a;
}

// CHECK-LABEL: @rshift_u32_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
return a >> b;
}

// CHECK-LABEL: @rshift_u32_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
return b >> a;
}

// CHECK-LABEL: @lshift_i64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) {
return a << b;
}

// CHECK-LABEL: @lshift_i64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) {
return b << a;
}

// CHECK-LABEL: @rshift_i64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) {
return a >> b;
}

// CHECK-LABEL: @rshift_i64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) {
return b >> a;
}

// CHECK-LABEL: @lshift_u64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
return a << b;
}

// CHECK-LABEL: @lshift_u64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) {
return b << a;
}

// CHECK-LABEL: @rshift_u64_rsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
return a >> b;
}

// CHECK-LABEL: @rshift_u64_lsplat(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rshift_u64_lsplat(fixed_uint64_t a, uint64_t b) {
return b >> a;
}

@ -0,0 +1,117 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN: -emit-llvm -o - %s | opt -S -sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
#include <stddef.h>

#define N 512

typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

// CHECK-LABEL: @subscript_int16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i16 [[VECEXT]]
//
int16_t subscript_int16(fixed_int16_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_uint16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i16 [[VECEXT]]
//
uint16_t subscript_uint16(fixed_uint16_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_int32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i32 [[VECEXT]]
//
int32_t subscript_int32(fixed_int32_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_uint32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i32 [[VECEXT]]
//
uint32_t subscript_uint32(fixed_uint32_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_int64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i64 [[VECEXT]]
//
int64_t subscript_int64(fixed_int64_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_uint64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret i64 [[VECEXT]]
//
uint64_t subscript_uint64(fixed_uint64_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_float16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret half [[VECEXT]]
//
__fp16 subscript_float16(fixed_float16_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_float32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret float [[VECEXT]]
//
float subscript_float32(fixed_float32_t a, size_t b) {
return a[b];
}

// CHECK-LABEL: @subscript_float64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]]
// CHECK-NEXT: ret double [[VECEXT]]
//
double subscript_float64(fixed_float64_t a, size_t b) {
return a[b];
}