[AArch64][SVE] Implement abs and neg intrinsics
Summary:
This patch implements two arithmetic intrinsics:

  * int_aarch64_sve_abs
  * int_aarch64_sve_neg

testing the support for scalable vector types in intrinsics added in D65930.

Reviewed By: greened

Differential Revision: https://reviews.llvm.org/D65931

llvm-svn: 371388
commit 55244beeee
parent d936a6301b
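Note (illustrative, not part of the patch): both intrinsics use the merged form, where the first operand supplies the result for inactive lanes, the second is the governing predicate, and the third is the source vector. A minimal LLVM IR sketch, with a hypothetical function name:

define <vscale x 4 x i32> @abs_merge_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
  ; active lanes (%pg true) receive abs(%b); inactive lanes keep the value from %a
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a,
                                                               <vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)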
@@ -749,9 +749,6 @@ def int_aarch64_ttest : GCCBuiltin<"__builtin_arm_ttest">,
                [IntrNoMem, IntrHasSideEffects]>;
 }
 
-//===----------------------------------------------------------------------===//
-// SVE
-
 def llvm_nxv2i1_ty : LLVMType<nxv2i1>;
 def llvm_nxv4i1_ty : LLVMType<nxv4i1>;
 def llvm_nxv8i1_ty : LLVMType<nxv8i1>;
@@ -764,6 +761,13 @@ def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
 def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
 
 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_Merged1VectorArg_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMMatchType<0>],
+                [IntrNoMem]>;
+
   // This class of intrinsics are not intended to be useful within LLVM IR but
   // are instead here to support some of the more regid parts of the ACLE.
   class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
@@ -771,8 +775,21 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
       Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SVE
+
+let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+
+//
+// Integer arithmetic
+//
+
+def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
 
 //
 // Floating-point comparisons
 //
 
 def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
 }

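Note (reading aid, not part of the diff): AdvSIMD_Merged1VectorArg_Intrinsic is declared over llvm_anyvector_ty, so each use resolves to a concrete signature in which the pass-through and source types match the result and the predicate has the same element count. For 64-bit elements, for example, int_aarch64_sve_abs resolves to the declaration also used by the new test below:

declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)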
@@ -94,8 +94,8 @@ let Predicates = [HasSVE] in {
   defm UXTH_ZPmZ : sve_int_un_pred_arit_0_w<0b011, "uxth">;
   defm SXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b100, "sxtw">;
   defm UXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b101, "uxtw">;
-  defm ABS_ZPmZ  : sve_int_un_pred_arit_0<  0b110, "abs">;
-  defm NEG_ZPmZ  : sve_int_un_pred_arit_0<  0b111, "neg">;
+  defm ABS_ZPmZ  : sve_int_un_pred_arit_0<  0b110, "abs", int_aarch64_sve_abs>;
+  defm NEG_ZPmZ  : sve_int_un_pred_arit_0<  0b111, "neg", int_aarch64_sve_neg>;
 
   defm CLS_ZPmZ  : sve_int_un_pred_arit_1<  0b000, "cls">;
   defm CLZ_ZPmZ  : sve_int_un_pred_arit_1<  0b001, "clz">;
@@ -279,6 +279,14 @@ let Predicates = [HasSVE] in {
   defm PTRUES : sve_int_ptrue<0b001, "ptrues">;
 }
 
+//===----------------------------------------------------------------------===//
+// SVE pattern match helpers.
+//===----------------------------------------------------------------------===//
+
+class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                   ValueType vt2, ValueType vt3, Instruction inst>
+: Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
+      (inst $Op1, $Op2, $Op3)>;
+
 //===----------------------------------------------------------------------===//
 // SVE Predicate Misc Group
@@ -2835,11 +2843,17 @@ class sve_int_un_pred_arit<bits<2> sz8_64, bits<4> opc,
   let ElementSize = zprty.ElementSize;
 }
 
-multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm> {
+multiclass sve_int_un_pred_arit_0<bits<3> opc, string asm,
+                                  SDPatternOperator op> {
   def _B : sve_int_un_pred_arit<0b00, { opc, 0b0 }, asm, ZPR8>;
   def _H : sve_int_un_pred_arit<0b01, { opc, 0b0 }, asm, ZPR16>;
   def _S : sve_int_un_pred_arit<0b10, { opc, 0b0 }, asm, ZPR32>;
   def _D : sve_int_un_pred_arit<0b11, { opc, 0b0 }, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_un_pred_arit_0_h<bits<3> opc, string asm> {
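Note (illustrative sketch, mirroring the new test below): the SVE_3_Op_Pat instantiations above are what let instruction selection map a call of the intrinsic directly onto the predicated instruction; for 64-bit elements the _D pattern should select NEG_ZPmZ_D:

define <vscale x 2 x i64> @neg_sel_sketch(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
  ; expected to lower to "neg z0.d, p0/m, z1.d"
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)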
@ -0,0 +1,99 @@
|
|||
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
|
||||
|
||||
;
|
||||
; ABS
|
||||
;
|
||||
|
||||
define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
|
||||
; CHECK-LABEL: abs_i8:
|
||||
; CHECK: abs z0.b, p0/m, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a,
|
||||
<vscale x 16 x i1> %pg,
|
||||
<vscale x 16 x i8> %b)
|
||||
ret <vscale x 16 x i8> %out
|
||||
}
|
||||
|
||||
define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
|
||||
; CHECK-LABEL: abs_i16:
|
||||
; CHECK: abs z0.h, p0/m, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a,
|
||||
<vscale x 8 x i1> %pg,
|
||||
<vscale x 8 x i16> %b)
|
||||
ret <vscale x 8 x i16> %out
|
||||
}
|
||||
|
||||
define <vscale x 4 x i32> @abs_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
|
||||
; CHECK-LABEL: abs_i32:
|
||||
; CHECK: abs z0.s, p0/m, z1.s
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %b)
|
||||
ret <vscale x 4 x i32> %out
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @abs_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
|
||||
; CHECK-LABEL: abs_i64:
|
||||
; CHECK: abs z0.d, p0/m, z1.d
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %b)
|
||||
ret <vscale x 2 x i64> %out
|
||||
}
|
||||
|
||||
;
|
||||
; NEG
|
||||
;
|
||||
|
||||
define <vscale x 16 x i8> @neg_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
|
||||
; CHECK-LABEL: neg_i8:
|
||||
; CHECK: neg z0.b, p0/m, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8> %a,
|
||||
<vscale x 16 x i1> %pg,
|
||||
<vscale x 16 x i8> %b)
|
||||
ret <vscale x 16 x i8> %out
|
||||
}
|
||||
|
||||
define <vscale x 8 x i16> @neg_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
|
||||
; CHECK-LABEL: neg_i16:
|
||||
; CHECK: neg z0.h, p0/m, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16> %a,
|
||||
<vscale x 8 x i1> %pg,
|
||||
<vscale x 8 x i16> %b)
|
||||
ret <vscale x 8 x i16> %out
|
||||
}
|
||||
|
||||
define <vscale x 4 x i32> @neg_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
|
||||
; CHECK-LABEL: neg_i32:
|
||||
; CHECK: neg z0.s, p0/m, z1.s
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32> %a,
|
||||
<vscale x 4 x i1> %pg,
|
||||
<vscale x 4 x i32> %b)
|
||||
ret <vscale x 4 x i32> %out
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @neg_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
|
||||
; CHECK-LABEL: neg_i64:
|
||||
; CHECK: neg z0.d, p0/m, z1.d
|
||||
; CHECK-NEXT: ret
|
||||
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> %a,
|
||||
<vscale x 2 x i1> %pg,
|
||||
<vscale x 2 x i64> %b)
|
||||
ret <vscale x 2 x i64> %out
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
|
||||
declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
|
||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
|
||||
declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
|
||||
declare <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
|
||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
|
||||
declare <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
|