[AArch64][SVE] Fix isel failure for FP-extending loads

DAGCombiner tries to combine (fpext (load)) into (fround (extload)),
but SVE has no FP-extending loads. Marking these load extensions as
Expand stops the combine from creating them.
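
For context, the fold in question lives in DAGCombiner::visitFP_EXTEND and is
gated on a load-extension legality query. A paraphrased sketch (not an exact
copy of the upstream source; N, N0, VT, TLI and DAG are the usual DAGCombiner
locals):

    // fold (fpext (load x)) -> (fround (extload x))
    if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
        TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, VT, N0.getValueType())) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      SDValue ExtLoad =
          DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, LN0->getChain(),
                         LN0->getBasePtr(), N0.getValueType(),
                         LN0->getMemOperand());
      CombineTo(N, ExtLoad);
    }

Once the extending loads are marked Expand, isLoadExtLegalOrCustom returns
false for the scalable FP types and this rewrite is skipped.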

This also fixes a similar issue for fptrunc followed by a store, where the
source type (e.g. nxv8f64) is not a legal type: iterating over all scalable
FP types, rather than only the legal ones, extends the truncating-store
Expand markings to those pairs as well.
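
Concretely, after this change both legality queries that gate these combines
reject all scalable FP element-size pairs, including ones involving an illegal
source type. A hypothetical standalone check (TLI standing in for the
AArch64TargetLowering instance):

    // Extending-load fold: rejected.
    bool ExtOK = TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD,
                                            MVT::nxv2f64, MVT::nxv2f16);
    // Truncating-store fold: rejected, even for the illegal nxv8f64.
    bool TruncOK = TLI.isTruncStoreLegalOrCustom(MVT::nxv8f64, MVT::nxv8f16);
    // Both ExtOK and TruncOK are now false; isel instead sees a plain
    // load/store plus fcvt, which SVE can select directly.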

Reviewed By: bsmith, kmclaughlin

Differential Revision: https://reviews.llvm.org/D102053
commit 407a33889d (parent ea64200b61)
Author: Sander de Smalen
Date:   2021-05-10 11:27:38 +01:00

3 changed files with 119 additions and 4 deletions


@@ -1185,15 +1185,20 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       }
     }
 
-    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
-                    MVT::nxv4f32, MVT::nxv2f64}) {
-      for (auto InnerVT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16,
-                           MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64}) {
+    for (MVT VT : MVT::fp_scalable_vector_valuetypes()) {
+      for (MVT InnerVT : MVT::fp_scalable_vector_valuetypes()) {
         // Avoid marking truncating FP stores as legal to prevent the
         // DAGCombiner from creating unsupported truncating stores.
         setTruncStoreAction(VT, InnerVT, Expand);
+        // SVE does not have floating-point extending loads.
+        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
       }
+    }
+
+    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
+                    MVT::nxv4f32, MVT::nxv2f64}) {
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::MGATHER, VT, Custom);


@@ -0,0 +1,85 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s

; fpext <vscale x 2 x half> -> <vscale x 2 x double>
define <vscale x 2 x double> @ext2_f16_f64(<vscale x 2 x half> *%ptr, i64 %index) {
; CHECK-LABEL: ext2_f16_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.h
; CHECK-NEXT: ret
%load = load <vscale x 2 x half>, <vscale x 2 x half>* %ptr, align 4
%load.ext = fpext <vscale x 2 x half> %load to <vscale x 2 x double>
ret <vscale x 2 x double> %load.ext
}

; fpext <vscale x 4 x half> -> <vscale x 4 x double>
define <vscale x 4 x double> @ext4_f16_f64(<vscale x 4 x half> *%ptr, i64 %index) {
; CHECK-LABEL: ext4_f16_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: uunpklo z1.d, z0.s
; CHECK-NEXT: uunpkhi z2.d, z0.s
; CHECK-NEXT: fcvt z0.d, p0/m, z1.h
; CHECK-NEXT: fcvt z1.d, p0/m, z2.h
; CHECK-NEXT: ret
%load = load <vscale x 4 x half>, <vscale x 4 x half>* %ptr, align 4
%load.ext = fpext <vscale x 4 x half> %load to <vscale x 4 x double>
ret <vscale x 4 x double> %load.ext
}

; fpext <vscale x 8 x half> -> <vscale x 8 x double>
define <vscale x 8 x double> @ext8_f16_f64(<vscale x 8 x half> *%ptr, i64 %index) {
; CHECK-LABEL: ext8_f16_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: uunpklo z1.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: uunpklo z2.d, z1.s
; CHECK-NEXT: uunpkhi z1.d, z1.s
; CHECK-NEXT: uunpklo z3.d, z0.s
; CHECK-NEXT: uunpkhi z4.d, z0.s
; CHECK-NEXT: fcvt z0.d, p0/m, z2.h
; CHECK-NEXT: fcvt z1.d, p0/m, z1.h
; CHECK-NEXT: fcvt z2.d, p0/m, z3.h
; CHECK-NEXT: fcvt z3.d, p0/m, z4.h
; CHECK-NEXT: ret
%load = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 4
%load.ext = fpext <vscale x 8 x half> %load to <vscale x 8 x double>
ret <vscale x 8 x double> %load.ext
}

; fpext <vscale x 2 x float> -> <vscale x 2 x double>
define <vscale x 2 x double> @ext2_f32_f64(<vscale x 2 x float> *%ptr, i64 %index) {
; CHECK-LABEL: ext2_f32_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.s
; CHECK-NEXT: ret
%load = load <vscale x 2 x float>, <vscale x 2 x float>* %ptr, align 4
%load.ext = fpext <vscale x 2 x float> %load to <vscale x 2 x double>
ret <vscale x 2 x double> %load.ext
}

; fpext <vscale x 4 x float> -> <vscale x 4 x double>
define <vscale x 4 x double> @ext4_f32_f64(<vscale x 4 x float> *%ptr, i64 %index) {
; CHECK-LABEL: ext4_f32_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: uunpklo z1.d, z0.s
; CHECK-NEXT: uunpkhi z2.d, z0.s
; CHECK-NEXT: fcvt z0.d, p0/m, z1.s
; CHECK-NEXT: fcvt z1.d, p0/m, z2.s
; CHECK-NEXT: ret
%load = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
%load.ext = fpext <vscale x 4 x float> %load to <vscale x 4 x double>
ret <vscale x 4 x double> %load.ext
}


@@ -60,3 +60,28 @@ entry:
store <vscale x 2 x half> %1, <vscale x 2 x half>* %dst, align 2
ret void
}

define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *%src) {
; CHECK-LABEL: fptrunc8_f64_f16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1]
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1, #1, mul vl]
; CHECK-NEXT: ld1d { z2.d }, p0/z, [x1, #2, mul vl]
; CHECK-NEXT: ld1d { z3.d }, p0/z, [x1, #3, mul vl]
; CHECK-NEXT: fcvt z0.h, p0/m, z0.d
; CHECK-NEXT: fcvt z1.h, p0/m, z1.d
; CHECK-NEXT: fcvt z2.h, p0/m, z2.d
; CHECK-NEXT: fcvt z3.h, p0/m, z3.d
; CHECK-NEXT: uzp1 z2.s, z2.s, z3.s
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
entry:
%0 = load <vscale x 8 x double>, <vscale x 8 x double>* %src, align 8
%1 = fptrunc <vscale x 8 x double> %0 to <vscale x 8 x half>
store <vscale x 8 x half> %1, <vscale x 8 x half>* %dst, align 2
ret void
}