//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//
class GCNPat<dag pattern, dag result> : Pat<pattern, result>, GCNPredicateControl {
  let SubtargetPredicate = isGCN;
}

include "SOPInstructions.td"
include "VOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m<0, AMDGPUexport>;
defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

// Used to inject printing of "_e32" suffix for VI (there are "_e64" variants for VI)
def VINTRPDst : VINTRPDstOperand <VGPR_32>;

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VINTRPDst:$vdst),
  (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p1_f32$vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VINTRPDst:$vdst),
  (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p2_f32$vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VINTRPDst:$vdst),
  (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_mov_f32$vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
                                     (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//
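
// Pseudo for atomic memory fences. This carries the ordering and scope as
// immediates; the memory legalizer is expected to expand it into the
// appropriate wait/cache-invalidate sequence for the target.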
def ATOMIC_FENCE : SPseudoInstSI<
  (outs), (ins i32imm:$ordering, i32imm:$scope),
  [(atomic_fence (i32 imm:$ordering), (i32 imm:$scope))],
  "ATOMIC_FENCE $ordering, $scope"> {
  let hasSideEffects = 1;
  let maybeAtomic = 1;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the
// SIFoldOperands pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
                                      (ins VSrc_b64:$src0)>;

// Pseudoinstruction for @llvm.amdgcn.wqm. It is turned into a copy after the
// WQM pass processes it.
def WQM : PseudoInstSI <(outs unknown:$vdst), (ins unknown:$src0)>;

// Pseudoinstruction for @llvm.amdgcn.wwm. It is turned into a copy post-RA, so
// that the @earlyclobber is respected. The @earlyclobber is to make sure that
// the instruction that defines $src0 (which is run in WWM) doesn't
// accidentally clobber inactive channels of $vdst.
let Constraints = "@earlyclobber $vdst" in {
def WWM : PseudoInstSI <(outs unknown:$vdst), (ins unknown:$src0)>;
}

} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

def EXIT_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins SReg_64:$src0)> {
  let hasSideEffects = 0;
  let mayLoad = 0;
  let mayStore = 0;
}

// Invert the exec mask and overwrite the inactive lanes of dst with inactive,
// restoring it after we're done.
def V_SET_INACTIVE_B32 : VPseudoInstSI <(outs VGPR_32:$vdst),
  (ins VGPR_32:$src, VSrc_b32:$inactive),
  [(set i32:$vdst, (int_amdgcn_set_inactive i32:$src, i32:$inactive))]> {
  let Constraints = "$src = $vdst";
}

def V_SET_INACTIVE_B64 : VPseudoInstSI <(outs VReg_64:$vdst),
  (ins VReg_64:$src, VSrc_b64:$inactive),
  [(set i64:$vdst, (int_amdgcn_set_inactive i64:$src, i64:$inactive))]> {
  let Constraints = "$src = $vdst";
}
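
// 64-bit scalar add/sub pseudos. The custom inserter is expected to expand
// each into an s_add_u32/s_addc_u32 (or s_sub_u32/s_subb_u32) pair, which is
// why SCC is marked as defined.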
let usesCustomInserter = 1, Defs = [SCC] in {
def S_ADD_U64_PSEUDO : SPseudoInstSI <
  (outs SReg_64:$vdst), (ins SSrc_b64:$src0, SSrc_b64:$src1),
  [(set SReg_64:$vdst, (add i64:$src0, i64:$src1))]
>;

def S_SUB_U64_PSEUDO : SPseudoInstSI <
  (outs SReg_64:$vdst), (ins SSrc_b64:$src0, SSrc_b64:$src1),
  [(set SReg_64:$vdst, (sub i64:$src0, i64:$src1))]
>;

def S_ADD_U64_CO_PSEUDO : SPseudoInstSI <
  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
>;

def S_SUB_U64_CO_PSEUDO : SPseudoInstSI <
  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
>;

} // End usesCustomInserter = 1, Defs = [SCC]

let usesCustomInserter = 1 in {
def GET_GROUPSTATICSIZE : SPseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End usesCustomInserter = 1
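
// Terminator variants of exec-mask updates. Control-flow lowering uses these
// so the exec manipulation can legally sit in a block's terminator sequence;
// they are expected to be rewritten back into the real s_mov_b64/s_xor_b64/
// s_andn2_b64 instructions by later exec-masking passes.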
def S_MOV_B64_term : SPseudoInstSI<(outs SReg_64:$dst),
                                   (ins SSrc_b64:$src0)> {
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_XOR_B64_term : SPseudoInstSI<(outs SReg_64:$dst),
                                   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
  let Defs = [SCC];
}

def S_ANDN2_B64_term : SPseudoInstSI<(outs SReg_64:$dst),
                                     (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}
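
// Compiler-only barrier (FixedSize = 1 with Size = 0, so no ISA is emitted):
// it only prevents passes from moving memory operations across it within a
// wave.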
def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
  [(int_amdgcn_wave_barrier)]> {
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let isBarrier = 1;
  let isConvergent = 1;
  let FixedSize = 1;
  let Size = 0;
}

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator used after control-flow instructions that have been
// replaced with exec mask operations.
def SI_MASK_BRANCH : VPseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let FixedSize = 1;
  let Size = 0;
}

let isTerminator = 1 in {

let OtherPredicates = [EnableLateCFGStructurize] in {
def SI_NON_UNIFORM_BRCOND_PSEUDO : CFPseudoInstSI <
  (outs),
  (ins SReg_64:$vcc, brtarget:$target),
  [(brcond i1:$vcc, bb:$target)]> {
  let Size = 12;
}
} // End OtherPredicates = [EnableLateCFGStructurize]

def SI_IF: CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 12;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst),
  (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Size = 12;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(AMDGPUloop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 1;
  let hasSideEffects = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let hasSideEffects = 1;
  let mayLoad = 1; // FIXME: Should not need memory flags
  let mayStore = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC] in {

multiclass PseudoInstKill <dag ins> {
  // Even though this pseudo can usually be expanded without an SCC def, we
  // conservatively assume that it has an SCC def, both because it is sometimes
  // required in degenerate cases (when V_CMPX cannot be used due to constant
  // bus limitations) and because it allows us to avoid having to track SCC
  // liveness across basic blocks.
  let Defs = [EXEC,VCC,SCC] in
  def _PSEUDO : PseudoInstSI <(outs), ins> {
    let isConvergent = 1;
    let usesCustomInserter = 1;
  }

  let Defs = [EXEC,VCC,SCC] in
  def _TERMINATOR : SPseudoInstSI <(outs), ins> {
    let isTerminator = 1;
  }
}

defm SI_KILL_I1 : PseudoInstKill <(ins SSrc_b64:$src, i1imm:$killvalue)>;
defm SI_KILL_F32_COND_IMM : PseudoInstKill <(ins VSrc_b32:$src0, i32imm:$src1, i32imm:$cond)>;

let Defs = [EXEC,VCC] in
def SI_ILLEGAL_COPY : SPseudoInstSI <
  (outs unknown:$dst), (ins unknown:$src),
  [], " ; illegal copy $src to $dst">;

} // End Uses = [EXEC], Defs = [EXEC,VCC]

// Branch on undef scc. Used to avoid intermediate copy from
// IMPLICIT_DEF to SCC.
def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
  let isTerminator = 1;
  let usesCustomInserter = 1;
  let isBranch = 1;
}

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

def SI_MASKED_UNREACHABLE : SPseudoInstSI <(outs), (ins),
  [(int_amdgcn_unreachable)],
  "; divergent unreachable"> {
  let Size = 0;
  let hasNoSchedulingInfo = 1;
  let FixedSize = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_INIT_EXEC : SPseudoInstSI <
  (outs), (ins i64imm:$src), []> {
  let Defs = [EXEC];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
}

def SI_INIT_EXEC_FROM_INPUT : SPseudoInstSI <
  (outs), (ins SSrc_b32:$input, i32imm:$shift), []> {
  let Defs = [EXEC];
  let usesCustomInserter = 1;
}

// Return for returning shaders to a shader variant epilog.
def SI_RETURN_TO_EPILOG : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn_to_epilog)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
  let FixedSize = 1;
}

// Return for returning function calls.
def SI_RETURN : SPseudoInstSI <
  (outs), (ins), [],
  "; return"> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let SchedRW = [WriteBranch];
}

// Call pseudo used during instruction selection. It has no output register;
// this version is only needed so we can fill in the output register in
// the custom inserter.
def SI_CALL_ISEL : SPseudoInstSI <
  (outs), (ins SSrc_b64:$src0, unknown:$callee),
  [(AMDGPUcall i64:$src0, tglobaladdr:$callee)]> {
  let Size = 4;
  let isCall = 1;
  let SchedRW = [WriteBranch];
  let usesCustomInserter = 1;
}

// Wrapper around s_swappc_b64 with extra $callee parameter to track
// the called function after regalloc.
def SI_CALL : SPseudoInstSI <
  (outs SReg_64:$dst), (ins SSrc_b64:$src0, unknown:$callee)> {
  let Size = 4;
  let isCall = 1;
  let UseNamedOperandTable = 1;
  let SchedRW = [WriteBranch];
}

// Tail call handling pseudo
def SI_TCRETURN : SPseudoInstSI <(outs),
  (ins SSrc_b64:$src0, unknown:$callee, i32imm:$fpdiff),
  [(AMDGPUtc_return i64:$src0, tglobaladdr:$callee, i32:$fpdiff)]> {
  let Size = 4;
  let isCall = 1;
  let isTerminator = 1;
  let isReturn = 1;
  let isBarrier = 1;
  let UseNamedOperandTable = 1;
  let SchedRW = [WriteBranch];
}

def ADJCALLSTACKUP : SPseudoInstSI<
  (outs), (ins i32imm:$amt0, i32imm:$amt1),
  [(callseq_start timm:$amt0, timm:$amt1)],
  "; adjcallstackup $amt0 $amt1"> {
  let Size = 8; // Worst case. (s_add_u32 + constant)
  let FixedSize = 1;
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
}

def ADJCALLSTACKDOWN : SPseudoInstSI<
  (outs), (ins i32imm:$amt1, i32imm:$amt2),
  [(callseq_end timm:$amt1, timm:$amt2)],
  "; adjcallstackdown $amt1"> {
  let Size = 8; // Worst case. (s_add_u32 + constant)
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
}

let Defs = [M0, EXEC, SCC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC, SCC], UseNamedOperandTable = 1

multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;
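
// Pseudo for materializing a 64-bit PC-relative address; it is typically
// expanded into s_getpc_b64 followed by an s_add_u32/s_addc_u32 pair, hence
// the SCC def below.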
def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
  [(set SReg_64:$dst,
      (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
  let Defs = [SCC];
}

def : GCNPat <
  (AMDGPUinit_exec i64:$src),
  (SI_INIT_EXEC (as_i64imm $src))
>;

def : GCNPat <
  (AMDGPUinit_exec_from_input i32:$input, i32:$shift),
  (SI_INIT_EXEC_FROM_INPUT (i32 $input), (as_i32imm $shift))
>;

def : GCNPat<
  (AMDGPUtrap timm:$trapid),
  (S_TRAP $trapid)
>;

def : GCNPat<
  (AMDGPUelse i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

def : Pat <
  // -1.0 as i32 (LowerINTRINSIC_VOID converts all other constants to -1.0)
  (AMDGPUkill (i32 -1082130432)),
  (SI_KILL_I1_PSEUDO (i1 0), 0)
>;

def : Pat <
  (int_amdgcn_kill i1:$src),
  (SI_KILL_I1_PSEUDO $src, 0)
>;

def : Pat <
  (int_amdgcn_kill (i1 (not i1:$src))),
  (SI_KILL_I1_PSEUDO $src, -1)
>;

def : Pat <
  (AMDGPUkill i32:$src),
  (SI_KILL_F32_COND_IMM_PSEUDO $src, 0, 3) // 3 means SETOGE
>;

def : Pat <
  (int_amdgcn_kill (i1 (setcc f32:$src, InlineFPImm<f32>:$imm, cond:$cond))),
  (SI_KILL_F32_COND_IMM_PSEUDO $src, (bitcast_fpimm_to_i32 $imm), (cond_as_i32imm $cond))
>;

// TODO: we could add more variants for other types of conditionals
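
// 33 is the value of CmpInst::ICMP_NE. Comparing an i1 against false with
// "ne" is an identity, so the pattern below simply copies the SGPR lane mask
// that already represents the i1 value.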
def : Pat <
  (int_amdgcn_icmp i1:$src, (i1 0), (i32 33)),
  (COPY $src) // Return the SGPRs representing i1 src
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let OtherPredicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;
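
// The fract folds below are only applied under unsafe-fp-math, since
// v_fract is not guaranteed to match x - floor(x) exactly for special
// inputs such as infinities and NaNs.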

// Convert (x - floor(x)) to fract(x)
def : GCNPat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : GCNPat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End OtherPredicates = [UnsafeFPMath]

// f16_to_fp patterns
def : GCNPat <
  (f32 (f16_to_fp i32:$src0)),
  (V_CVT_F32_F16_e64 SRCMODS.NONE, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;
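
// Bit operations on the f16 sign bit map directly onto VOP source modifiers:
// and with 0x7fff clears the sign (|x|), or with 0x8000 sets it (-|x|), and
// xor with 0x8000 toggles it (-x).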

def : GCNPat <
  (f32 (f16_to_fp (and_oneuse i32:$src0, 0x7fff))),
  (V_CVT_F32_F16_e64 SRCMODS.ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : GCNPat <
  (f32 (f16_to_fp (i32 (srl_oneuse (and_oneuse i32:$src0, 0x7fff0000), (i32 16))))),
  (V_CVT_F32_F16_e64 SRCMODS.ABS, (i32 (V_LSHRREV_B32_e64 (i32 16), i32:$src0)), DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : GCNPat <
  (f32 (f16_to_fp (or_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG_ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : GCNPat <
  (f32 (f16_to_fp (xor_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : GCNPat <
  (f64 (fpextend f16:$src)),
  (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

// fp_to_fp16 patterns
def : GCNPat <
  (i32 (AMDGPUfp_to_f16 (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))),
  (V_CVT_F16_F32_e64 $src0_modifiers, f32:$src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : GCNPat <
  (i32 (fp_to_sint f16:$src)),
  (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : GCNPat <
  (i32 (fp_to_uint f16:$src)),
  (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : GCNPat <
  (f16 (sint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
>;

def : GCNPat <
  (f16 (uint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
>;

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

multiclass FMADPat <ValueType vt, Instruction inst> {
  def : GCNPat <
    (vt (fmad (VOP3NoMods vt:$src0),
              (VOP3NoMods vt:$src1),
              (VOP3NoMods vt:$src2))),
    (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1,
          SRCMODS.NONE, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
  >;
}

defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;
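
// FMADPat above only matches when all sources are modifier-free, since v_mac
// ties $src2 to the destination; when source modifiers are present, the
// FMADModsPat patterns below select v_mad instead.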
class FMADModsPat<Instruction inst, SDPatternOperator mad_opr, ValueType Ty>
  : GCNPat<
  (Ty (mad_opr (VOP3Mods Ty:$src0, i32:$src0_mod),
               (VOP3Mods Ty:$src1, i32:$src1_mod),
               (VOP3Mods Ty:$src2, i32:$src2_mod))),
  (inst $src0_mod, $src0, $src1_mod, $src1,
        $src2_mod, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FMADModsPat<V_MAD_F32, AMDGPUfmad_ftz, f32>;
def : FMADModsPat<V_MAD_F16, AMDGPUfmad_ftz, f16> {
  let SubtargetPredicate = Has16BitInsts;
}
|
2017-02-25 07:00:29 +08:00
|
|
|
|
2016-11-16 11:39:12 +08:00
|
|
|
multiclass SelectPat <ValueType vt, Instruction inst> {
|
2017-10-03 08:06:41 +08:00
|
|
|
def : GCNPat <
|
2016-11-16 11:16:26 +08:00
|
|
|
(vt (select i1:$src0, vt:$src1, vt:$src2)),
|
|
|
|
(inst $src2, $src1, $src0)
|
|
|
|
>;
|
|
|
|
}
|
|
|
|
|
2016-11-16 11:39:12 +08:00
|
|
|
defm : SelectPat <i16, V_CNDMASK_B32_e64>;
|
|
|
|
defm : SelectPat <i32, V_CNDMASK_B32_e64>;
|
|
|
|
defm : SelectPat <f16, V_CNDMASK_B32_e64>;
|
|
|
|
defm : SelectPat <f32, V_CNDMASK_B32_e64>;
|
2016-11-16 11:16:26 +08:00
|
|
|
|
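
// V_BCNT_U32_B32 computes popcount($popcnt) + $val, so a divergent ctpop
// whose result feeds an add folds into a single instruction.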
let AddedComplexity = 1 in {
def : GCNPat <
  (i32 (add (i32 (getDivergentFrag<ctpop>.ret i32:$popcnt)), i32:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;
}

def : GCNPat <
  (i16 (add (i16 (trunc (getDivergentFrag<ctpop>.ret i32:$popcnt))), i16:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting **********/
/********** ============================================ **********/
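
// One Extract_Element/Insert_Element pattern is instantiated per lane,
// mapping lane N to subregister subN.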
foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

def : Pat <
  (extract_subvector v4i16:$vec, (i32 0)),
  (v2i16 (EXTRACT_SUBREG v4i16:$vec, sub0))
>;

def : Pat <
  (extract_subvector v4i16:$vec, (i32 2)),
  (v2i16 (EXTRACT_SUBREG v4i16:$vec, sub1))
>;

def : Pat <
  (extract_subvector v4f16:$vec, (i32 0)),
  (v2f16 (EXTRACT_SUBREG v4f16:$vec, sub0))
>;

def : Pat <
  (extract_subvector v4f16:$vec, (i32 2)),
  (v2f16 (EXTRACT_SUBREG v4f16:$vec, sub1))
>;

// FIXME: Why are only some of these type combinations handled for SReg and
// VReg?
// 16-bit bitcast
def : BitConvert <i16, f16, VGPR_32>;
def : BitConvert <f16, i16, VGPR_32>;
def : BitConvert <i16, f16, SReg_32>;
def : BitConvert <f16, i16, SReg_32>;

// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;
def : BitConvert <v2i16, i32, SReg_32>;
def : BitConvert <i32, v2i16, SReg_32>;
def : BitConvert <v2f16, i32, SReg_32>;
def : BitConvert <i32, v2f16, SReg_32>;
def : BitConvert <v2i16, v2f16, SReg_32>;
def : BitConvert <v2f16, v2i16, SReg_32>;
def : BitConvert <v2f16, f32, SReg_32>;
def : BitConvert <f32, v2f16, SReg_32>;
def : BitConvert <v2i16, f32, SReg_32>;
def : BitConvert <f32, v2i16, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i16, v4f16, VReg_64>;
def : BitConvert <v4f16, v4i16, VReg_64>;

// FIXME: Make SGPR
def : BitConvert <v2i32, v4f16, VReg_64>;
def : BitConvert <v4f16, v2i32, VReg_64>;
def : BitConvert <v2i32, v4i16, VReg_64>;
def : BitConvert <v4i16, v2i32, VReg_64>;
def : BitConvert <v2f32, v4f16, VReg_64>;
def : BitConvert <v4f16, v2f32, VReg_64>;
def : BitConvert <v2f32, v4i16, VReg_64>;
def : BitConvert <v4i16, v2f32, VReg_64>;
def : BitConvert <v4i16, f64, VReg_64>;
def : BitConvert <v4f16, f64, VReg_64>;
def : BitConvert <f64, v4i16, VReg_64>;
def : BitConvert <f64, v4f16, VReg_64>;
def : BitConvert <v4i16, i64, VReg_64>;
def : BitConvert <v4f16, i64, VReg_64>;
def : BitConvert <i64, v4i16, VReg_64>;
def : BitConvert <i64, v4f16, VReg_64>;

def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

// If denormals are not enabled, clamping only affects the compare of the
// inputs; the output result is not flushed.
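
// Clamp is implemented by feeding the value through a max with itself and
// setting the instruction's clamp bit, so both operand slots carry the
// same source and modifiers.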
class ClampPat<Instruction inst, ValueType vt> : GCNPat <
  (vt (AMDGPUclamp (VOP3Mods vt:$src0, i32:$src0_modifiers))),
  (inst i32:$src0_modifiers, vt:$src0,
        i32:$src0_modifiers, vt:$src0, DSTCLAMP.ENABLE, DSTOMOD.NONE)
>;

def : ClampPat<V_MAX_F32_e64, f32>;
def : ClampPat<V_MAX_F64, f64>;
def : ClampPat<V_MAX_F16_e64, f16>;

let SubtargetPredicate = HasVOP3PInsts in {
def : GCNPat <
  (v2f16 (AMDGPUclamp (VOP3PMods v2f16:$src0, i32:$src0_modifiers))),
  (V_PK_MAX_F16 $src0_modifiers, $src0,
                $src0_modifiers, $src0, DSTCLAMP.ENABLE)
>;
}

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

def : GCNPat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80000000))) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : GCNPat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : GCNPat <
  (fabs f32:$src),
  (S_AND_B32 $src, (S_MOV_B32 (i32 0x7fffffff)))
>;

def : GCNPat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000)))
>;

def : GCNPat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : GCNPat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))),
    sub1)
>;
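
// fcopysign is a bitfield insert: V_BFI_B32 with mask M computes
// (M & $src0) | (~M & $src1), so a magnitude mask keeps $src0's value
// bits and takes the sign bit from $src1.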
def : GCNPat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : GCNPat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : GCNPat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : GCNPat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : GCNPat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

def : GCNPat <
  (fneg f16:$src),
  (S_XOR_B32 $src, (S_MOV_B32 (i32 0x00008000)))
>;

def : GCNPat <
  (fabs f16:$src),
  (S_AND_B32 $src, (S_MOV_B32 (i32 0x00007fff)))
>;

def : GCNPat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;

def : GCNPat <
  (fneg v2f16:$src),
  (S_XOR_B32 $src, (S_MOV_B32 (i32 0x80008000)))
>;

def : GCNPat <
  (fabs v2f16:$src),
  (S_AND_B32 $src, (S_MOV_B32 (i32 0x7fff7fff)))
>;

// This is really (fneg (fabs v2f16:$src))
//
// fabs is not reported as free because there is a modifier for it in
// VOP3P instructions, so it is turned into the bit op.
def : GCNPat <
  (fneg (v2f16 (bitconvert (and_oneuse i32:$src, 0x7fff7fff)))),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80008000))) // Set sign bit
>;

def : GCNPat <
  (fneg (v2f16 (fabs v2f16:$src))),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80008000))) // Set sign bit
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/
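
// VGPRImm only matches when the immediate's uses favor a VGPR; otherwise
// the scalar S_MOV_B32 patterns below are used.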
def : GCNPat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : GCNPat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : GCNPat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for an ordering issue with the peephole optimizer,
// where a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32.
def : GCNPat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : GCNPat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : GCNPat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

def : GCNPat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : GCNPat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use an s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : GCNPat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : GCNPat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

def : GCNPat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : GCNPat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range.
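// AMDGPUurecip approximates 2^32 / $src0: convert to float, take the fast
// reciprocal, rescale by FP_UINT_MAX_PLUS_1 (2^32 as a float), and convert
// back to an unsigned integer.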
def : GCNPat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24, 1>;
def : UMad24Pat<V_MAD_U32_U24, 1>;

// FIXME: This should only be done for VALU inputs
defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;
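
// A truncated 64-bit right shift by less than 32 is a funnel shift of the
// two 32-bit halves, which V_ALIGNBIT_B32 performs directly.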
def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))),
          (V_ALIGNBIT_B32 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)),
                          (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>;

def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
          (V_ALIGNBIT_B32 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)),
                          (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>;

/********** ====================== **********/
/********** Indirect addressing **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : GCNPat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : GCNPat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//
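
// Unsigned |x - y| + z, whether written with umax/umin or with a
// compare-and-select, maps to V_SAD_U32 (sum of absolute differences).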
def : GCNPat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2, (i1 0))
>;

def : GCNPat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2, (i1 0))
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//
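
// The S_BFE immediate packs the field as (width << 16) | offset; e.g.
// 0x80000 selects an 8-bit field starting at bit 0.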
def : GCNPat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16

// Handle sext_inreg in i64
def : GCNPat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : GCNPat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : GCNPat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : GCNPat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : GCNPat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : GCNPat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

def : GCNPat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : GCNPat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;
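
// Extend i1 to i64: materialize 0/1 in the low half with a cndmask and
// zero the high half.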
class ZExt_i64_i1_Pat <SDNode ext> : GCNPat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;

def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : GCNPat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

def : GCNPat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;
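
// Conversion to i1 only needs to distinguish zero from one (or minus one
// for the signed case), so it reduces to an equality compare against
// +/-1.0.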
class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt, SDPatternOperator fp_to_int> : GCNPat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : GCNPat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : GCNPat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : GCNPat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;
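
// 1-bit add and sub have no carry, so both reduce to xor. Adding or
// subtracting the all-ones constant flips the bit, which the
// higher-priority patterns below match as a logical not.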
def : GCNPat <
  (i1 (add i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : GCNPat <
  (i1 (sub i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

let AddedComplexity = 1 in {
def : GCNPat <
  (i1 (add i1:$src0, (i1 -1))),
  (S_NOT_B64 $src0)
>;

def : GCNPat <
  (i1 (sub i1:$src0, (i1 -1))),
  (S_NOT_B64 $src0)
>;
}

def : GCNPat <
  (f16 (sint_to_fp i1:$src)),
  (V_CVT_F16_F32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src))
>;

def : GCNPat <
  (f16 (uint_to_fp i1:$src)),
  (V_CVT_F16_F32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src))
>;

def : GCNPat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : GCNPat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

def : GCNPat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : GCNPat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : GCNPat <
  (i32 (AMDGPUfp16_zext f16:$src)),
  (COPY $src)
>;

def : GCNPat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : GCNPat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : GCNPat <
  (i1 (trunc i16:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : GCNPat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;
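
// bswap: after a rotate right by 24, result bytes 0 and 2 hold the
// correctly swapped values; after a rotate right by 8, bytes 1 and 3 do.
// V_BFI_B32 with mask 0x00ff00ff merges the two rotations.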
def : GCNPat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;
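
// fcanonicalize: when denormals are flushed, a multiply by 1.0 both
// flushes and quiets; when denormals are preserved, max(x, x) quiets
// signaling NaNs without changing the value.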
let OtherPredicates = [NoFP16Denormals] in {
def : GCNPat<
  (fcanonicalize (f16 (VOP3Mods f16:$src, i32:$src_mods))),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), $src_mods, $src, 0, 0)
>;

def : GCNPat<
  (fcanonicalize (f16 (fneg (VOP3Mods f16:$src, i32:$src_mods)))),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_NEG_ONE), $src_mods, $src, 0, 0)
>;

def : GCNPat<
  (fcanonicalize (v2f16 (VOP3PMods v2f16:$src, i32:$src_mods))),
  (V_PK_MUL_F16 0, (i32 CONST.V2FP16_ONE), $src_mods, $src, DSTCLAMP.NONE)
>;
}

let OtherPredicates = [FP16Denormals] in {
def : GCNPat<
  (fcanonicalize (f16 (VOP3Mods f16:$src, i32:$src_mods))),
  (V_MAX_F16_e64 $src_mods, $src, $src_mods, $src, 0, 0)
>;

let SubtargetPredicate = HasVOP3PInsts in {
def : GCNPat<
  (fcanonicalize (v2f16 (VOP3PMods v2f16:$src, i32:$src_mods))),
  (V_PK_MAX_F16 $src_mods, $src, $src_mods, $src, DSTCLAMP.NONE)
>;
}
}

let OtherPredicates = [NoFP32Denormals] in {
def : GCNPat<
  (fcanonicalize (f32 (VOP3Mods f32:$src, i32:$src_mods))),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), $src_mods, $src, 0, 0)
>;

def : GCNPat<
  (fcanonicalize (f32 (fneg (VOP3Mods f32:$src, i32:$src_mods)))),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_NEG_ONE), $src_mods, $src, 0, 0)
>;
}

let OtherPredicates = [FP32Denormals] in {
def : GCNPat<
  (fcanonicalize (f32 (VOP3Mods f32:$src, i32:$src_mods))),
  (V_MAX_F32_e64 $src_mods, $src, $src_mods, $src, 0, 0)
>;
}

let OtherPredicates = [NoFP64Denormals] in {
def : GCNPat<
  (fcanonicalize (f64 (VOP3Mods f64:$src, i32:$src_mods))),
  (V_MUL_F64 0, CONST.FP64_ONE, $src_mods, $src, 0, 0)
>;
}

let OtherPredicates = [FP64Denormals] in {
def : GCNPat<
  (fcanonicalize (f64 (VOP3Mods f64:$src, i32:$src_mods))),
  (V_MAX_F64 $src_mods, $src, $src_mods, $src, 0, 0)
>;
}
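
// V_FMAC ties the addend to the destination, so $src2 cannot carry source
// modifiers; the pattern therefore requires a plain operand there.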
let OtherPredicates = [HasDLInsts] in {
def : GCNPat <
  (fma (f32 (VOP3Mods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)),
       (f32 (VOP3Mods f32:$src1, i32:$src1_modifiers)),
       (f32 (VOP3NoMods f32:$src2))),
  (V_FMAC_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1,
                  SRCMODS.NONE, $src2, $clamp, $omod)
>;
} // End OtherPredicates = [HasDLInsts]

// Allow integer inputs
class ExpPattern<SDPatternOperator node, ValueType vt, Instruction Inst> : GCNPat<
  (node (i8 timm:$tgt), (i8 timm:$en), vt:$src0, vt:$src1, vt:$src2, vt:$src3, (i1 timm:$compr), (i1 timm:$vm)),
  (Inst i8:$tgt, vt:$src0, vt:$src1, vt:$src2, vt:$src3, i1:$vm, i1:$compr, i8:$en)
>;

def : ExpPattern<AMDGPUexport, i32, EXP>;
def : ExpPattern<AMDGPUexport_done, i32, EXP_DONE>;

// COPY is a workaround for a TableGen limitation: S_LSHL_B32 has multiple
// outputs because of its implicit SCC def, which patterns cannot use
// directly.
def : GCNPat <
  (v2i16 (build_vector (i16 0), i16:$src1)),
  (v2i16 (COPY (S_LSHL_B32 i16:$src1, (i16 16))))
>;

def : GCNPat <
  (v2i16 (build_vector i16:$src0, (i16 undef))),
  (v2i16 (COPY $src0))
>;

def : GCNPat <
  (v2f16 (build_vector f16:$src0, (f16 undef))),
  (v2f16 (COPY $src0))
>;

def : GCNPat <
  (v2i16 (build_vector (i16 undef), i16:$src1)),
  (v2i16 (COPY (S_LSHL_B32 $src1, (i32 16))))
>;

def : GCNPat <
  (v2f16 (build_vector (f16 undef), f16:$src1)),
  (v2f16 (COPY (S_LSHL_B32 $src1, (i32 16))))
>;
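
// With VOP3P, two 16-bit elements are packed with the S_PACK family:
// _LL takes both low halves, _LH mixes a low and a high half, and _HH
// takes both high halves.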
let SubtargetPredicate = HasVOP3PInsts in {
def : GCNPat <
  (v2i16 (build_vector i16:$src0, i16:$src1)),
  (v2i16 (S_PACK_LL_B32_B16 $src0, $src1))
>;

// With multiple uses of the shift, this will duplicate the shift and
// increase register pressure.
def : GCNPat <
  (v2i16 (build_vector i16:$src0, (i16 (trunc (srl_oneuse i32:$src1, (i32 16)))))),
  (v2i16 (S_PACK_LH_B32_B16 i16:$src0, i32:$src1))
>;

def : GCNPat <
  (v2i16 (build_vector (i16 (trunc (srl_oneuse i32:$src0, (i32 16)))),
                       (i16 (trunc (srl_oneuse i32:$src1, (i32 16)))))),
  (v2i16 (S_PACK_HH_B32_B16 $src0, $src1))
>;

// TODO: Should source modifiers be matched to v_pack_b32_f16?
def : GCNPat <
  (v2f16 (build_vector f16:$src0, f16:$src1)),
  (v2f16 (S_PACK_LL_B32_B16 $src0, $src1))
>;

} // End SubtargetPredicate = HasVOP3PInsts

def : GCNPat <
  (v2f16 (scalar_to_vector f16:$src0)),
  (COPY $src0)
>;

def : GCNPat <
  (v2i16 (scalar_to_vector i16:$src0)),
  (COPY $src0)
>;

def : GCNPat <
  (v4i16 (scalar_to_vector i16:$src0)),
  (INSERT_SUBREG (IMPLICIT_DEF), $src0, sub0)
>;

def : GCNPat <
  (v4f16 (scalar_to_vector f16:$src0)),
  (INSERT_SUBREG (IMPLICIT_DEF), $src0, sub0)
>;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let SubtargetPredicate = isSI in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
def : GCNPat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End SubtargetPredicate = isSI

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// TODO: Also do for 64-bit.
def : GCNPat<
  (add i32:$src0, (i32 NegSubInlineConst32:$src1)),
  (S_SUB_I32 $src0, NegSubInlineConst32:$src1)
>;
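
// S_BFM_B32 materializes ((1 << $a) - 1) << $b, i.e. a bitfield mask of
// width $a starting at bit $b.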
multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : GCNPat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : GCNPat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;

defm : BFEPattern <V_BFE_U32, V_BFE_I32, S_MOV_B32>;
defm : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64, SReg_64>;

defm : IntMed3Pat<V_MED3_I32, smin, smax, smin_oneuse, smax_oneuse>;
defm : IntMed3Pat<V_MED3_U32, umin, umax, umin_oneuse, umax_oneuse>;

// This matches 16 permutations of
// max(min(x, y), min(max(x, y), z))
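// The inner min/max must be single-use so replacing them with one med3 is
// profitable, and the nodes must be nnan because V_MED3 need not match
// the IEEE NaN semantics of minnum/maxnum.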
class FPMed3Pat<ValueType vt,
                Instruction med3Inst> : GCNPat<
  (fmaxnum_like (fminnum_like_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                     (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                (fminnum_like_oneuse (fmaxnum_like_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                                          (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                                     (vt (VOP3Mods_nnan vt:$src2, i32:$src2_mods)))),
  (med3Inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

class FP16Med3Pat<ValueType vt,
                  Instruction med3Inst> : GCNPat<
  (fmaxnum_like (fminnum_like_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                     (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                (fminnum_like_oneuse (fmaxnum_like_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                                          (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                                     (vt (VOP3Mods_nnan vt:$src2, i32:$src2_mods)))),
  (med3Inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2, DSTCLAMP.NONE)
>;

multiclass Int16Med3Pat<Instruction med3Inst,
                        SDPatternOperator min,
                        SDPatternOperator max,
                        SDPatternOperator max_oneuse,
                        SDPatternOperator min_oneuse,
                        ValueType vt = i16> {
  // This matches 16 permutations of
  // max(min(x, y), min(max(x, y), z))
  def : GCNPat <
    (max (min_oneuse vt:$src0, vt:$src1),
         (min_oneuse (max_oneuse vt:$src0, vt:$src1), vt:$src2)),
    (med3Inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE)
  >;

  // This matches 16 permutations of
  // min(max(a, b), max(min(a, b), c))
  def : GCNPat <
    (min (max_oneuse vt:$src0, vt:$src1),
         (max_oneuse (min_oneuse vt:$src0, vt:$src1), vt:$src2)),
    (med3Inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE)
  >;
}

def : FPMed3Pat<f32, V_MED3_F32>;

let OtherPredicates = [isGFX9] in {
def : FP16Med3Pat<f16, V_MED3_F16>;
defm : Int16Med3Pat<V_MED3_I16, smin, smax, smax_oneuse, smin_oneuse>;
defm : Int16Med3Pat<V_MED3_U16, umin, umax, umax_oneuse, umin_oneuse>;
} // End OtherPredicates = [isGFX9]
|