//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;

def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;

include "VOPInstructions.td"
include "SOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m;

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

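// Note (per the SI ISA docs, not stated in this file): v_interp_p1_f32
// computes $dst = p10 * $i + p0, and v_interp_p2_f32 then computes
// $dst = p20 * $j + $src0, the two steps of barycentric attribute
// interpolation. The p0/p10/p20 attribute data lives in LDS and is
// addressed through M0, hence Uses = [M0, EXEC] above.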
multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$dst),
  (ins VGPR_32:$i, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_p1_f32 $dst, $i, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_p1 i32:$i, (i32 imm:$attr_chan),
                                   (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

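// With only 16 LDS banks the interpolant is read in two passes, so $dst
// must not reuse the source register; presumably that is what the
// @earlyclobber constraint below guards against.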
let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $dst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$dst),
  (ins VGPR_32:$src0, VGPR_32:$j, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_p2_f32 $dst, [$src0], $j, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_p2 f32:$src0, i32:$j, (i32 imm:$attr_chan),
                                   (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $dst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$dst),
  (ins InterpSlot:$src0, i32imm:$attr_chan, i32imm:$attr),
  "v_interp_mov_f32 $dst, $src0, $attr_chan, $attr, [m0]",
  [(set f32:$dst, (AMDGPUinterp_mov (i32 imm:$src0), (i32 imm:$attr_chan),
                                    (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : PseudoInstSI <(outs VReg_64:$vdst), (ins VSrc_b64:$src0)> {
  let VALU = 1;
}
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

let usesCustomInserter = 1, SALU = 1 in {
def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End let usesCustomInserter = 1, SALU = 1

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator instruction to use after control flow instructions
// replaced with exec mask operations.
def SI_MASK_BRANCH : PseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let SALU = 1;
  let Uses = [EXEC];
}

let isTerminator = 1 in {

def SI_IF: CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 8;
  let mayStore = 1;
  let mayLoad = 1;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Constraints = "$src = $dst";
  let Size = 12;
  let mayStore = 1;
  let mayLoad = 1;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(int_amdgcn_loop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_ELSE_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
  [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
  (outs), (ins VSrc_b32:$src),
  [(AMDGPUkill i32:$src)]> {
  let isConvergent = 1;
  let usesCustomInserter = 1;
}

def SI_KILL_TERMINATOR : SPseudoInstSI <
  (outs), (ins VSrc_b32:$src)> {
  let isTerminator = 1;
}

} // End Uses = [EXEC], Defs = [EXEC,VCC]

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_RETURN : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasSideEffects = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
}

let Defs = [M0, EXEC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC], UseNamedOperandTable = 1

multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC]
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
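      // A hedged reading of the formula above: vgpr_class.Size is in bits,
      // so !srl(Size, 5) is the number of 32-bit subregisters, each spilled
      // by an 8-byte MUBUF instruction, e.g. VReg_128 -> 8 + 8 * 4 = 40.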
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;

def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr),
  [(set SReg_64:$dst, (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr))))]> {
  let Defs = [SCC];
}

} // End SubtargetPredicate = isGCN

let Predicates = [isGCN] in {

def : Pat<
  (int_amdgcn_else i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

def : Pat <
  (int_AMDGPU_kilp),
  (SI_KILL 0xbf800000)
>;
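
// In the kilp pattern above, 0xbf800000 is -1.0f; kill discards the pixel
// when its operand is negative, so -1.0 discards unconditionally.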

def : Pat <
  (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr,
                 f32:$src0, f32:$src1, f32:$src2, f32:$src3),
  (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm,
       $src0, $src1, $src2, $src3)
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let Predicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;

// Convert (x - floor(x)) to fract(x)
def : Pat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : Pat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [UnsafeFPMath]

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;
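
// V_BCNT_U32_B32 computes CountOneBits($popcnt) + $val, so the separate add
// above folds into the one instruction.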

def : Pat <
  (i32 (select i1:$src0, i32:$src1, i32:$src2)),
  (V_CNDMASK_B32_e64 $src2, $src1, $src0)
>;
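
// V_CNDMASK_B32 returns its second source when the condition is set, so the
// "true" value $src1 goes in the second operand slot above.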

// Pattern for V_MAC_F32
def : Pat <
  (fmad (VOP3NoMods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
        (VOP3NoMods f32:$src1, i32:$src1_modifiers),
        (VOP3NoMods f32:$src2, i32:$src2_modifiers)),
  (V_MAC_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1,
                 $src2_modifiers, $src2, $clamp, $omod)
>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting  **********/
/********** ============================================ **********/

foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

// FIXME: Why do only some of these type combinations exist for SReg and
// VReg?
// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

def : Pat <
  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
               (f32 FP_ZERO), (f32 FP_ONE)),
  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, 0, 1, $omod)
>;
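
// The clamp to [0.0, 1.0] above is implemented as "$src0 + 0" with the VOP3
// clamp bit set, i.e. an identity add that only applies the clamping.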

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/
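
// These are all plain bit operations on the IEEE-754 sign bit: fabs clears
// bit 31, fneg toggles it, and fneg(fabs x) sets it unconditionally.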

// Prevent expanding both fneg and fabs.

def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 0x80000000)) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
                  (V_MOV_B32_e32 0x80000000)), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 0x7fffffff))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 0x80000000))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (EXTRACT_SUBREG f64:$src, sub1),
                   (V_MOV_B32_e32 0x7fffffff)), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
                   (V_MOV_B32_e32 0x80000000)),
    sub1)
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (SGPRImm<(i32 imm)>:$imm),
  (S_MOV_B32 imm:$imm)
>;

def : Pat <
  (SGPRImm<(f32 fpimm)>:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (f32 fpimm:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use an s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;
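
// POW_Common expands pow($src0, $src1) as exp2($src1 * log2($src0)) using
// the three instructions supplied here.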

def : Pat <
  (int_AMDGPU_cube v4f32:$src),
  (REG_SEQUENCE VReg_128,
    (V_CUBETC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub0,
    (V_CUBESC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub1,
    (V_CUBEMA_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub2,
    (V_CUBEID_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
                  0 /* clamp */, 0 /* omod */), sub3)
>;

def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 CONST.FP_UINT_MAX_PLUS_1,
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
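
// CONST.FP_UINT_MAX_PLUS_1 is 2^32 as an f32, so the pattern above
// approximates 2^32 / $src0, the fixed-point reciprocal used when expanding
// 32-bit unsigned division.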

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ====================== **********/
/********** Indirect addressing    **********/
/********** ====================== **********/
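
// The SI_INDIRECT_SRC/DST pseudos used below are expected to be expanded by
// their custom inserters into an M0 write of the index plus
// V_MOVRELS_B32 / V_MOVRELD_B32.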

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//
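
// V_SAD_U32 computes |$src0 - $src1| + $src2; the two patterns below match
// the umax/umin and the compare-and-select forms of the absolute difference.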

def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, 65536)>; // 0 | 1 << 16
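
// For S_BFE with an immediate src1 the low bits hold the field offset and
// bits [22:16] the field width, hence the (width << 16 | offset) constants
// used above and below.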
|
|
|
|
|
|
|
|
// Handle sext_inreg in i64
|
|
|
|
def : Pat <
|
|
|
|
(i64 (sext_inreg i64:$src, i1)),
|
2014-11-15 02:18:16 +08:00
|
|
|
(S_BFE_I64 i64:$src, 0x10000) // 0 | 1 << 16
|
2014-04-22 11:49:30 +08:00
|
|
|
>;
|
|
|
|
|
|
|
|
def : Pat <
|
|
|
|
(i64 (sext_inreg i64:$src, i8)),
|
2014-11-15 02:18:16 +08:00
|
|
|
(S_BFE_I64 i64:$src, 0x80000) // 0 | 8 << 16
|
2014-04-22 11:49:30 +08:00
|
|
|
>;
|
|
|
|
|
|
|
|
def : Pat <
|
|
|
|
(i64 (sext_inreg i64:$src, i16)),
|
2014-11-15 02:18:16 +08:00
|
|
|
(S_BFE_I64 i64:$src, 0x100000) // 0 | 16 << 16
|
|
|
|
>;
|
|
|
|
|
|
|
|
def : Pat <
|
|
|
|
(i64 (sext_inreg i64:$src, i32)),
|
|
|
|
(S_BFE_I64 i64:$src, 0x200000) // 0 | 32 << 16
|
2014-04-22 11:49:30 +08:00
|
|
|
>;

def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 0), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;
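
// zext must define the high half, so it materializes a zero with S_MOV_B32;
// anyext only guarantees the low 32 bits, so IMPLICIT_DEF leaves the high
// half undefined and costs no instruction.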

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 0), sub1)
>;

def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, 31), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 0, -1, $src), sub0,
    (V_CNDMASK_B32_e64 0, -1, $src), sub1)
>;
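
// For i32 -> i64 the high half is the sign word ($src >> 31, arithmetic);
// for i1 -> i64 both halves are the condition replicated as 0 or -1, so the
// same V_CNDMASK feeds both subregisters.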

class FPToI1Pat<Instruction Inst, int KOne, ValueType vt,
                SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, KOne, $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, f64, fp_to_sint>;
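
// An i1 result can be non-zero for exactly one source value: 1.0 for the
// unsigned conversion, -1.0 for the signed one (i1 true sign-extends to -1).
// The conversion therefore reduces to an equality compare against that
// constant, which KOne supplies as an inline operand.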

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, any copies from SCC
// into these instructions will be moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;
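
// Concretely, an i1 here is a 64-bit lane mask in an SGPR pair (as produced
// by VOPC compares), so the scalar 64-bit bitwise ops implement the per-lane
// logical operation in a single instruction.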

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_ONE, $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;
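
// i1 -> float is a select between 0.0 and +/-1.0. For f32 the result is an
// inline constant, so one V_CNDMASK suffices; V_CNDMASK_B32 is only 32 bits
// wide, so the f64 versions select an integer 0 or +/-1 and convert it.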

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_I32_e64 (S_AND_B32 (i32 1), $a), 1)
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_I32_e64 (S_AND_B32 (i32 1),
                    (EXTRACT_SUBREG $a, sub0)), 1)
>;
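
// Truncation to i1 keeps only bit 0: mask it with S_AND_B32 and compare the
// result against 1, so the boolean materializes as a lane mask in an SGPR
// pair, matching the i1 representation used throughout this file.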

def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 0x00ff00ff),
             (V_ALIGNBIT_B32 $a, $a, 24),
             (V_ALIGNBIT_B32 $a, $a, 8))
>;
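
// With equal sources V_ALIGNBIT_B32 is a rotate: rotr(a, 24) already holds
// the byte-swapped value in byte lanes 0 and 2, and rotr(a, 8) in lanes 1
// and 3. V_BFI_B32 with mask 0x00ff00ff merges lanes 0/2 from the first
// rotate with lanes 1/3 from the second, giving bswap in three instructions.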

def : Pat <
  (f32 (select i1:$src2, f32:$src1, f32:$src0)),
  (V_CNDMASK_B32_e64 $src0, $src1, $src2)
>;

multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV 0))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;
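
// S_BFM_B32 computes ((1 << width) - 1) << offset, so the first pattern maps
// the whole mask expression onto one instruction and the second handles the
// unshifted mask by pinning the offset to zero with (MOV 0).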

def : BFEPattern <V_BFE_U32, S_MOV_B32>;

def : Pat<
  (fcanonicalize f32:$src),
  (V_MUL_F32_e64 0, CONST.FP32_ONE, 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f64:$src),
  (V_MUL_F64 0, CONST.FP64_ONE, 0, $src, 0, 0)
>;
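
// Multiplying by 1.0 is the standard canonicalizing no-op on the VALU: the
// multiply quiets signaling NaNs and flushes denormals according to the
// current FP mode, which is exactly what fcanonicalize requires.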

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, 3 /*NaN*/)),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;
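
// 0x3fefffffffffffff is the largest f64 below 1.0 (1.0 - 2^-53), so the
// V_MIN clamps the buggy V_FRACT result back into [0, 1). Class mask 3
// tests for signaling or quiet NaN, in which case x is passed through
// unchanged.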

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;
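
// V_MED3 returns the middle of its three operands; IntMed3Pat matches the
// canonical max(min(x, y), min(max(x, y), z)) expansion of a median, in
// signed and unsigned flavors.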

//============================================================================//
// Assembler aliases
//============================================================================//

def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;

} // End isGCN predicate