//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}
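
// The -1, -1 arguments tell the generated base class (and ultimately
// TargetInstrInfo) that this target has no call frame setup/destroy pseudo
// opcodes.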
AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
  : AMDGPUGenInstrInfo(-1, -1), ST(ST) {}
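
// Let the machine scheduler cluster adjacent loads for this target.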
bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                              int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}
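
// Return the variant of a MIMG opcode that writes only the requested number
// of channels; any other channel count (including the full 4) keeps the
// original opcode.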
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td.
enum SIEncodingFamily {
  SI = 0,
  VI = 1
};

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}
}
}
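
// Pick the instruction encoding family (SI or VI encodings) that the
// TableGen'd opcode tables use for the given subtarget generation.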
static SIEncodingFamily subtargetEncodingFamily(const AMDGPUSubtarget &ST) {
  switch (ST.getGeneration()) {
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
    return SIEncodingFamily::VI;

  // FIXME: This should never be called for r600 GPUs.
  case AMDGPUSubtarget::R600:
  case AMDGPUSubtarget::R700:
  case AMDGPUSubtarget::EVERGREEN:
  case AMDGPUSubtarget::NORTHERN_ISLANDS:
    return SIEncodingFamily::SI;
  }

  llvm_unreachable("Unknown subtarget generation!");
}
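
// Map a pseudo-instruction opcode to the MC opcode that encodes it for the
// current subtarget; returns -1 if the pseudo has no encoding on this
// generation.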
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
  int MCOp = AMDGPU::getMCOpcode(Opcode, subtargetEncodingFamily(ST));

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}