2013-05-07 00:15:19 +08:00
|
|
|
//===-- SystemZTargetMachine.cpp - Define TargetMachine for SystemZ -------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "SystemZTargetMachine.h"
|
2015-03-31 20:52:27 +08:00
|
|
|
#include "SystemZTargetTransformInfo.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2013-08-23 18:27:02 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2014-11-13 17:26:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2015-12-10 17:10:07 +08:00
|
|
|
extern cl::opt<bool> MISchedPostRA;
|
2013-05-07 00:15:19 +08:00
|
|
|
// Entry point with C linkage invoked by LLVM's target-registration
// machinery to make the SystemZ target available.
extern "C" void LLVMInitializeSystemZTarget() {
  // Register the target.
  RegisterTargetMachine<SystemZTargetMachine> X(TheSystemZTarget);
}
|
|
|
|
|
[SystemZ] Add CodeGen support for integer vector types
This is the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
// Determine whether we use the vector ABI.
|
|
|
|
static bool UsesVectorABI(StringRef CPU, StringRef FS) {
|
|
|
|
// We use the vector ABI whenever the vector facility is avaiable.
|
|
|
|
// This is the case by default if CPU is z13 or later, and can be
|
|
|
|
// overridden via "[+-]vector" feature string elements.
|
|
|
|
bool VectorABI = true;
|
|
|
|
if (CPU.empty() || CPU == "generic" ||
|
|
|
|
CPU == "z10" || CPU == "z196" || CPU == "zEC12")
|
|
|
|
VectorABI = false;
|
|
|
|
|
|
|
|
SmallVector<StringRef, 3> Features;
|
2015-09-10 14:12:31 +08:00
|
|
|
FS.split(Features, ',', -1, false /* KeepEmpty */);
|
[SystemZ] Add CodeGen support for integer vector types
This the first of a series of patches to add CodeGen support exploiting
the instructions of the z13 vector facility. This patch adds support
for the native integer vector types (v16i8, v8i16, v4i32, v2i64).
When the vector facility is present, we default to the new vector ABI.
This is characterized by two major differences:
- Vector types are passed/returned in vector registers
(except for unnamed arguments of a variable-argument list function).
- Vector types are at most 8-byte aligned.
The reason for the choice of 8-byte vector alignment is that the hardware
is able to efficiently load vectors at 8-byte alignment, and the ABI only
guarantees 8-byte alignment of the stack pointer, so requiring any higher
alignment for vectors would require dynamic stack re-alignment code.
However, for compatibility with old code that may use vector types, when
*not* using the vector facility, the old alignment rules (vector types
are naturally aligned) remain in use.
These alignment rules are not only implemented at the C language level
(implemented in clang), but also at the LLVM IR level. This is done
by selecting a different DataLayout string depending on whether the
vector ABI is in effect or not.
Based on a patch by Richard Sandiford.
llvm-svn: 236521
2015-05-06 03:25:42 +08:00
|
|
|
for (auto &Feature : Features) {
|
|
|
|
if (Feature == "vector" || Feature == "+vector")
|
|
|
|
VectorABI = true;
|
|
|
|
if (Feature == "-vector")
|
|
|
|
VectorABI = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return VectorABI;
|
|
|
|
}
|
|
|
|
|
2015-06-11 23:34:59 +08:00
|
|
|
// Build the DataLayout string for the given triple/CPU/features.  The
// string differs depending on whether the vector ABI is in effect.
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     StringRef FS) {
  bool VectorABI = UsesVectorABI(CPU, FS);

  // Big endian.
  std::string Layout = "E";

  // Data mangling.
  Layout += DataLayout::getManglingComponent(TT);

  // Give global data at least 16 bits of alignment by default, so that
  // it can be referenced using LARL.  Stack variables have no special
  // alignment requirements.
  Layout += "-i1:8:16-i8:8:16";

  // 64-bit integers are naturally aligned.
  Layout += "-i64:64";

  // 128-bit floats are aligned only to 64 bits.
  Layout += "-f128:64";

  // When using the vector ABI, 128-bit vectors are also aligned to 64 bits.
  if (VectorABI)
    Layout += "-v128:64";

  // We prefer 16 bits of alignment for all globals; see above.
  Layout += "-a:8:16";

  // Integer registers are 32 or 64 bits.
  Layout += "-n32:64";

  return Layout;
}
|
|
|
|
|
2015-06-12 03:41:26 +08:00
|
|
|
// Construct the SystemZ target machine.  The DataLayout string is computed
// from CPU and FS because the vector ABI (selected by CPU/features) changes
// vector-type alignment.  Lowering uses the ELF object-file implementation.
SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, FS), TT, CPU, FS, Options,
                        RM, CM, OL),
      TLOF(make_unique<TargetLoweringObjectFileELF>()),
      Subtarget(TT, CPU, FS, *this) {
  initAsmInfo();
}
|
|
|
|
|
2014-11-21 07:37:18 +08:00
|
|
|
// Out-of-line destructor: the TLOF unique_ptr's deleter needs the complete
// TargetLoweringObjectFileELF type, which this file includes.
SystemZTargetMachine::~SystemZTargetMachine() {}
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
namespace {
/// SystemZ Code Generator Pass Configuration Options.
class SystemZPassConfig : public TargetPassConfig {
public:
  SystemZPassConfig(SystemZTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  /// Return the target machine this pass configuration was created for.
  SystemZTargetMachine &getSystemZTargetMachine() const {
    return getTM<SystemZTargetMachine>();
  }

  // TargetPassConfig hooks overridden below to insert SystemZ passes.
  void addIRPasses() override;
  bool addInstSelector() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // end anonymous namespace
|
|
|
|
|
2013-08-23 18:27:02 +08:00
|
|
|
// Add IR-level passes.  SystemZ currently adds nothing beyond the
// target-independent defaults.
void SystemZPassConfig::addIRPasses() {
  TargetPassConfig::addIRPasses();
}
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
bool SystemZPassConfig::addInstSelector() {
|
|
|
|
addPass(createSystemZISelDag(getSystemZTargetMachine(), getOptLevel()));
|
2015-02-18 17:13:27 +08:00
|
|
|
|
|
|
|
if (getOptLevel() != CodeGenOpt::None)
|
|
|
|
addPass(createSystemZLDCleanupPass(getSystemZTargetMachine()));
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-12-12 05:26:47 +08:00
|
|
|
void SystemZPassConfig::addPreSched2() {
|
2014-06-05 22:20:10 +08:00
|
|
|
if (getOptLevel() != CodeGenOpt::None &&
|
|
|
|
getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
|
2013-07-25 17:11:15 +08:00
|
|
|
addPass(&IfConverterID);
|
|
|
|
}
|
|
|
|
|
2014-12-12 05:26:47 +08:00
|
|
|
void SystemZPassConfig::addPreEmitPass() {
|
2015-10-08 15:40:23 +08:00
|
|
|
|
|
|
|
// Do instruction shortening before compare elimination because some
|
|
|
|
// vector instructions will be shortened into opcodes that compare
|
|
|
|
// elimination recognizes.
|
|
|
|
if (getOptLevel() != CodeGenOpt::None)
|
|
|
|
addPass(createSystemZShortenInstPass(getSystemZTargetMachine()), false);
|
|
|
|
|
2013-08-05 18:58:53 +08:00
|
|
|
// We eliminate comparisons here rather than earlier because some
|
|
|
|
// transformations can change the set of available CC values and we
|
|
|
|
// generally want those transformations to have priority. This is
|
|
|
|
// especially true in the commonest case where the result of the comparison
|
|
|
|
// is used by a single in-range branch instruction, since we will then
|
|
|
|
// be able to fuse the compare and the branch instead.
|
|
|
|
//
|
|
|
|
// For example, two-address NILF can sometimes be converted into
|
|
|
|
// three-address RISBLG. NILF produces a CC value that indicates whether
|
|
|
|
// the low word is zero, but RISBLG does not modify CC at all. On the
|
|
|
|
// other hand, 64-bit ANDs like NILL can sometimes be converted to RISBG.
|
|
|
|
// The CC value produced by NILL isn't useful for our purposes, but the
|
|
|
|
// value produced by RISBG can be used for any comparison with zero
|
|
|
|
// (not just equality). So there are some transformations that lose
|
|
|
|
// CC values (while still being worthwhile) and others that happen to make
|
|
|
|
// the CC result more useful than it was originally.
|
|
|
|
//
|
2013-08-05 19:23:46 +08:00
|
|
|
// Another reason is that we only want to use BRANCH ON COUNT in cases
|
|
|
|
// where we know that the count register is not going to be spilled.
|
|
|
|
//
|
2013-08-05 18:58:53 +08:00
|
|
|
// Doing it so late makes it more likely that a register will be reused
|
|
|
|
// between the comparison and the branch, but it isn't clear whether
|
|
|
|
// preventing that would be a win or not.
|
|
|
|
if (getOptLevel() != CodeGenOpt::None)
|
2014-12-12 05:26:47 +08:00
|
|
|
addPass(createSystemZElimComparePass(getSystemZTargetMachine()), false);
|
[SystemZ] Add long branch pass
Before this change, the SystemZ backend would use BRCL for all branches
and only consider shortening them to BRC when generating an object file.
E.g. a branch on equal would use the JGE alias of BRCL in assembly output,
but might be shortened to the JE alias of BRC in ELF output. This was
a useful first step, but it had two problems:
(1) The z assembler isn't traditionally supposed to perform branch shortening
or branch relaxation. We followed this rule by not relaxing branches
in assembler input, but that meant that generating assembly code and
then assembling it would not produce the same result as going directly
to object code; the former would give long branches everywhere, whereas
the latter would use short branches where possible.
(2) Other useful branches, like COMPARE AND BRANCH, do not have long forms.
We would need to do something else before supporting them.
(Although COMPARE AND BRANCH does not change the condition codes,
the plan is to model COMPARE AND BRANCH as a CC-clobbering instruction
during codegen, so that we can safely lower it to a separate compare
and long branch where necessary. This is not a valid transformation
for the assembler proper to make.)
This patch therefore moves branch relaxation to a pre-emit pass.
For now, calls are still shortened from BRASL to BRAS by the assembler,
although this too is not really the traditional behaviour.
The first test takes about 1.5s to run, and there are likely to be
more tests in this vein once further branch types are added. The feeling
on IRC was that 1.5s is a bit much for a single test, so I've restricted
it to SystemZ hosts for now.
The patch exposes (and fixes) some typos in the main CodeGen/SystemZ tests.
A later patch will remove the {{g}}s from that directory.
llvm-svn: 182274
2013-05-20 22:23:08 +08:00
|
|
|
addPass(createSystemZLongBranchPass(getSystemZTargetMachine()));
|
2015-12-10 17:10:07 +08:00
|
|
|
|
|
|
|
// Do final scheduling after all other optimizations, to get an
|
|
|
|
// optimal input for the decoder (branch relaxation must happen
|
|
|
|
// after block placement).
|
|
|
|
if (getOptLevel() != CodeGenOpt::None) {
|
|
|
|
if (MISchedPostRA)
|
|
|
|
addPass(&PostMachineSchedulerID);
|
|
|
|
else
|
|
|
|
addPass(&PostRASchedulerID);
|
|
|
|
}
|
[SystemZ] Add long branch pass
Before this change, the SystemZ backend would use BRCL for all branches
and only consider shortening them to BRC when generating an object file.
E.g. a branch on equal would use the JGE alias of BRCL in assembly output,
but might be shortened to the JE alias of BRC in ELF output. This was
a useful first step, but it had two problems:
(1) The z assembler isn't traditionally supposed to perform branch shortening
or branch relaxation. We followed this rule by not relaxing branches
in assembler input, but that meant that generating assembly code and
then assembling it would not produce the same result as going directly
to object code; the former would give long branches everywhere, whereas
the latter would use short branches where possible.
(2) Other useful branches, like COMPARE AND BRANCH, do not have long forms.
We would need to do something else before supporting them.
(Although COMPARE AND BRANCH does not change the condition codes,
the plan is to model COMPARE AND BRANCH as a CC-clobbering instruction
during codegen, so that we can safely lower it to a separate compare
and long branch where necessary. This is not a valid transformation
for the assembler proper to make.)
This patch therefore moves branch relaxation to a pre-emit pass.
For now, calls are still shortened from BRASL to BRAS by the assembler,
although this too is not really the traditional behaviour.
The first test takes about 1.5s to run, and there are likely to be
more tests in this vein once further branch types are added. The feeling
on IRC was that 1.5s is a bit much for a single test, so I've restricted
it to SystemZ hosts for now.
The patch exposes (and fixes) some typos in the main CodeGen/SystemZ tests.
A later patch will remove the {{g}}s from that directory.
llvm-svn: 182274
2013-05-20 22:23:08 +08:00
|
|
|
}
|
|
|
|
|
2013-05-07 00:15:19 +08:00
|
|
|
// Create the pass configuration object that drives SystemZ code generation.
TargetPassConfig *SystemZTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new SystemZPassConfig(this, PM);
}
|
2015-03-31 20:52:27 +08:00
|
|
|
|
|
|
|
// Return an analysis producing SystemZ-specific TargetTransformInfo
// (via SystemZTTIImpl) for each function queried.
TargetIRAnalysis SystemZTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(SystemZTTIImpl(this, F));
  });
}
|