2007-08-04 09:51:18 +08:00
|
|
|
//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2007-08-04 09:51:18 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
2015-03-11 00:08:36 +08:00
|
|
|
// This file implements the auto-upgrade helper functions.
|
|
|
|
// This is where deprecated IR intrinsics and other IR features are updated to
|
|
|
|
// current specifications.
|
2007-08-04 09:51:18 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-03-05 18:34:14 +08:00
|
|
|
#include "llvm/IR/AutoUpgrade.h"
|
2014-03-04 19:45:46 +08:00
|
|
|
#include "llvm/IR/CFG.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
2015-01-14 19:23:27 +08:00
|
|
|
#include "llvm/IR/DIBuilder.h"
|
2014-03-06 08:46:21 +08:00
|
|
|
#include "llvm/IR/DebugInfo.h"
|
2014-01-16 09:51:12 +08:00
|
|
|
#include "llvm/IR/DiagnosticInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2015-09-30 18:56:37 +08:00
|
|
|
#include "llvm/Support/Regex.h"
|
2008-02-20 19:08:44 +08:00
|
|
|
#include <cstring>
|
2007-08-04 09:51:18 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2012-06-11 02:42:51 +08:00
|
|
|
// Upgrade the declarations of the SSE4.1 functions whose arguments have
|
|
|
|
// changed their type from v4f32 to v2i64.
|
|
|
|
static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
|
|
|
|
Function *&NewFn) {
|
|
|
|
// Check whether this is an old version of the function, which received
|
|
|
|
// v4f32 arguments.
|
|
|
|
Type *Arg0Type = F->getFunctionType()->getParamType(0);
|
|
|
|
if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Yes, it's old, replace it with new version.
|
|
|
|
F->setName(F->getName() + ".old");
|
|
|
|
NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
|
|
|
|
return true;
|
|
|
|
}
|
2007-08-04 09:51:18 +08:00
|
|
|
|
[x86] Fix a pretty horrible bug and inconsistency in the x86 asm
parsing (and latent bug in the instruction definitions).
This is effectively a revert of r136287 which tried to address
a specific and narrow case of immediate operands failing to be accepted
by x86 instructions with a pretty heavy hammer: it introduced a new kind
of operand that behaved differently. All of that is removed with this
commit, but the test cases are both preserved and enhanced.
The core problem that r136287 and this commit are trying to handle is
that gas accepts both of the following instructions:
insertps $192, %xmm0, %xmm1
insertps $-64, %xmm0, %xmm1
These will encode to the same byte sequence, with the immediate
occupying an 8-bit entry. The first form was fixed by r136287 but that
broke the prior handling of the second form! =[ Ironically, we would
still emit the second form in some cases and then be unable to
re-assemble the output.
The reason why the first instruction failed to be handled is because
prior to r136287 the operands ere marked 'i32i8imm' which forces them to
be sign-extenable. Clearly, that won't work for 192 in a single byte.
However, making thim zero-extended or "unsigned" doesn't really address
the core issue either because it breaks negative immediates. The correct
fix is to make these operands 'i8imm' reflecting that they can be either
signed or unsigned but must be 8-bit immediates. This patch backs out
r136287 and then changes those places as well as some others to use
'i8imm' rather than one of the extended variants.
Naturally, this broke something else. The custom DAG nodes had to be
updated to have a much more accurate type constraint of an i8 node, and
a bunch of Pat immediates needed to be specified as i8 values.
The fallout didn't end there though. We also then ceased to be able to
match the instruction-specific intrinsics to the instructions so
modified. Digging, this is because they too used i32 rather than i8 in
their signature. So I've also switched those intrinsics to i8 arguments
in line with the instructions.
In order to make the intrinsic adjustments of course, I also had to add
auto upgrading for the intrinsics.
I suspect that the intrinsic argument types may have led everything down
this rabbit hole. Pretty happy with the result.
llvm-svn: 217310
2014-09-06 18:00:01 +08:00
|
|
|
// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
|
|
|
|
// arguments have changed their type from i32 to i8.
|
|
|
|
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
|
|
|
|
Function *&NewFn) {
|
|
|
|
// Check that the last argument is an i32.
|
|
|
|
Type *LastArgType = F->getFunctionType()->getParamType(
|
|
|
|
F->getFunctionType()->getNumParams() - 1);
|
|
|
|
if (!LastArgType->isIntegerTy(32))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Move this function aside and map down.
|
|
|
|
F->setName(F->getName() + ".old");
|
|
|
|
NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2007-12-18 06:33:23 +08:00
|
|
|
// Determine whether F is a deprecated intrinsic declaration and, if so,
// report how to upgrade it:
//   - returns false when F needs no upgrade;
//   - returns true with NewFn set to the replacement declaration when a
//     direct remapping exists;
//   - returns true with NewFn == nullptr when the call sites must be
//     rewritten inline by the caller (no single replacement declaration).
// Dispatch is on the first character after the "llvm." prefix.
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    // vcnt maps directly onto the target-independent population count.
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    // Old NEON loads/stores lacked the pointer-type suffix in their mangled
    // name; the regexes match only the un-suffixed (deprecated) spellings.
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      // The argument count encodes the variant: vstN takes N + 2 arguments
      // (ptr, N vectors, alignment) and vstNlane takes N + 3 (adds the lane),
      // hence the "- 3" / "- 5" table offsets.
      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    // Both targets' thread-pointer intrinsics merged into one generic one.
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    break;
  }

  case 'c': {
    // ctlz/cttz gained an is_zero_undef flag operand; the one-argument form
    // is the deprecated one.
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      F->setName(Name + ".old");
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      F->setName(Name + ".old");
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }

  case 'm': {
    // masked.load/store changed their name mangling; re-mangle and compare
    // against the current canonical name to decide whether to upgrade.
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        F->setName(Name + ".old");
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        F->setName(Name + ".old");
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    break;
  }

  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        F->setName(Name + ".old");
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 's':
    // stackprotectorcheck was removed entirely; calls are dropped by the
    // call rewriter (NewFn == nullptr signals inline expansion/removal).
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    // NOTE(review): no break here — case 's' falls through into case 'x'.
    // Benign today because no upgraded name starting with 's' survives to
    // the "x86." tests below, but confirm the fallthrough is intentional.

  case 'x': {
    // All of the intrinsics below were removed in favor of plain IR; their
    // call sites are rewritten inline, so NewFn stays null.
    if (Name.startswith("x86.sse2.pcmpeq.") ||
        Name.startswith("x86.sse2.pcmpgt.") ||
        Name.startswith("x86.avx2.pcmpeq.") ||
        Name.startswith("x86.avx2.pcmpgt.") ||
        Name.startswith("x86.avx512.mask.pcmpeq.") ||
        Name.startswith("x86.avx512.mask.pcmpgt.") ||
        Name == "x86.sse41.pmaxsb" ||
        Name == "x86.sse2.pmaxs.w" ||
        Name == "x86.sse41.pmaxsd" ||
        Name == "x86.sse2.pmaxu.b" ||
        Name == "x86.sse41.pmaxuw" ||
        Name == "x86.sse41.pmaxud" ||
        Name == "x86.sse41.pminsb" ||
        Name == "x86.sse2.pmins.w" ||
        Name == "x86.sse41.pminsd" ||
        Name == "x86.sse2.pminu.b" ||
        Name == "x86.sse41.pminuw" ||
        Name == "x86.sse41.pminud" ||
        Name.startswith("x86.avx2.pmax") ||
        Name.startswith("x86.avx2.pmin") ||
        Name.startswith("x86.avx2.vbroadcast") ||
        Name.startswith("x86.avx2.pbroadcast") ||
        Name.startswith("x86.avx.vpermil.") ||
        Name.startswith("x86.sse2.pshuf") ||
        Name.startswith("x86.avx512.mask.movddup") ||
        Name.startswith("x86.avx512.mask.movshdup") ||
        Name.startswith("x86.avx512.mask.movsldup") ||
        Name.startswith("x86.avx512.mask.pshuf.d.") ||
        Name.startswith("x86.avx512.mask.pshufl.w.") ||
        Name.startswith("x86.avx512.mask.pshufh.w.") ||
        Name.startswith("x86.avx512.mask.vpermil.p") ||
        Name.startswith("x86.avx512.mask.punpckl") ||
        Name.startswith("x86.avx512.mask.punpckh") ||
        Name.startswith("x86.avx512.mask.unpckl.") ||
        Name.startswith("x86.avx512.mask.unpckh.") ||
        Name.startswith("x86.sse41.pmovsx") ||
        Name.startswith("x86.sse41.pmovzx") ||
        Name.startswith("x86.avx2.pmovsx") ||
        Name.startswith("x86.avx2.pmovzx") ||
        Name == "x86.sse2.cvtdq2pd" ||
        Name == "x86.sse2.cvtps2pd" ||
        Name == "x86.avx.cvtdq2.pd.256" ||
        Name == "x86.avx.cvt.ps2.pd.256" ||
        Name == "x86.sse2.cvttps2dq" ||
        Name.startswith("x86.avx.cvtt.") ||
        Name.startswith("x86.avx.vinsertf128.") ||
        Name == "x86.avx2.vinserti128" ||
        Name.startswith("x86.avx.vextractf128.") ||
        Name == "x86.avx2.vextracti128" ||
        Name.startswith("x86.sse4a.movnt.") ||
        Name.startswith("x86.avx.movnt.") ||
        Name == "x86.sse2.storel.dq" ||
        Name.startswith("x86.sse.storeu.") ||
        Name.startswith("x86.sse2.storeu.") ||
        Name.startswith("x86.avx.storeu.") ||
        Name.startswith("x86.avx512.mask.storeu.p") ||
        Name.startswith("x86.avx512.mask.storeu.b.") ||
        Name.startswith("x86.avx512.mask.storeu.w.") ||
        Name.startswith("x86.avx512.mask.storeu.d.") ||
        Name.startswith("x86.avx512.mask.storeu.q.") ||
        Name.startswith("x86.avx512.mask.store.p") ||
        Name.startswith("x86.avx512.mask.store.b.") ||
        Name.startswith("x86.avx512.mask.store.w.") ||
        Name.startswith("x86.avx512.mask.store.d.") ||
        Name.startswith("x86.avx512.mask.store.q.") ||
        Name.startswith("x86.avx512.mask.loadu.p") ||
        Name.startswith("x86.avx512.mask.loadu.b.") ||
        Name.startswith("x86.avx512.mask.loadu.w.") ||
        Name.startswith("x86.avx512.mask.loadu.d.") ||
        Name.startswith("x86.avx512.mask.loadu.q.") ||
        Name.startswith("x86.avx512.mask.load.p") ||
        Name.startswith("x86.avx512.mask.load.b.") ||
        Name.startswith("x86.avx512.mask.load.w.") ||
        Name.startswith("x86.avx512.mask.load.d.") ||
        Name.startswith("x86.avx512.mask.load.q.") ||
        Name == "x86.sse42.crc32.64.8" ||
        Name.startswith("x86.avx.vbroadcast.s") ||
        Name.startswith("x86.avx512.mask.palignr.") ||
        Name.startswith("x86.sse2.psll.dq") ||
        Name.startswith("x86.sse2.psrl.dq") ||
        Name.startswith("x86.avx2.psll.dq") ||
        Name.startswith("x86.avx2.psrl.dq") ||
        Name.startswith("x86.avx512.psll.dq") ||
        Name.startswith("x86.avx512.psrl.dq") ||
        Name == "x86.sse41.pblendw" ||
        Name.startswith("x86.sse41.blendp") ||
        Name.startswith("x86.avx.blend.p") ||
        Name == "x86.avx2.pblendw" ||
        Name.startswith("x86.avx2.pblendd.") ||
        Name == "x86.avx2.vbroadcasti128" ||
        Name == "x86.xop.vpcmov" ||
        (Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) {
      NewFn = nullptr;
      return true;
    }
    // SSE4.1 ptest functions may have an old signature.
    if (Name.startswith("x86.sse41.ptest")) {
      if (Name == "x86.sse41.ptestc")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
      if (Name == "x86.sse41.ptestz")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
      if (Name == "x86.sse41.ptestnzc")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
    }
    // Several blend and other instructions with masks used the wrong number of
    // bits.
    if (Name == "x86.sse41.insertps")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                              NewFn);
    if (Name == "x86.sse41.dppd")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                              NewFn);
    if (Name == "x86.sse41.dpps")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                              NewFn);
    if (Name == "x86.sse41.mpsadbw")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                              NewFn);
    if (Name == "x86.avx.dp.ps.256")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                              NewFn);
    if (Name == "x86.avx2.mpsadbw")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                              NewFn);

    // frcz.ss/sd may need to have an argument dropped
    if (Name.startswith("x86.xop.vfrcz.ss") && F->arg_size() == 2) {
      F->setName(Name + ".old");
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_ss);
      return true;
    }
    if (Name.startswith("x86.xop.vfrcz.sd") && F->arg_size() == 2) {
      F->setName(Name + ".old");
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_sd);
      return true;
    }
    // Fix the FMA4 intrinsics to remove the 4
    if (Name.startswith("x86.fma4.")) {
      F->setName("llvm.x86.fma" + Name.substr(8));
      // NOTE: NewFn == F here — only the name changes; callers keep using
      // the same declaration.
      NewFn = F;
      return true;
    }
    // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
    if (Name.startswith("x86.xop.vpermil2")) {
      auto Params = F->getFunctionType()->params();
      auto Idx = Params[2];
      if (Idx->getScalarType()->isFloatingPointTy()) {
        F->setName(Name + ".old");
        unsigned IdxSize = Idx->getPrimitiveSizeInBits();
        unsigned EltSize = Idx->getScalarSizeInBits();
        // Pick the concrete intrinsic from the (element size, vector size)
        // pair of the old floating-point index operand.
        Intrinsic::ID Permil2ID;
        if (EltSize == 64 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd;
        else if (EltSize == 32 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2ps;
        else if (EltSize == 64 && IdxSize == 256)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
        else
          Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
        NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
        return true;
      }
    }
    break;
  }
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}
|
|
|
|
|
2007-12-18 06:33:23 +08:00
|
|
|
// Public entry point. Returns true when F is a deprecated intrinsic; NewFn
// receives the replacement declaration (or nullptr when the call sites must
// be rewritten inline). Also refreshes the intrinsic's attribute list, which
// never changes the function itself.
bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Changed = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID IID = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), IID));
  return Changed;
}
|
|
|
|
|
2010-09-11 02:51:56 +08:00
|
|
|
// Upgrade deprecated global variable forms. There are currently no global
// upgrades to perform, so this always reports "unchanged" (false).
bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}
|
|
|
|
|
2016-06-10 05:09:03 +08:00
|
|
|
// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
|
2015-02-18 14:24:44 +08:00
|
|
|
// to byte shuffles.
|
|
|
|
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
|
2016-05-29 14:37:33 +08:00
|
|
|
Value *Op, unsigned Shift) {
|
|
|
|
Type *ResultTy = Op->getType();
|
|
|
|
unsigned NumElts = ResultTy->getVectorNumElements() * 8;
|
2015-02-18 14:24:44 +08:00
|
|
|
|
|
|
|
// Bitcast from a 64-bit element type to a byte element type.
|
2016-05-29 14:37:33 +08:00
|
|
|
Type *VecTy = VectorType::get(Type::getInt8Ty(C), NumElts);
|
|
|
|
Op = Builder.CreateBitCast(Op, VecTy, "cast");
|
|
|
|
|
2015-02-18 14:24:44 +08:00
|
|
|
// We'll be shuffling in zeroes.
|
2016-05-29 14:37:33 +08:00
|
|
|
Value *Res = Constant::getNullValue(VecTy);
|
2015-02-18 14:24:44 +08:00
|
|
|
|
|
|
|
// If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
|
|
|
|
// we'll just return the zero vector.
|
|
|
|
if (Shift < 16) {
|
2016-06-12 08:41:19 +08:00
|
|
|
uint32_t Idxs[64];
|
2016-06-10 05:09:03 +08:00
|
|
|
// 256/512-bit version is split into 2/4 16-byte lanes.
|
2015-02-18 14:24:44 +08:00
|
|
|
for (unsigned l = 0; l != NumElts; l += 16)
|
|
|
|
for (unsigned i = 0; i != 16; ++i) {
|
|
|
|
unsigned Idx = NumElts + i - Shift;
|
|
|
|
if (Idx < NumElts)
|
|
|
|
Idx -= NumElts - 16; // end of lane, switch operand.
|
2016-05-29 14:37:33 +08:00
|
|
|
Idxs[l + i] = Idx + l;
|
2015-02-18 14:24:44 +08:00
|
|
|
}
|
|
|
|
|
2016-05-29 14:37:33 +08:00
|
|
|
Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
|
2015-02-18 14:24:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Bitcast back to a 64-bit element type.
|
2016-05-29 14:37:33 +08:00
|
|
|
return Builder.CreateBitCast(Res, ResultTy, "cast");
|
2015-02-18 14:24:44 +08:00
|
|
|
}
|
|
|
|
|
2016-06-13 10:36:42 +08:00
|
|
|
// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
|
|
|
|
// to byte shuffles.
|
|
|
|
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
|
|
|
|
Value *Op,
|
|
|
|
unsigned Shift) {
|
|
|
|
Type *ResultTy = Op->getType();
|
|
|
|
unsigned NumElts = ResultTy->getVectorNumElements() * 8;
|
|
|
|
|
|
|
|
// Bitcast from a 64-bit element type to a byte element type.
|
|
|
|
Type *VecTy = VectorType::get(Type::getInt8Ty(C), NumElts);
|
|
|
|
Op = Builder.CreateBitCast(Op, VecTy, "cast");
|
|
|
|
|
|
|
|
// We'll be shuffling in zeroes.
|
|
|
|
Value *Res = Constant::getNullValue(VecTy);
|
|
|
|
|
|
|
|
// If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
|
|
|
|
// we'll just return the zero vector.
|
|
|
|
if (Shift < 16) {
|
|
|
|
uint32_t Idxs[64];
|
|
|
|
// 256/512-bit version is split into 2/4 16-byte lanes.
|
|
|
|
for (unsigned l = 0; l != NumElts; l += 16)
|
|
|
|
for (unsigned i = 0; i != 16; ++i) {
|
|
|
|
unsigned Idx = i + Shift;
|
|
|
|
if (Idx >= 16)
|
|
|
|
Idx += NumElts - 16; // end of lane, switch operand.
|
|
|
|
Idxs[l + i] = Idx + l;
|
|
|
|
}
|
|
|
|
|
|
|
|
Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Bitcast back to a 64-bit element type.
|
|
|
|
return Builder.CreateBitCast(Res, ResultTy, "cast");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert a scalar integer mask into the <NumElts x i1> vector expected by
// select/masked-load/masked-store.
static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  unsigned MaskBits = cast<IntegerType>(Mask->getType())->getBitWidth();
  llvm::VectorType *BoolVecTy =
      llvm::VectorType::get(Builder.getInt1Ty(), MaskBits);
  Mask = Builder.CreateBitCast(Mask, BoolVecTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t LowLanes[4];
    for (unsigned i = 0; i != NumElts; ++i)
      LowLanes[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(LowLanes, NumElts),
                                       "extract");
  }

  return Mask;
}
|
|
|
|
|
|
|
|
// Emit a masked select: lanes whose mask bit is set take Op0, the rest take
// Op1. Mask arrives as a scalar integer bitmask.
static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // An all-ones constant mask selects Op0 everywhere; skip the select.
  const auto *CMask = dyn_cast<Constant>(Mask);
  if (CMask && CMask->isAllOnesValue())
    return Op0;

  unsigned VecWidth = Op0->getType()->getVectorNumElements();
  Value *VMask = getX86MaskVec(Builder, Mask, VecWidth);
  return Builder.CreateSelect(VMask, Op0, Op1);
}
|
|
|
|
|
2016-06-06 14:12:54 +08:00
|
|
|
// Lower the (SSSE3/AVX2/AVX512) palignr intrinsics to a byte shuffle plus a
// masked select of the result against Passthru.
// Note: the LLVMContext parameter is kept for interface compatibility with
// the sibling upgrade helpers; it is not used here.
static Value *UpgradeX86PALIGNRIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
                                          Value *Op0, Value *Op1, Value *Shift,
                                          Value *Passthru, Value *Mask) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert(NumElts % 16 == 0);

  // Shifting the vector pair by two full lanes or more leaves only zeroes.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // A shift of more than one lane (but under two) is equivalent to shifting
  // zeroes in: drop a lane's worth from the count and substitute zero for
  // the high operand.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  // 256-bit palignr operates independently on each 128-bit lane, so build
  // the shuffle mask lane by lane.
  uint32_t Indices[64];
  for (unsigned Lane = 0; Lane != NumElts; Lane += 16) {
    for (unsigned Byte = 0; Byte != 16; ++Byte) {
      unsigned SrcIdx = ShiftVal + Byte;
      if (SrcIdx >= 16)
        SrcIdx += NumElts - 16; // End of lane, switch operand.
      Indices[Lane + Byte] = SrcIdx + Lane;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}
|
|
|
|
|
2016-05-31 09:50:02 +08:00
|
|
|
// Lower a deprecated x86 masked-store intrinsic to either a plain store (for
// an all-ones constant mask) or a generic llvm.masked.store.
static Value *UpgradeMaskedStore(IRBuilder<> &Builder, LLVMContext &C,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to match the type of the data being stored.
  Type *DataTy = Data->getType();
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(DataTy));
  unsigned Align =
      Aligned ? cast<VectorType>(DataTy)->getBitWidth() / 8 : 1;

  // An all-ones constant mask degenerates to an ordinary store.
  const auto *CMask = dyn_cast<Constant>(Mask);
  if (CMask && CMask->isAllOnesValue())
    return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Otherwise convert the integer bitmask to a <N x i1> vector and emit a
  // masked store.
  Mask = getX86MaskVec(Builder, Mask, DataTy->getVectorNumElements());
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}
|
|
|
|
|
2016-06-02 12:19:36 +08:00
|
|
|
// Upgrade an old x86 masked-load intrinsic to a generic masked load, or a
// plain load when the mask is statically all-ones.
static Value *UpgradeMaskedLoad(IRBuilder<> &Builder, LLVMContext &C,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // The old intrinsic took an opaque pointer; cast it so it points at the
  // loaded vector type (which matches the passthru operand's type).
  Type *VecTy = Passthru->getType();
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(VecTy));

  // Aligned variants used the full vector width as alignment; unaligned
  // variants were byte-aligned.
  unsigned Align = Aligned ? cast<VectorType>(VecTy)->getBitWidth() / 8 : 1;

  // An all-ones mask loads every element, so a regular load suffices.
  const auto *MaskC = dyn_cast<Constant>(Mask);
  if (MaskC && MaskC->isAllOnesValue())
    return Builder.CreateAlignedLoad(Ptr, Align);

  // Otherwise turn the integer mask into a vector of i1 and emit a masked
  // load that yields Passthru elements in the disabled lanes.
  unsigned NumElts = VecTy->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}
|
|
|
|
|
2016-06-16 23:48:30 +08:00
|
|
|
// Lower an old integer min/max intrinsic to an icmp + select pair using the
// given predicate (e.g. SGT for signed max, ULT for unsigned min).
static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);
  Value *Cond = Builder.CreateICmp(Pred, LHS, RHS);
  // Selecting LHS when the predicate holds picks the min/max per-element.
  return Builder.CreateSelect(Cond, LHS, RHS);
}
|
|
|
|
|
2016-06-21 11:53:24 +08:00
|
|
|
// Lower an old AVX-512 masked integer-compare intrinsic: emit the icmp, AND
// the result with the incoming write mask, and bitcast the <N x i1> result
// back to the scalar mask integer the intrinsic returned.
static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   ICmpInst::Predicate Pred) {
  Value *LHS = CI.getArgOperand(0);
  unsigned NumElts = LHS->getType()->getVectorNumElements();
  Value *Cmp = Builder.CreateICmp(Pred, LHS, CI.getArgOperand(1));

  // Apply the write mask unless it is statically all-ones (a no-op).
  Value *Mask = CI.getArgOperand(2);
  const auto *MaskC = dyn_cast<Constant>(Mask);
  if (!MaskC || !MaskC->isAllOnesValue())
    Cmp = Builder.CreateAnd(Cmp, getX86MaskVec(Builder, Mask, NumElts));

  // The intrinsic returns an integer of at least 8 bits. Widen narrow
  // vectors to 8 elements; index NumElts selects lane 0 of the undef second
  // shuffle operand, so the padding lanes are undef.
  if (NumElts < 8) {
    uint32_t Widened[8];
    for (unsigned i = 0; i != 8; ++i)
      Widened[i] = i < NumElts ? i : NumElts;
    Cmp = Builder.CreateShuffleVector(Cmp, UndefValue::get(Cmp->getType()),
                                      Widened);
  }

  // Bitcast the <N x i1> compare result to an iN scalar mask (minimum i8).
  return Builder.CreateBitCast(Cmp, IntegerType::get(CI.getContext(),
                                                     std::max(NumElts, 8U)));
}
|
|
|
|
|
2016-06-16 06:01:28 +08:00
|
|
|
/// Upgrade a call to an old intrinsic. All argument and return casting must be
|
|
|
|
/// provided to seamlessly integrate with existing context.
|
2007-08-04 09:51:18 +08:00
|
|
|
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
2012-02-03 14:10:55 +08:00
|
|
|
Function *F = CI->getCalledFunction();
|
2011-12-13 06:59:34 +08:00
|
|
|
LLVMContext &C = CI->getContext();
|
2011-12-12 12:26:04 +08:00
|
|
|
IRBuilder<> Builder(C);
|
2015-10-09 07:49:46 +08:00
|
|
|
Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
|
2011-12-12 12:26:04 +08:00
|
|
|
|
2012-02-03 14:10:55 +08:00
|
|
|
assert(F && "Intrinsic call is not direct?");
|
|
|
|
|
|
|
|
if (!NewFn) {
|
|
|
|
// Get the Function's name.
|
|
|
|
StringRef Name = F->getName();
|
|
|
|
|
|
|
|
Value *Rep;
|
2016-06-16 06:01:28 +08:00
|
|
|
// Upgrade packed integer vector compare intrinsics to compare instructions.
|
2012-02-03 14:10:55 +08:00
|
|
|
if (Name.startswith("llvm.x86.sse2.pcmpeq.") ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pcmpeq.")) {
|
|
|
|
Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
"pcmpeq");
|
|
|
|
Rep = Builder.CreateSExt(Rep, CI->getType(), "");
|
|
|
|
} else if (Name.startswith("llvm.x86.sse2.pcmpgt.") ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pcmpgt.")) {
|
|
|
|
Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
"pcmpgt");
|
|
|
|
Rep = Builder.CreateSExt(Rep, CI->getType(), "");
|
2016-06-21 11:53:24 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.pcmpeq.")) {
|
|
|
|
Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_EQ);
|
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.pcmpgt.")) {
|
|
|
|
Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_SGT);
|
2016-06-16 23:48:30 +08:00
|
|
|
} else if (Name == "llvm.x86.sse41.pmaxsb" ||
|
|
|
|
Name == "llvm.x86.sse2.pmaxs.w" ||
|
2016-06-17 02:44:20 +08:00
|
|
|
Name == "llvm.x86.sse41.pmaxsd" ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pmaxs")) {
|
2016-06-16 23:48:30 +08:00
|
|
|
Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
|
|
|
|
} else if (Name == "llvm.x86.sse2.pmaxu.b" ||
|
|
|
|
Name == "llvm.x86.sse41.pmaxuw" ||
|
2016-06-17 02:44:20 +08:00
|
|
|
Name == "llvm.x86.sse41.pmaxud" ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pmaxu")) {
|
2016-06-16 23:48:30 +08:00
|
|
|
Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
|
|
|
|
} else if (Name == "llvm.x86.sse41.pminsb" ||
|
|
|
|
Name == "llvm.x86.sse2.pmins.w" ||
|
2016-06-17 02:44:20 +08:00
|
|
|
Name == "llvm.x86.sse41.pminsd" ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pmins")) {
|
2016-06-16 23:48:30 +08:00
|
|
|
Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
|
|
|
|
} else if (Name == "llvm.x86.sse2.pminu.b" ||
|
|
|
|
Name == "llvm.x86.sse41.pminuw" ||
|
2016-06-17 02:44:20 +08:00
|
|
|
Name == "llvm.x86.sse41.pminud" ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pminu")) {
|
2016-06-16 23:48:30 +08:00
|
|
|
Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
|
2016-05-25 16:59:18 +08:00
|
|
|
} else if (Name == "llvm.x86.sse2.cvtdq2pd" ||
|
|
|
|
Name == "llvm.x86.sse2.cvtps2pd" ||
|
|
|
|
Name == "llvm.x86.avx.cvtdq2.pd.256" ||
|
|
|
|
Name == "llvm.x86.avx.cvt.ps2.pd.256") {
|
|
|
|
// Lossless i32/float to double conversion.
|
|
|
|
// Extract the bottom elements if necessary and convert to double vector.
|
|
|
|
Value *Src = CI->getArgOperand(0);
|
|
|
|
VectorType *SrcTy = cast<VectorType>(Src->getType());
|
|
|
|
VectorType *DstTy = cast<VectorType>(CI->getType());
|
|
|
|
Rep = CI->getArgOperand(0);
|
|
|
|
|
|
|
|
unsigned NumDstElts = DstTy->getNumElements();
|
|
|
|
if (NumDstElts < SrcTy->getNumElements()) {
|
|
|
|
assert(NumDstElts == 2 && "Unexpected vector size");
|
2016-06-12 08:41:19 +08:00
|
|
|
uint32_t ShuffleMask[2] = { 0, 1 };
|
|
|
|
Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy),
|
|
|
|
ShuffleMask);
|
2016-05-25 16:59:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool Int2Double = (StringRef::npos != Name.find("cvtdq2"));
|
|
|
|
if (Int2Double)
|
|
|
|
Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd");
|
|
|
|
else
|
|
|
|
Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
|
2016-06-02 18:55:21 +08:00
|
|
|
} else if (Name == "llvm.x86.sse2.cvttps2dq" ||
|
|
|
|
Name.startswith("llvm.x86.avx.cvtt.")) {
|
|
|
|
// Truncation (round to zero) float/double to i32 vector conversion.
|
|
|
|
Value *Src = CI->getArgOperand(0);
|
|
|
|
VectorType *DstTy = cast<VectorType>(CI->getType());
|
|
|
|
Rep = Builder.CreateFPToSI(Src, DstTy, "cvtt");
|
2016-06-18 10:38:26 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.sse4a.movnt.")) {
|
|
|
|
Module *M = F->getParent();
|
|
|
|
SmallVector<Metadata *, 1> Elts;
|
|
|
|
Elts.push_back(
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
|
|
|
|
MDNode *Node = MDNode::get(C, Elts);
|
|
|
|
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
// Nontemporal (unaligned) store of the 0'th element of the float/double
|
|
|
|
// vector.
|
|
|
|
Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
|
|
|
|
PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
|
|
|
|
Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
|
|
|
|
Value *Extract =
|
|
|
|
Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
|
|
|
|
|
|
|
|
StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
|
|
|
|
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
|
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
2016-05-31 06:54:05 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx.movnt.")) {
|
2012-05-08 14:58:15 +08:00
|
|
|
Module *M = F->getParent();
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
SmallVector<Metadata *, 1> Elts;
|
|
|
|
Elts.push_back(
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
|
2012-05-08 14:58:15 +08:00
|
|
|
MDNode *Node = MDNode::get(C, Elts);
|
|
|
|
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
// Convert the type of the pointer to a pointer to the stored type.
|
|
|
|
Value *BC = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Arg1->getType()),
|
|
|
|
"cast");
|
2016-05-31 06:54:12 +08:00
|
|
|
StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 32);
|
2012-05-08 14:58:15 +08:00
|
|
|
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
|
|
|
|
|
2016-05-25 14:56:32 +08:00
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
} else if (Name == "llvm.x86.sse2.storel.dq") {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
|
|
|
|
Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
|
|
|
|
Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
|
|
|
|
Value *BC = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Elt->getType()),
|
|
|
|
"cast");
|
2016-05-31 06:54:12 +08:00
|
|
|
Builder.CreateAlignedStore(Elt, BC, 1);
|
2016-05-25 14:56:32 +08:00
|
|
|
|
2016-05-31 07:15:56 +08:00
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
} else if (Name.startswith("llvm.x86.sse.storeu.") ||
|
|
|
|
Name.startswith("llvm.x86.sse2.storeu.") ||
|
|
|
|
Name.startswith("llvm.x86.avx.storeu.")) {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
Arg0 = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Arg1->getType()),
|
|
|
|
"cast");
|
|
|
|
Builder.CreateAlignedStore(Arg1, Arg0, 1);
|
|
|
|
|
2016-05-31 09:50:02 +08:00
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.storeu.p") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.storeu.b.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.storeu.w.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.storeu.d.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.storeu.q.")) {
|
|
|
|
UpgradeMaskedStore(Builder, C, CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
CI->getArgOperand(2), /*Aligned*/false);
|
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.store.p") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.store.b.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.store.w.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.store.d.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.store.q.")) {
|
|
|
|
UpgradeMaskedStore(Builder, C, CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
CI->getArgOperand(2), /*Aligned*/true);
|
|
|
|
|
2012-05-08 14:58:15 +08:00
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
2016-06-02 12:19:36 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.loadu.p") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.loadu.b.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.loadu.w.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.loadu.d.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.loadu.q.")) {
|
|
|
|
Rep = UpgradeMaskedLoad(Builder, C, CI->getArgOperand(0),
|
|
|
|
CI->getArgOperand(1), CI->getArgOperand(2),
|
|
|
|
/*Aligned*/false);
|
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.load.p") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.load.b.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.load.w.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.load.d.") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.load.q.")) {
|
|
|
|
Rep = UpgradeMaskedLoad(Builder, C, CI->getArgOperand(0),
|
|
|
|
CI->getArgOperand(1),CI->getArgOperand(2),
|
|
|
|
/*Aligned*/true);
|
2012-06-10 00:46:13 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.xop.vpcom")) {
|
|
|
|
Intrinsic::ID intID;
|
|
|
|
if (Name.endswith("ub"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomub;
|
|
|
|
else if (Name.endswith("uw"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomuw;
|
|
|
|
else if (Name.endswith("ud"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomud;
|
|
|
|
else if (Name.endswith("uq"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomuq;
|
|
|
|
else if (Name.endswith("b"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomb;
|
|
|
|
else if (Name.endswith("w"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomw;
|
|
|
|
else if (Name.endswith("d"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomd;
|
|
|
|
else if (Name.endswith("q"))
|
|
|
|
intID = Intrinsic::x86_xop_vpcomq;
|
|
|
|
else
|
|
|
|
llvm_unreachable("Unknown suffix");
|
|
|
|
|
|
|
|
Name = Name.substr(18); // strip off "llvm.x86.xop.vpcom"
|
|
|
|
unsigned Imm;
|
|
|
|
if (Name.startswith("lt"))
|
|
|
|
Imm = 0;
|
|
|
|
else if (Name.startswith("le"))
|
|
|
|
Imm = 1;
|
|
|
|
else if (Name.startswith("gt"))
|
|
|
|
Imm = 2;
|
|
|
|
else if (Name.startswith("ge"))
|
|
|
|
Imm = 3;
|
|
|
|
else if (Name.startswith("eq"))
|
|
|
|
Imm = 4;
|
|
|
|
else if (Name.startswith("ne"))
|
|
|
|
Imm = 5;
|
|
|
|
else if (Name.startswith("false"))
|
2015-02-13 15:42:15 +08:00
|
|
|
Imm = 6;
|
|
|
|
else if (Name.startswith("true"))
|
2012-06-10 00:46:13 +08:00
|
|
|
Imm = 7;
|
|
|
|
else
|
|
|
|
llvm_unreachable("Unknown condition");
|
|
|
|
|
|
|
|
Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
|
2015-05-19 06:13:54 +08:00
|
|
|
Rep =
|
|
|
|
Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
Builder.getInt8(Imm)});
|
2015-11-04 04:27:01 +08:00
|
|
|
} else if (Name == "llvm.x86.xop.vpcmov") {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
Value *Sel = CI->getArgOperand(2);
|
|
|
|
unsigned NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
Constant *MinusOne = ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
|
|
|
|
Value *NotSel = Builder.CreateXor(Sel, MinusOne);
|
|
|
|
Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
|
|
|
|
Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
|
|
|
|
Rep = Builder.CreateOr(Sel0, Sel1);
|
2013-10-15 13:20:47 +08:00
|
|
|
} else if (Name == "llvm.x86.sse42.crc32.64.8") {
|
|
|
|
Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
|
|
|
|
Intrinsic::x86_sse42_crc32_32_8);
|
|
|
|
Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
|
2015-05-19 06:13:54 +08:00
|
|
|
Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
|
2013-10-15 13:20:47 +08:00
|
|
|
Rep = Builder.CreateZExt(Rep, CI->getType(), "");
|
2014-05-30 07:35:33 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx.vbroadcast")) {
|
|
|
|
// Replace broadcasts with a series of insertelements.
|
|
|
|
Type *VecTy = CI->getType();
|
|
|
|
Type *EltTy = VecTy->getVectorElementType();
|
|
|
|
unsigned EltNum = VecTy->getVectorNumElements();
|
|
|
|
Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
|
|
|
|
EltTy->getPointerTo());
|
2015-05-21 05:46:30 +08:00
|
|
|
Value *Load = Builder.CreateLoad(EltTy, Cast);
|
2014-05-30 07:35:33 +08:00
|
|
|
Type *I32Ty = Type::getInt32Ty(C);
|
|
|
|
Rep = UndefValue::get(VecTy);
|
|
|
|
for (unsigned I = 0; I < EltNum; ++I)
|
|
|
|
Rep = Builder.CreateInsertElement(Rep, Load,
|
|
|
|
ConstantInt::get(I32Ty, I));
|
2016-05-29 02:03:41 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.sse41.pmovsx") ||
|
|
|
|
Name.startswith("llvm.x86.sse41.pmovzx") ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pmovsx") ||
|
|
|
|
Name.startswith("llvm.x86.avx2.pmovzx")) {
|
2015-09-23 16:48:33 +08:00
|
|
|
VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
|
|
|
|
VectorType *DstTy = cast<VectorType>(CI->getType());
|
|
|
|
unsigned NumDstElts = DstTy->getNumElements();
|
|
|
|
|
2016-05-29 02:03:41 +08:00
|
|
|
// Extract a subvector of the first NumDstElts lanes and sign/zero extend.
|
2016-06-12 12:48:00 +08:00
|
|
|
SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
|
2016-06-12 08:41:19 +08:00
|
|
|
for (unsigned i = 0; i != NumDstElts; ++i)
|
2016-06-12 12:48:00 +08:00
|
|
|
ShuffleMask[i] = i;
|
2015-09-23 16:48:33 +08:00
|
|
|
|
|
|
|
Value *SV = Builder.CreateShuffleVector(
|
|
|
|
CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
|
2016-05-29 02:03:41 +08:00
|
|
|
|
|
|
|
bool DoSext = (StringRef::npos != Name.find("pmovsx"));
|
|
|
|
Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
|
|
|
|
: Builder.CreateZExt(SV, DstTy);
|
2015-03-04 08:13:25 +08:00
|
|
|
} else if (Name == "llvm.x86.avx2.vbroadcasti128") {
|
|
|
|
// Replace vbroadcasts with a vector shuffle.
|
2015-05-21 05:46:30 +08:00
|
|
|
Type *VT = VectorType::get(Type::getInt64Ty(C), 2);
|
|
|
|
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
|
|
|
|
PointerType::getUnqual(VT));
|
|
|
|
Value *Load = Builder.CreateLoad(VT, Op);
|
2016-06-12 08:41:19 +08:00
|
|
|
uint32_t Idxs[4] = { 0, 1, 0, 1 };
|
2015-03-04 08:13:25 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
|
2015-03-12 23:27:07 +08:00
|
|
|
Idxs);
|
2015-08-21 04:36:19 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx2.pbroadcast") ||
|
|
|
|
Name.startswith("llvm.x86.avx2.vbroadcast")) {
|
|
|
|
// Replace vp?broadcasts with a vector shuffle.
|
|
|
|
Value *Op = CI->getArgOperand(0);
|
|
|
|
unsigned NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
|
|
|
|
Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
|
|
|
|
Constant::getNullValue(MaskTy));
|
2016-06-06 14:12:54 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.palignr.")) {
|
|
|
|
Rep = UpgradeX86PALIGNRIntrinsics(Builder, C, CI->getArgOperand(0),
|
|
|
|
CI->getArgOperand(1),
|
|
|
|
CI->getArgOperand(2),
|
|
|
|
CI->getArgOperand(3),
|
|
|
|
CI->getArgOperand(4));
|
2016-05-29 14:37:33 +08:00
|
|
|
} else if (Name == "llvm.x86.sse2.psll.dq" ||
|
|
|
|
Name == "llvm.x86.avx2.psll.dq") {
|
|
|
|
// 128/256-bit shift left specified in bits.
|
2015-02-18 14:24:44 +08:00
|
|
|
unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
2016-05-29 14:37:33 +08:00
|
|
|
Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0),
|
2015-02-18 14:24:44 +08:00
|
|
|
Shift / 8); // Shift is in bits.
|
2016-05-29 14:37:33 +08:00
|
|
|
} else if (Name == "llvm.x86.sse2.psrl.dq" ||
|
|
|
|
Name == "llvm.x86.avx2.psrl.dq") {
|
|
|
|
// 128/256-bit shift right specified in bits.
|
2015-02-18 14:24:44 +08:00
|
|
|
unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
2016-05-29 14:37:33 +08:00
|
|
|
Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0),
|
2015-02-18 14:24:44 +08:00
|
|
|
Shift / 8); // Shift is in bits.
|
2016-05-29 14:37:33 +08:00
|
|
|
} else if (Name == "llvm.x86.sse2.psll.dq.bs" ||
|
2016-06-10 05:09:03 +08:00
|
|
|
Name == "llvm.x86.avx2.psll.dq.bs" ||
|
|
|
|
Name == "llvm.x86.avx512.psll.dq.512") {
|
|
|
|
// 128/256/512-bit shift left specified in bytes.
|
2015-02-17 04:51:59 +08:00
|
|
|
unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
2016-05-29 14:37:33 +08:00
|
|
|
Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
|
|
|
|
} else if (Name == "llvm.x86.sse2.psrl.dq.bs" ||
|
2016-06-10 05:09:03 +08:00
|
|
|
Name == "llvm.x86.avx2.psrl.dq.bs" ||
|
|
|
|
Name == "llvm.x86.avx512.psrl.dq.512") {
|
|
|
|
// 128/256/512-bit shift right specified in bytes.
|
2015-02-17 04:51:59 +08:00
|
|
|
unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
2016-05-29 14:37:33 +08:00
|
|
|
Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
|
2015-03-01 03:33:17 +08:00
|
|
|
} else if (Name == "llvm.x86.sse41.pblendw" ||
|
2016-05-31 06:54:05 +08:00
|
|
|
Name.startswith("llvm.x86.sse41.blendp") ||
|
|
|
|
Name.startswith("llvm.x86.avx.blend.p") ||
|
2015-03-01 03:33:17 +08:00
|
|
|
Name == "llvm.x86.avx2.pblendw" ||
|
2016-05-31 06:54:05 +08:00
|
|
|
Name.startswith("llvm.x86.avx2.pblendd.")) {
|
2015-03-01 03:33:17 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
Value *Op1 = CI->getArgOperand(1);
|
|
|
|
unsigned Imm = cast <ConstantInt>(CI->getArgOperand(2))->getZExtValue();
|
|
|
|
VectorType *VecTy = cast<VectorType>(CI->getType());
|
|
|
|
unsigned NumElts = VecTy->getNumElements();
|
|
|
|
|
2016-06-12 12:48:00 +08:00
|
|
|
SmallVector<uint32_t, 16> Idxs(NumElts);
|
|
|
|
for (unsigned i = 0; i != NumElts; ++i)
|
|
|
|
Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
|
2015-03-01 03:33:17 +08:00
|
|
|
|
2016-06-12 09:05:59 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
|
2016-05-31 06:54:05 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx.vinsertf128.") ||
|
2015-03-13 07:16:18 +08:00
|
|
|
Name == "llvm.x86.avx2.vinserti128") {
|
2015-03-11 00:08:36 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
Value *Op1 = CI->getArgOperand(1);
|
|
|
|
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
|
|
|
|
VectorType *VecTy = cast<VectorType>(CI->getType());
|
|
|
|
unsigned NumElts = VecTy->getNumElements();
|
2015-09-23 16:48:33 +08:00
|
|
|
|
2015-03-11 00:08:36 +08:00
|
|
|
// Mask off the high bits of the immediate value; hardware ignores those.
|
|
|
|
Imm = Imm & 1;
|
2015-09-23 16:48:33 +08:00
|
|
|
|
2015-03-11 00:08:36 +08:00
|
|
|
// Extend the second operand into a vector that is twice as big.
|
|
|
|
Value *UndefV = UndefValue::get(Op1->getType());
|
2016-06-12 12:48:00 +08:00
|
|
|
SmallVector<uint32_t, 8> Idxs(NumElts);
|
|
|
|
for (unsigned i = 0; i != NumElts; ++i)
|
|
|
|
Idxs[i] = i;
|
2016-06-12 09:05:59 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
|
2015-03-11 00:08:36 +08:00
|
|
|
|
|
|
|
// Insert the second operand into the first operand.
|
|
|
|
|
|
|
|
// Note that there is no guarantee that instruction lowering will actually
|
|
|
|
// produce a vinsertf128 instruction for the created shuffles. In
|
|
|
|
// particular, the 0 immediate case involves no lane changes, so it can
|
|
|
|
// be handled as a blend.
|
|
|
|
|
|
|
|
// Example of shuffle mask for 32-bit elements:
|
|
|
|
// Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
|
|
|
|
// Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
|
|
|
|
|
|
|
|
// The low half of the result is either the low half of the 1st operand
|
|
|
|
// or the low half of the 2nd operand (the inserted vector).
|
2016-06-12 12:48:00 +08:00
|
|
|
for (unsigned i = 0; i != NumElts / 2; ++i)
|
|
|
|
Idxs[i] = Imm ? i : (i + NumElts);
|
2015-03-11 00:08:36 +08:00
|
|
|
// The high half of the result is either the low half of the 2nd operand
|
|
|
|
// (the inserted vector) or the high half of the 1st operand.
|
2016-06-12 12:48:00 +08:00
|
|
|
for (unsigned i = NumElts / 2; i != NumElts; ++i)
|
|
|
|
Idxs[i] = Imm ? (i + NumElts / 2) : i;
|
2016-06-12 09:05:59 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
|
2016-05-31 06:54:05 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx.vextractf128.") ||
|
2015-03-13 07:16:18 +08:00
|
|
|
Name == "llvm.x86.avx2.vextracti128") {
|
2015-03-12 23:15:19 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
|
|
|
VectorType *VecTy = cast<VectorType>(CI->getType());
|
|
|
|
unsigned NumElts = VecTy->getNumElements();
|
2015-09-23 16:48:33 +08:00
|
|
|
|
2015-03-12 23:15:19 +08:00
|
|
|
// Mask off the high bits of the immediate value; hardware ignores those.
|
|
|
|
Imm = Imm & 1;
|
|
|
|
|
|
|
|
// Get indexes for either the high half or low half of the input vector.
|
2016-06-12 09:05:59 +08:00
|
|
|
SmallVector<uint32_t, 4> Idxs(NumElts);
|
2015-03-12 23:15:19 +08:00
|
|
|
for (unsigned i = 0; i != NumElts; ++i) {
|
2016-06-12 09:05:59 +08:00
|
|
|
Idxs[i] = Imm ? (i + NumElts) : i;
|
2015-03-12 23:15:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
Value *UndefV = UndefValue::get(Op0->getType());
|
2016-06-12 09:05:59 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op0, UndefV, Idxs);
|
2016-04-09 05:26:31 +08:00
|
|
|
} else if (Name == "llvm.stackprotectorcheck") {
|
|
|
|
Rep = nullptr;
|
2016-06-12 22:11:32 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx.vpermil.") ||
|
2016-06-13 10:36:48 +08:00
|
|
|
Name == "llvm.x86.sse2.pshuf.d" ||
|
2016-07-04 20:40:54 +08:00
|
|
|
Name.startswith("llvm.x86.avx512.mask.vpermil.p") ||
|
2016-06-13 10:36:48 +08:00
|
|
|
Name.startswith("llvm.x86.avx512.mask.pshuf.d.")) {
|
2016-06-12 11:10:47 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
|
|
|
VectorType *VecTy = cast<VectorType>(CI->getType());
|
|
|
|
unsigned NumElts = VecTy->getNumElements();
|
2016-07-04 20:40:54 +08:00
|
|
|
// Calculate the size of each index in the immediate.
|
2016-06-12 11:10:47 +08:00
|
|
|
unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
|
|
|
|
unsigned IdxMask = ((1 << IdxSize) - 1);
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 8> Idxs(NumElts);
|
|
|
|
// Lookup the bits for this element, wrapping around the immediate every
|
|
|
|
// 8-bits. Elements are grouped into sets of 2 or 4 elements so we need
|
|
|
|
// to offset by the first index of each group.
|
|
|
|
for (unsigned i = 0; i != NumElts; ++i)
|
|
|
|
Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
|
|
|
|
|
2016-06-12 22:11:32 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
|
2016-06-13 10:36:48 +08:00
|
|
|
|
|
|
|
if (CI->getNumArgOperands() == 4)
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
|
|
|
|
CI->getArgOperand(2));
|
|
|
|
} else if (Name == "llvm.x86.sse2.pshufl.w" ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.pshufl.w.")) {
|
2016-06-12 22:11:32 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
|
|
|
unsigned NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 16> Idxs(NumElts);
|
|
|
|
for (unsigned l = 0; l != NumElts; l += 8) {
|
|
|
|
for (unsigned i = 0; i != 4; ++i)
|
|
|
|
Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
|
|
|
|
for (unsigned i = 4; i != 8; ++i)
|
|
|
|
Idxs[i + l] = i + l;
|
|
|
|
}
|
|
|
|
|
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
|
2016-06-13 10:36:48 +08:00
|
|
|
|
|
|
|
if (CI->getNumArgOperands() == 4)
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
|
|
|
|
CI->getArgOperand(2));
|
|
|
|
} else if (Name == "llvm.x86.sse2.pshufh.w" ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.pshufh.w.")) {
|
2016-06-12 22:11:32 +08:00
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
|
|
|
|
unsigned NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 16> Idxs(NumElts);
|
|
|
|
for (unsigned l = 0; l != NumElts; l += 8) {
|
|
|
|
for (unsigned i = 0; i != 4; ++i)
|
|
|
|
Idxs[i + l] = i + l;
|
|
|
|
for (unsigned i = 0; i != 4; ++i)
|
|
|
|
Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
|
|
|
|
}
|
|
|
|
|
2016-06-12 11:10:47 +08:00
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
|
2016-06-13 10:36:48 +08:00
|
|
|
|
|
|
|
if (CI->getNumArgOperands() == 4)
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
|
|
|
|
CI->getArgOperand(2));
|
2016-07-02 22:42:35 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.movddup") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.movshdup") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.movsldup")) {
|
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
unsigned NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
|
|
|
|
|
|
|
unsigned Offset = 0;
|
|
|
|
if (Name.startswith("llvm.x86.avx512.mask.movshdup."))
|
|
|
|
Offset = 1;
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 16> Idxs(NumElts);
|
|
|
|
for (unsigned l = 0; l != NumElts; l += NumLaneElts)
|
|
|
|
for (unsigned i = 0; i != NumLaneElts; i += 2) {
|
|
|
|
Idxs[i + l + 0] = i + l + Offset;
|
|
|
|
Idxs[i + l + 1] = i + l + Offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
|
|
|
|
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
|
|
|
|
CI->getArgOperand(1));
|
2016-06-23 15:37:33 +08:00
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.punpckl") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.unpckl.")) {
|
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
Value *Op1 = CI->getArgOperand(1);
|
|
|
|
int NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 64> Idxs(NumElts);
|
|
|
|
for (int l = 0; l != NumElts; l += NumLaneElts)
|
|
|
|
for (int i = 0; i != NumLaneElts; ++i)
|
|
|
|
Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
|
|
|
|
|
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
|
|
|
|
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
|
|
|
|
CI->getArgOperand(2));
|
|
|
|
} else if (Name.startswith("llvm.x86.avx512.mask.punpckh") ||
|
|
|
|
Name.startswith("llvm.x86.avx512.mask.unpckh.")) {
|
|
|
|
Value *Op0 = CI->getArgOperand(0);
|
|
|
|
Value *Op1 = CI->getArgOperand(1);
|
|
|
|
int NumElts = CI->getType()->getVectorNumElements();
|
|
|
|
int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
|
|
|
|
|
|
|
|
SmallVector<uint32_t, 64> Idxs(NumElts);
|
|
|
|
for (int l = 0; l != NumElts; l += NumLaneElts)
|
|
|
|
for (int i = 0; i != NumLaneElts; ++i)
|
|
|
|
Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
|
|
|
|
|
|
|
|
Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
|
|
|
|
|
|
|
|
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
|
|
|
|
CI->getArgOperand(2));
|
2012-02-03 14:10:55 +08:00
|
|
|
} else {
|
2016-06-12 11:10:47 +08:00
|
|
|
llvm_unreachable("Unknown function for CallInst upgrade.");
|
2012-02-03 14:10:55 +08:00
|
|
|
}
|
|
|
|
|
2016-04-09 05:26:31 +08:00
|
|
|
if (Rep)
|
|
|
|
CI->replaceAllUsesWith(Rep);
|
2012-02-03 14:10:55 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-03-31 00:10:39 +08:00
|
|
|
std::string Name = CI->getName();
|
Move the complex address expression out of DIVariable and into an extra
argument of the llvm.dbg.declare/llvm.dbg.value intrinsics.
Previously, DIVariable was a variable-length field that has an optional
reference to a Metadata array consisting of a variable number of
complex address expressions. In the case of OpPiece expressions this is
wasting a lot of storage in IR, because when an aggregate type is, e.g.,
SROA'd into all of its n individual members, the IR will contain n copies
of the DIVariable, all alike, only differing in the complex address
reference at the end.
By making the complex address into an extra argument of the
dbg.value/dbg.declare intrinsics, all of the pieces can reference the
same variable and the complex address expressions can be uniqued across
the CU, too.
Down the road, this will allow us to move other flags, such as
"indirection" out of the DIVariable, too.
The new intrinsics look like this:
declare void @llvm.dbg.declare(metadata %storage, metadata %var, metadata %expr)
declare void @llvm.dbg.value(metadata %storage, i64 %offset, metadata %var, metadata %expr)
This patch adds a new LLVM-local tag to DIExpressions, so we can detect
and pretty-print DIExpression metadata nodes.
What this patch doesn't do:
This patch does not touch the "Indirect" field in DIVariable; but moving
that into the expression would be a natural next step.
http://reviews.llvm.org/D4919
rdar://problem/17994491
Thanks to dblaikie and dexonsmith for reviewing this patch!
Note: I accidentally committed a bogus older version of this patch previously.
llvm-svn: 218787
2014-10-02 02:55:02 +08:00
|
|
|
if (!Name.empty())
|
|
|
|
CI->setName(Name + ".old");
|
2012-06-11 02:42:51 +08:00
|
|
|
|
2011-12-12 12:26:04 +08:00
|
|
|
switch (NewFn->getIntrinsicID()) {
|
|
|
|
default:
|
2011-11-27 16:42:07 +08:00
|
|
|
llvm_unreachable("Unknown function for CallInst upgrade.");
|
2011-12-12 12:26:04 +08:00
|
|
|
|
2015-09-30 18:56:37 +08:00
|
|
|
case Intrinsic::arm_neon_vld1:
|
|
|
|
case Intrinsic::arm_neon_vld2:
|
|
|
|
case Intrinsic::arm_neon_vld3:
|
|
|
|
case Intrinsic::arm_neon_vld4:
|
|
|
|
case Intrinsic::arm_neon_vld2lane:
|
|
|
|
case Intrinsic::arm_neon_vld3lane:
|
|
|
|
case Intrinsic::arm_neon_vld4lane:
|
|
|
|
case Intrinsic::arm_neon_vst1:
|
|
|
|
case Intrinsic::arm_neon_vst2:
|
|
|
|
case Intrinsic::arm_neon_vst3:
|
|
|
|
case Intrinsic::arm_neon_vst4:
|
|
|
|
case Intrinsic::arm_neon_vst2lane:
|
|
|
|
case Intrinsic::arm_neon_vst3lane:
|
|
|
|
case Intrinsic::arm_neon_vst4lane: {
|
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-12-12 12:26:04 +08:00
|
|
|
case Intrinsic::ctlz:
|
2012-05-22 23:25:31 +08:00
|
|
|
case Intrinsic::cttz:
|
2011-12-12 12:26:04 +08:00
|
|
|
assert(CI->getNumArgOperands() == 1 &&
|
|
|
|
"Mismatch between function args and call args");
|
2015-05-19 06:13:54 +08:00
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(
|
|
|
|
NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
|
2011-12-12 12:26:04 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
2012-06-11 02:42:51 +08:00
|
|
|
|
2013-10-08 02:06:48 +08:00
|
|
|
case Intrinsic::objectsize:
|
2015-05-19 06:13:54 +08:00
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(
|
|
|
|
NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
|
2013-10-08 02:06:48 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
|
2012-07-18 08:02:16 +08:00
|
|
|
case Intrinsic::ctpop: {
|
2015-05-19 06:13:54 +08:00
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
|
2012-07-18 08:02:16 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
2012-07-14 07:25:25 +08:00
|
|
|
|
2012-06-13 15:18:53 +08:00
|
|
|
case Intrinsic::x86_xop_vfrcz_ss:
|
|
|
|
case Intrinsic::x86_xop_vfrcz_sd:
|
2015-05-19 06:13:54 +08:00
|
|
|
CI->replaceAllUsesWith(
|
|
|
|
Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
|
2012-06-13 15:18:53 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
|
2016-06-03 16:06:03 +08:00
|
|
|
case Intrinsic::x86_xop_vpermil2pd:
|
|
|
|
case Intrinsic::x86_xop_vpermil2ps:
|
|
|
|
case Intrinsic::x86_xop_vpermil2pd_256:
|
|
|
|
case Intrinsic::x86_xop_vpermil2ps_256: {
|
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
|
|
|
VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
|
|
|
|
VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
|
|
|
|
Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
|
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name));
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-06-11 02:42:51 +08:00
|
|
|
case Intrinsic::x86_sse41_ptestc:
|
|
|
|
case Intrinsic::x86_sse41_ptestz:
|
2012-06-13 15:18:53 +08:00
|
|
|
case Intrinsic::x86_sse41_ptestnzc: {
|
2012-06-11 02:42:51 +08:00
|
|
|
// The arguments for these intrinsics used to be v4f32, and changed
|
|
|
|
// to v2i64. This is purely a nop, since those are bitwise intrinsics.
|
|
|
|
// So, the only thing required is a bitcast for both arguments.
|
|
|
|
// First, check the arguments have the old type.
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Old intrinsic, add bitcasts
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
2015-04-25 05:16:07 +08:00
|
|
|
Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
|
|
|
|
|
|
|
|
Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
|
|
|
|
Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
|
|
|
|
|
2015-05-19 06:13:54 +08:00
|
|
|
CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
|
2012-06-11 02:42:51 +08:00
|
|
|
CI->replaceAllUsesWith(NewCall);
|
[x86] Fix a pretty horrible bug and inconsistency in the x86 asm
parsing (and latent bug in the instruction definitions).
This is effectively a revert of r136287 which tried to address
a specific and narrow case of immediate operands failing to be accepted
by x86 instructions with a pretty heavy hammer: it introduced a new kind
of operand that behaved differently. All of that is removed with this
commit, but the test cases are both preserved and enhanced.
The core problem that r136287 and this commit are trying to handle is
that gas accepts both of the following instructions:
insertps $192, %xmm0, %xmm1
insertps $-64, %xmm0, %xmm1
These will encode to the same byte sequence, with the immediate
occupying an 8-bit entry. The first form was fixed by r136287 but that
broke the prior handling of the second form! =[ Ironically, we would
still emit the second form in some cases and then be unable to
re-assemble the output.
The reason why the first instruction failed to be handled is because
prior to r136287 the operands ere marked 'i32i8imm' which forces them to
be sign-extenable. Clearly, that won't work for 192 in a single byte.
However, making thim zero-extended or "unsigned" doesn't really address
the core issue either because it breaks negative immediates. The correct
fix is to make these operands 'i8imm' reflecting that they can be either
signed or unsigned but must be 8-bit immediates. This patch backs out
r136287 and then changes those places as well as some others to use
'i8imm' rather than one of the extended variants.
Naturally, this broke something else. The custom DAG nodes had to be
updated to have a much more accurate type constraint of an i8 node, and
a bunch of Pat immediates needed to be specified as i8 values.
The fallout didn't end there though. We also then ceased to be able to
match the instruction-specific intrinsics to the instructions so
modified. Digging, this is because they too used i32 rather than i8 in
their signature. So I've also switched those intrinsics to i8 arguments
in line with the instructions.
In order to make the intrinsic adjustments of course, I also had to add
auto upgrading for the intrinsics.
I suspect that the intrinsic argument types may have led everything down
this rabbit hole. Pretty happy with the result.
llvm-svn: 217310
2014-09-06 18:00:01 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::x86_sse41_insertps:
|
|
|
|
case Intrinsic::x86_sse41_dppd:
|
|
|
|
case Intrinsic::x86_sse41_dpps:
|
|
|
|
case Intrinsic::x86_sse41_mpsadbw:
|
|
|
|
case Intrinsic::x86_avx_dp_ps_256:
|
|
|
|
case Intrinsic::x86_avx2_mpsadbw: {
|
|
|
|
// Need to truncate the last argument from i32 to i8 -- this argument models
|
|
|
|
// an inherently 8-bit immediate operand to these x86 instructions.
|
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
|
|
|
|
|
|
|
// Replace the last argument with a trunc.
|
|
|
|
Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
|
|
|
|
|
|
|
|
CallInst *NewCall = Builder.CreateCall(NewFn, Args);
|
|
|
|
CI->replaceAllUsesWith(NewCall);
|
2012-06-11 02:42:51 +08:00
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
2007-12-18 06:33:23 +08:00
|
|
|
}
|
2016-04-20 04:51:05 +08:00
|
|
|
|
|
|
|
case Intrinsic::thread_pointer: {
|
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
2016-06-29 02:27:25 +08:00
|
|
|
|
|
|
|
case Intrinsic::masked_load:
|
|
|
|
case Intrinsic::masked_store: {
|
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
|
|
|
CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
2012-06-13 15:18:53 +08:00
|
|
|
}
|
2007-08-04 09:51:18 +08:00
|
|
|
}
|
|
|
|
|
2016-04-19 03:11:57 +08:00
|
|
|
void llvm::UpgradeCallsToIntrinsic(Function *F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // See whether this intrinsic declaration is deprecated; if so, NewFn
  // receives the replacement function (or stays unused when the calls are
  // rewritten to new instructions instead).
  Function *NewFn;
  if (!UpgradeIntrinsicFunction(F, NewFn))
    return;

  // Rewrite every call site of the old declaration. The iterator is bumped
  // before upgrading because UpgradeIntrinsicCall erases the visited call,
  // which would invalidate an iterator still pointing at it.
  auto UI = F->user_begin();
  auto UE = F->user_end();
  while (UI != UE) {
    User *U = *UI++;
    if (auto *CI = dyn_cast<CallInst>(U))
      UpgradeIntrinsicCall(CI, NewFn);
  }

  // The old declaration has no remaining uses; drop it from the module.
  F->eraseFromParent();
}
|
2009-08-29 07:24:31 +08:00
|
|
|
|
2013-09-28 08:22:27 +08:00
|
|
|
void llvm::UpgradeInstWithTBAATag(Instruction *I) {
|
2014-11-12 05:30:22 +08:00
|
|
|
MDNode *MD = I->getMetadata(LLVMContext::MD_tbaa);
|
2013-09-28 08:22:27 +08:00
|
|
|
assert(MD && "UpgradeInstWithTBAATag should have a TBAA tag");
|
|
|
|
// Check if the tag uses struct-path aware TBAA format.
|
|
|
|
if (isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (MD->getNumOperands() == 3) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
Metadata *Elts[] = {MD->getOperand(0), MD->getOperand(1)};
|
2013-09-28 08:22:27 +08:00
|
|
|
MDNode *ScalarType = MDNode::get(I->getContext(), Elts);
|
|
|
|
// Create a MDNode <ScalarType, ScalarType, offset 0, const>
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
Metadata *Elts2[] = {ScalarType, ScalarType,
|
|
|
|
ConstantAsMetadata::get(Constant::getNullValue(
|
|
|
|
Type::getInt64Ty(I->getContext()))),
|
|
|
|
MD->getOperand(2)};
|
2013-09-28 08:22:27 +08:00
|
|
|
I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts2));
|
|
|
|
} else {
|
|
|
|
// Create a MDNode <MD, MD, offset 0>
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
Metadata *Elts[] = {MD, MD, ConstantAsMetadata::get(Constant::getNullValue(
|
|
|
|
Type::getInt64Ty(I->getContext())))};
|
2013-09-28 08:22:27 +08:00
|
|
|
I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts));
|
|
|
|
}
|
|
|
|
}
|
2013-11-15 09:34:59 +08:00
|
|
|
|
|
|
|
Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Temp = nullptr;
  Type *SrcTy = V->getType();
  // Only a pointer-to-pointer bitcast that crosses address spaces needs this
  // rewrite; any other bitcast is left untouched for the normal path.
  if (!SrcTy->isPtrOrPtrVectorTy() || !DestTy->isPtrOrPtrVectorTy() ||
      SrcTy->getPointerAddressSpace() == DestTy->getPointerAddressSpace())
    return nullptr;

  // With no target data layout available, conservatively assume pointers fit
  // in 64 bits and route the cast through i64: ptrtoint then inttoptr. The
  // intermediate instruction is handed back through Temp so the caller can
  // insert it.
  Type *MidTy = Type::getInt64Ty(V->getContext());
  Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
  return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
}
|
|
|
|
|
|
|
|
Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Type *SrcTy = C->getType();
  // Only a pointer-to-pointer bitcast that crosses address spaces needs this
  // rewrite; any other constant bitcast is left untouched.
  if (!SrcTy->isPtrOrPtrVectorTy() || !DestTy->isPtrOrPtrVectorTy() ||
      SrcTy->getPointerAddressSpace() == DestTy->getPointerAddressSpace())
    return nullptr;

  // With no target data layout available, conservatively assume pointers fit
  // in 64 bits and fold the cast through i64: ptrtoint then inttoptr.
  Type *MidTy = Type::getInt64Ty(C->getContext());
  return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
                                   DestTy);
}
|
2013-12-03 05:29:56 +08:00
|
|
|
|
|
|
|
/// Check the debug info version number, if it is out-dated, drop the debug
|
|
|
|
/// info. Return true if module is modified.
|
|
|
|
bool llvm::UpgradeDebugInfo(Module &M) {
|
2014-01-16 09:51:12 +08:00
|
|
|
unsigned Version = getDebugMetadataVersionFromModule(M);
|
|
|
|
if (Version == DEBUG_METADATA_VERSION)
|
2013-12-03 05:29:56 +08:00
|
|
|
return false;
|
|
|
|
|
2014-01-16 09:51:12 +08:00
|
|
|
bool RetCode = StripDebugInfo(M);
|
|
|
|
if (RetCode) {
|
|
|
|
DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
|
|
|
|
M.getContext().diagnose(DiagVersion);
|
|
|
|
}
|
|
|
|
return RetCode;
|
2013-12-03 05:29:56 +08:00
|
|
|
}
|
2014-06-25 23:41:00 +08:00
|
|
|
|
2016-05-26 07:14:48 +08:00
|
|
|
bool llvm::UpgradeModuleFlags(Module &M) {
  const NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  // Scan the module flags for the two Objective-C markers we care about.
  bool HasObjCFlag = false, HasClassProperties = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() < 2)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    StringRef Flag = ID->getString();
    if (Flag == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (Flag == "Objective-C Class Properties")
      HasClassProperties = true;
  }

  // "Objective-C Class Properties" is a recently added flag for Objective-C.
  // Upgrade older ObjC bitcode to carry this flag with value 0, so that
  // linking such a module against ObjC bitcode that does have the flag
  // correctly reports an error instead of silently succeeding.
  if (HasObjCFlag && !HasClassProperties) {
    M.addModuleFlag(llvm::Module::Error, "Objective-C Class Properties",
                    (uint32_t)0);
    return true;
  }

  return false;
}
|
|
|
|
|
2016-03-25 08:56:13 +08:00
|
|
|
/// Return true if \p MD is a loop-metadata operand using the legacy
/// "llvm.vectorizer." tag namespace.
static bool isOldLoopArgument(Metadata *MD) {
  auto *Tuple = dyn_cast_or_null<MDTuple>(MD);
  if (!Tuple || Tuple->getNumOperands() < 1)
    return false;
  // Old-style arguments are tuples whose first operand is a string tag
  // beginning with "llvm.vectorizer.".
  auto *Tag = dyn_cast_or_null<MDString>(Tuple->getOperand(0));
  return Tag && Tag->getString().startswith("llvm.vectorizer.");
}
|
|
|
|
|
|
|
|
/// Translate a legacy "llvm.vectorizer.*" tag into its modern spelling.
static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");

  // "unroll" was renamed to "interleave.count"; every other tag simply moved
  // under the "llvm.loop.vectorize." namespace.
  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  std::string NewTag =
      (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
          .str();
  return MDString::get(C, NewTag);
}
|
|
|
|
|
|
|
|
/// If \p MD is an old-style loop-metadata operand, return an upgraded copy
/// with the tag modernized; otherwise return \p MD unchanged.
static Metadata *upgradeLoopArgument(Metadata *MD) {
  auto *Tuple = dyn_cast_or_null<MDTuple>(MD);
  if (!Tuple || Tuple->getNumOperands() < 1)
    return MD;
  auto *Tag = dyn_cast_or_null<MDString>(Tuple->getOperand(0));
  if (!Tag || !Tag->getString().startswith("llvm.vectorizer."))
    return MD;

  // Legacy tag found: rebuild the tuple with the upgraded tag string and the
  // remaining operands copied through untouched.
  SmallVector<Metadata *, 8> NewOps;
  NewOps.reserve(Tuple->getNumOperands());
  NewOps.push_back(upgradeLoopTag(Tuple->getContext(), Tag->getString()));
  for (unsigned Idx = 1, NumOps = Tuple->getNumOperands(); Idx != NumOps; ++Idx)
    NewOps.push_back(Tuple->getOperand(Idx));

  return MDTuple::get(Tuple->getContext(), NewOps);
}
|
|
|
|
|
|
|
|
/// Upgrade a "llvm.loop" attachment: rewrite any operands that still use the
/// legacy "llvm.vectorizer." tags. Returns the original node when nothing
/// needs upgrading.
MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
  auto *Tuple = dyn_cast<MDTuple>(&N);
  if (!Tuple)
    return &N;

  // Avoid allocating a replacement tuple unless at least one operand is in
  // the old format.
  if (!llvm::any_of(Tuple->operands(), isOldLoopArgument))
    return &N;

  SmallVector<Metadata *, 8> UpgradedOps;
  UpgradedOps.reserve(Tuple->getNumOperands());
  for (Metadata *Op : Tuple->operands())
    UpgradedOps.push_back(upgradeLoopArgument(Op));

  return MDTuple::get(Tuple->getContext(), UpgradedOps);
}
|