//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Given an aggregate type which ultimately holds a single scalar element,
/// like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
  SmallVector<Constant *, 32> BoolVec;
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
    Constant *Elt = V->getElementAsConstant(I);
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
           "Unexpected constant data vector element type");
    bool Sign = V->getElementType()->isIntegerTy()
                    ? cast<ConstantInt>(Elt)->isNegative()
                    : cast<ConstantFP>(Elt)->isNegative();
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
  }
  return ConstantVector::get(BoolVec);
}

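/// Try to raise the alignment of a memcpy/memmove when a larger alignment can
/// be proven for its operands, and replace small constant-length (1/2/4/8
/// byte) transfers with a single load+store pair.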
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &AC, &DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &AC, &DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                     ->getElementType();
    if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

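/// Try to raise the alignment of a memset and, for small constant lengths
/// (1/2/4/8 bytes) with a constant fill value, replace it with a single
/// integer store of the splatted fill byte.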
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

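// Attempt to simplify SSE2/AVX2/AVX512 uniform shift intrinsics to a generic
// IR shift when the shift amount is constant. Out-of-range amounts keep the
// intrinsics' defined behaviour: logical shifts fold to zero and arithmetic
// shifts are clamped to (BitWidth - 1).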
static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if count is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count = Count.shl(BitWidth);
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  }
  else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count == 0)
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
    LogicalShift = false;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    LogicalShift = true;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
    LogicalShift = true;
    ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if all shift amounts are constant/undef.
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
  if (!CShift)
    return nullptr;

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getVectorElementType();
  int NumElts = VT->getNumElements();
  int BitWidth = SVT->getIntegerBitWidth();

  // Collect each element's shift amount.
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
  bool AnyOutOfRange = false;
  SmallVector<int, 8> ShiftAmts;
  for (int I = 0; I < NumElts; ++I) {
    auto *CElt = CShift->getAggregateElement(I);
    if (CElt && isa<UndefValue>(CElt)) {
      ShiftAmts.push_back(-1);
      continue;
    }

    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
    if (!COp)
      return nullptr;

    // Handle out of range shifts.
    // If LogicalShift - set to BitWidth (special case).
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
    APInt ShiftVal = COp->getValue();
    if (ShiftVal.uge(BitWidth)) {
      AnyOutOfRange = LogicalShift;
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
      continue;
    }

    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
  }

  // If all elements out of range or UNDEF, return vector of zeros/undefs.
  // ArithmeticShift should only hit this if they are all UNDEF.
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
  if (all_of(ShiftAmts, OutOfRange)) {
    SmallVector<Constant *, 8> ConstantVec;
    for (int Idx : ShiftAmts) {
      if (Idx < 0) {
        ConstantVec.push_back(UndefValue::get(SVT));
      } else {
        assert(LogicalShift && "Logical shift expected");
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
      }
    }
    return ConstantVector::get(ConstantVec);
  }

  // We can't handle only some out of range values with generic logical shifts.
  if (AnyOutOfRange)
    return nullptr;

  // Build the shift amount constant vector.
  SmallVector<Constant *, 8> ShiftVecAmts;
  for (int Idx : ShiftAmts) {
    if (Idx < 0)
      ShiftVecAmts.push_back(UndefValue::get(SVT));
    else
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
  }
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

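// Attempt to constant fold movmsk-style intrinsics: pack the sign bit of each
// constant vector element into the low bits of the integer result.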
static Value *simplifyX86movmsk(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Value *Arg = II.getArgOperand(0);
  Type *ResTy = II.getType();
  Type *ArgTy = Arg->getType();

  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
  if (isa<UndefValue>(Arg))
    return Constant::getNullValue(ResTy);

  // We can't easily peek through x86_mmx types.
  if (!ArgTy->isVectorTy())
    return nullptr;

  auto *C = dyn_cast<Constant>(Arg);
  if (!C)
    return nullptr;

  // Extract signbits of the vector input and pack into integer result.
  APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
  for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
    auto *COp = C->getAggregateElement(I);
    if (!COp)
      return nullptr;
    if (isa<UndefValue>(COp))
      continue;

    auto *CInt = dyn_cast<ConstantInt>(COp);
    auto *CFp = dyn_cast<ConstantFP>(COp);
    if (!CInt && !CFp)
      return nullptr;

    if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
      Result.setBit(I);
  }

  return Constant::getIntegerValue(ResTy, Result);
}

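/// Attempt to simplify SSE4.1 insertps to a shufflevector (or a zero vector)
/// when the permute control byte is a constant.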
static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

  // The immediate permute control byte looks like this:
  //  [3:0] - zero mask for each 32-bit lane
  //  [5:4] - select one 32-bit destination lane
  //  [7:6] - select one 32-bit source lane

  uint8_t Imm = CInt->getZExtValue();
  uint8_t ZMask = Imm & 0xf;
  uint8_t DestLane = (Imm >> 4) & 0x3;
  uint8_t SourceLane = (Imm >> 6) & 0x3;

  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // If all zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (ZMask == 0xf)
    return ZeroVector;

  // Initialize by passing all of the first source bits through.
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };

  // We may replace the second operand with the zero vector.
  Value *V1 = II.getArgOperand(1);

  if (ZMask) {
    // If the zero mask is being used with a single input or the zero mask
    // overrides the destination lane, this is a shuffle with the zero vector.
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
      ShuffleMask[DestLane] = SourceLane;
      // The zero mask may override the previous insert operation.
      for (unsigned i = 0; i < 4; ++i)
        if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      return nullptr;
    }
  } else {
    // Replace the selected destination lane with the selected source lane.
    ShuffleMask[DestLane] = SourceLane + 4;
  }

  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are inserting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt = Elt.lshr(Index).zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->equalsInt(0))
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {
  // From AMD documentation: "The bit index and field length are each six bits
  // in length other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getNumElements();
  assert((NumElts == 16 || NumElts == 32) &&
         "Unexpected number of elements in shuffle mask!");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[32] = {nullptr};

  // Each byte in the shuffle control mask forms an index to permute the
  // corresponding byte in the destination operand.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();

    // If the most significant bit (bit[7]) of each byte of the shuffle
    // control mask is set, then zero is written in the result byte.
    // The zero vector is in the right-hand side of the resulting
    // shufflevector.

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = Constant::getNullValue(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
  assert(NumElts == 8 || NumElts == 4 || NumElts == 2);

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[8] = {nullptr};

  // The intrinsics only read one or two bits, clear the rest.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(2);

    // The PD variants use bit 1 to select the per-lane element index, so
    // shift down to convert to a generic shuffle mask index.
    if (II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
        II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
      Index = Index.lshr(1);

    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if ((II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
         II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) &&
        ((NumElts / 2) <= I)) {
      Index += APInt(32, NumElts / 2);
    }

    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned Size = VecTy->getNumElements();
  assert(Size == 8 && "Unexpected shuffle mask size");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[8] = {nullptr};

  for (unsigned I = 0; I < Size; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(3);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
/// source vectors, unless a zero bit is set. If a zero bit is set,
/// then ignore that half of the mask and clear that half of the vector.
static Value *simplifyX86vperm2(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // The immediate permute control byte looks like this:
  // [1:0] - select 128 bits from sources for low half of destination
  // [2]   - ignore
  // [3]   - zero low half of destination
  // [5:4] - select 128 bits from sources for high half of destination
  // [6]   - ignore
// [7] - zero high half of destination
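  //
  // For example, with <8 x float> sources an immediate of 0x31 selects the
  // high half of the first source followed by the high half of the second
  // source, which becomes the generic shuffle mask <4,5,6,7,12,13,14,15>.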

  uint8_t Imm = CInt->getZExtValue();

  bool LowHalfZero = Imm & 0x08;
  bool HighHalfZero = Imm & 0x80;

  // If both zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (LowHalfZero && HighHalfZero)
    return ZeroVector;

  // If 0 or 1 zero mask bits are set, this is a simple shuffle.
  unsigned NumElts = VecTy->getNumElements();
  unsigned HalfSize = NumElts / 2;
  SmallVector<uint32_t, 8> ShuffleMask(NumElts);

  // The high bit of the selection field chooses the 1st or 2nd operand.
  bool LowInputSelect = Imm & 0x02;
  bool HighInputSelect = Imm & 0x20;

  // The low bit of the selection field chooses the low or high half
  // of the selected operand.
  bool LowHalfSelect = Imm & 0x01;
  bool HighHalfSelect = Imm & 0x10;

  // Determine which operand(s) are actually in use for this instruction.
  Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
  Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);

  // If needed, replace operands based on zero mask.
  V0 = LowHalfZero ? ZeroVector : V0;
  V1 = HighHalfZero ? ZeroVector : V1;

  // Permute low half of result.
  unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
  for (unsigned i = 0; i < HalfSize; ++i)
    ShuffleMask[i] = StartIndex + i;

  // Permute high half of result.
  StartIndex = HighHalfSelect ? HalfSize : 0;
  StartIndex += NumElts;
  for (unsigned i = 0; i < HalfSize; ++i)
    ShuffleMask[i + HalfSize] = StartIndex + i;

  return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
}

/// Decode XOP integer vector comparison intrinsics.
static Value *simplifyX86vpcom(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder,
                               bool IsSigned) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    uint64_t Imm = CInt->getZExtValue() & 0x7;
    VectorType *VecTy = cast<VectorType>(II.getType());
    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;

    switch (Imm) {
    case 0x0:
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
      break;
    case 0x1:
      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
      break;
    case 0x2:
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      break;
    case 0x3:
      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      break;
    case 0x4:
      Pred = ICmpInst::ICMP_EQ; break;
    case 0x5:
      Pred = ICmpInst::ICMP_NE; break;
    case 0x6:
      return ConstantInt::getSigned(VecTy, 0); // FALSE
    case 0x7:
      return ConstantInt::getSigned(VecTy, -1); // TRUE
    }

    if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
                                        II.getArgOperand(1)))
      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
  }
  return nullptr;
}
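
/// Fold llvm.minnum/llvm.maxnum when the result is implied by the operands:
/// identical, NaN, undef or infinite arguments, and nested min/max calls that
/// repeat an operand.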
static Value *simplifyMinnumMaxnum(const IntrinsicInst &II) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);

  // fmin(x, x) -> x
  if (Arg0 == Arg1)
    return Arg0;

  const auto *C1 = dyn_cast<ConstantFP>(Arg1);

  // fmin(x, nan) -> x
  if (C1 && C1->isNaN())
    return Arg0;

  // An undef operand folds to the other value: if undef were NaN, we would
  // return the other value anyway, and a NaN can only be returned when both
  // operands are NaN.
  //
  // fmin(undef, x) -> x
  if (isa<UndefValue>(Arg0))
    return Arg1;

  // fmin(x, undef) -> x
  if (isa<UndefValue>(Arg1))
    return Arg0;

  Value *X = nullptr;
  Value *Y = nullptr;
  if (II.getIntrinsicID() == Intrinsic::minnum) {
    // fmin(x, fmin(x, y)) -> fmin(x, y)
    // fmin(y, fmin(x, y)) -> fmin(x, y)
    if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
      if (Arg0 == X || Arg0 == Y)
        return Arg1;
    }

    // fmin(fmin(x, y), x) -> fmin(x, y)
    // fmin(fmin(x, y), y) -> fmin(x, y)
    if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
      if (Arg1 == X || Arg1 == Y)
        return Arg0;
    }

    // TODO: fmin(nnan x, inf) -> x
    // TODO: fmin(nnan ninf x, flt_max) -> x
    if (C1 && C1->isInfinity()) {
      // fmin(x, -inf) -> -inf
      if (C1->isNegative())
        return Arg1;
    }
  } else {
    assert(II.getIntrinsicID() == Intrinsic::maxnum);
    // fmax(x, fmax(x, y)) -> fmax(x, y)
    // fmax(y, fmax(x, y)) -> fmax(x, y)
    if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
      if (Arg0 == X || Arg0 == Y)
        return Arg1;
    }

    // fmax(fmax(x, y), x) -> fmax(x, y)
    // fmax(fmax(x, y), y) -> fmax(x, y)
    if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
      if (Arg1 == X || Arg1 == Y)
        return Arg0;
    }

    // TODO: fmax(nnan x, -inf) -> x
    // TODO: fmax(nnan ninf x, -flt_max) -> x
    if (C1 && C1->isInfinity()) {
      // fmax(x, inf) -> inf
      if (!C1->isNegative())
        return Arg1;
    }
  }
  return nullptr;
}
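
/// Return true if the given mask constant is all-ones or undef in every lane
/// (a per-element undef counts as "on"), so the masked operation touches every
/// element.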
static bool maskIsAllOneOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}
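
/// A masked.load whose mask is known all-ones (or undef) is just an ordinary
/// vector load of its pointer operand, e.g.
///   llvm.masked.load(%ptr, align, <all ones>, %passthru)  ->  load %ptr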
static Value *simplifyMaskedLoad(const IntrinsicInst &II,
                                 InstCombiner::BuilderTy &Builder) {
  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    Value *LoadPtr = II.getArgOperand(0);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
    return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
  }

  return nullptr;
}
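
/// A masked.store with an all-zero mask stores nothing and can be erased; with
/// an all-ones mask it becomes a plain vector store of its value operand.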
static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return IC.eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
  }

  return nullptr;
}

static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
  // If the mask is all zeros, return the "passthru" argument of the gather.
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (ConstMask && ConstMask->isNullValue())
    return IC.replaceInstUsesWith(II, II.getArgOperand(3));

  return nullptr;
}

static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
  // If the mask is all zeros, a scatter does nothing.
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (ConstMask && ConstMask->isNullValue())
    return IC.eraseInstFromFunction(II);

  return nullptr;
}
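
/// Fold cttz/ctlz calls whose result is pinned down by known bits of the
/// operand. For example, if the low three bits of the cttz operand are known
/// zero and bit 3 is known one, the call folds to the constant 3. A known
/// non-zero operand also lets us set the "zero is undef" flag.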
static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  Value *Op0 = II.getArgOperand(0);
  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (!IT)
    return nullptr;

  unsigned BitWidth = IT->getBitWidth();
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  IC.computeKnownBits(Op0, KnownZero, KnownOne, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  unsigned NumMaskBits = IsTZ ? KnownOne.countTrailingZeros()
                              : KnownOne.countLeadingZeros();
  APInt Mask = IsTZ ? APInt::getLowBitsSet(BitWidth, NumMaskBits)
                    : APInt::getHighBitsSet(BitWidth, NumMaskBits);

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if ((Mask & KnownZero) == Mask) {
    auto *C = ConstantInt::get(IT, APInt(BitWidth, NumMaskBits));
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (KnownOne != 0 || isKnownNonZero(Op0, IC.getDataLayout())) {
    if (!match(II.getArgOperand(1), m_One())) {
      II.setOperand(1, IC.Builder->getTrue());
      return &II;
    }
  }

  return nullptr;
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Constant *ZeroVec = Constant::getNullValue(II.getType());

  // Special case a zero mask since that's not a ConstantDataVector.
  // This masked load instruction creates a zero vector.
  if (isa<ConstantAggregateZero>(Mask))
    return IC.replaceInstUsesWith(II, ZeroVec);

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return nullptr;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
  Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
// on each element's most significant bit (the sign bit).
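  // (e.g. <i32 -1, i32 1, i32 0x80000000, i32 0> becomes <i1 1, i1 0, i1 1, i1 0>).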
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  // The pass-through vector for an x86 masked load is a zero vector.
  CallInst *NewMaskedLoad =
      IC.Builder->CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
  return IC.replaceInstUsesWith(II, NewMaskedLoad);
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Value *Vec = II.getOperand(2);

  // Special case a zero mask since that's not a ConstantDataVector:
  // this masked store instruction does nothing.
  if (isa<ConstantAggregateZero>(Mask)) {
    IC.eraseInstFromFunction(II);
    return true;
  }

  // The SSE2 version is too weird (e.g., unaligned but non-temporal) to do
  // anything else at this level.
  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
    return false;

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return false;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
  Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  IC.Builder->CreateMaskedStore(Vec, PtrCast, 1, BoolMask);

  // 'Replace uses' doesn't work for stores. Erase the original masked store.
  IC.eraseInstFromFunction(II);
  return true;
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0) ; &I
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.end(i1 0)   ; This one will not be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
                                      unsigned EndID, InstCombiner &IC) {
  assert(I.getIntrinsicID() == StartID &&
         "Start intrinsic does not have expected ID");
  BasicBlock::iterator BI(I), BE(I.getParent()->end());
  for (++BI; BI != BE; ++BI) {
    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
        continue;
      if (E->getIntrinsicID() == EndID &&
          haveSameOperands(I, *E, E->getNumArgOperands())) {
        IC.eraseInstFromFunction(*E);
        IC.eraseInstFromFunction(I);
        return true;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
  return nullptr;
}

Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
  return nullptr;
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallSite to do the heavy
/// lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  auto Args = CI.arg_operands();
  if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
                              &TLI, &DT, &AC))
    return replaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, &TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
                                              unsigned DemandedWidth) {
    APInt UndefElts(Width, 0);
    APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
  };
  auto SimplifyDemandedVectorEltsHigh = [this](Value *Op, unsigned Width,
                                               unsigned DemandedWidth) {
    APInt UndefElts(Width, 0);
    APInt DemandedElts = APInt::getHighBitsSet(Width, DemandedWidth);
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
  };

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, &TLI)) {
      APInt APSize(II->getType()->getIntegerBitWidth(), Size);
      // Equality check to be sure that `Size` can fit in a value of type
      // `II->getType()`
      if (APSize == Size)
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), APSize));
    }
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return replaceInstUsesWith(CI, X);

// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
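    // e.g. for an i32 x truncated to i16: bswap(trunc(bswap(x))) == trunc(x >> 16)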
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
                   IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }

  case Intrinsic::bitreverse: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bitreverse(bitreverse(x)) -> x
    if (match(IIOperand, m_Intrinsic<Intrinsic::bitreverse>(m_Value(X))))
      return replaceInstUsesWith(CI, X);
    break;
  }

  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, *Builder))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II, *this);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II, *this);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II, *this);

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return replaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return replaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      // Canonicalize constants into the RHS.
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    LLVM_FALLTHROUGH;

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    OverflowCheckFlavor OCF =
        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
    assert(OCF != OCF_INVALID && "unexpected!");

    Value *OperationResult = nullptr;
    Constant *OverflowResult = nullptr;
    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
                              *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);

    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // Canonicalize constants to the RHS.
    if (isa<ConstantFP>(Arg0) && !isa<ConstantFP>(Arg1)) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }
    if (Value *V = simplifyMinnumMaxnum(*II))
      return replaceInstUsesWith(*II, V);
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
|
2015-02-27 02:56:03 +08:00
|
|
|
case Intrinsic::ppc_qpx_qvlfs:
|
|
|
|
// Turn PPC QPX qvlfs -> load if the pointer is known aligned.
|
2016-08-05 09:06:44 +08:00
|
|
|
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
|
|
|
|
&DT) >= 16) {
|
2015-05-11 14:37:03 +08:00
|
|
|
Type *VTy = VectorType::get(Builder->getFloatTy(),
|
|
|
|
II->getType()->getVectorNumElements());
|
2015-02-27 02:56:03 +08:00
|
|
|
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
|
2015-05-11 14:37:03 +08:00
|
|
|
PointerType::getUnqual(VTy));
|
|
|
|
Value *Load = Builder->CreateLoad(Ptr);
|
|
|
|
return new FPExtInst(Load, II->getType());
|
2015-02-27 02:56:03 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case Intrinsic::ppc_qpx_qvlfd:
|
|
|
|
// Turn PPC QPX qvlfd -> load if the pointer is known aligned.
|
2016-08-05 09:06:44 +08:00
|
|
|
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
|
|
|
|
&DT) >= 32) {
|
2015-02-27 02:56:03 +08:00
|
|
|
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
|
|
|
|
PointerType::getUnqual(II->getType()));
|
|
|
|
return new LoadInst(Ptr);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case Intrinsic::ppc_qpx_qvstfs:
|
|
|
|
// Turn PPC QPX qvstfs -> store if the pointer is known aligned.
|
2016-08-05 09:06:44 +08:00
|
|
|
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
|
|
|
|
&DT) >= 16) {
|
2015-05-11 14:37:03 +08:00
|
|
|
Type *VTy = VectorType::get(Builder->getFloatTy(),
|
|
|
|
II->getArgOperand(0)->getType()->getVectorNumElements());
|
|
|
|
Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
|
|
|
|
Type *OpPtrTy = PointerType::getUnqual(VTy);
|
2015-02-27 02:56:03 +08:00
|
|
|
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
|
2015-05-11 14:37:03 +08:00
|
|
|
return new StoreInst(TOp, Ptr);
|
2015-02-27 02:56:03 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case Intrinsic::ppc_qpx_qvstfd:
|
|
|
|
// Turn PPC QPX qvstfd -> store if the pointer is known aligned.
|
2016-08-05 09:06:44 +08:00
|
|
|
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
|
|
|
|
&DT) >= 32) {
|
2015-02-27 02:56:03 +08:00
|
|
|
Type *OpPtrTy =
|
|
|
|
PointerType::getUnqual(II->getArgOperand(0)->getType());
|
|
|
|
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
|
|
|
|
return new StoreInst(II->getArgOperand(0), Ptr);
|
|
|
|
}
|
|
|
|
break;
|
2015-09-12 21:39:53 +08:00
|
|
|
|
|
|
|
case Intrinsic::x86_vcvtph2ps_128:
|
|
|
|
case Intrinsic::x86_vcvtph2ps_256: {
|
|
|
|
auto Arg = II->getArgOperand(0);
|
|
|
|
auto ArgType = cast<VectorType>(Arg->getType());
|
|
|
|
auto RetType = cast<VectorType>(II->getType());
|
|
|
|
unsigned ArgWidth = ArgType->getNumElements();
|
|
|
|
unsigned RetWidth = RetType->getNumElements();
|
|
|
|
assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
|
|
|
|
assert(ArgType->isIntOrIntVectorTy() &&
|
|
|
|
ArgType->getScalarSizeInBits() == 16 &&
|
|
|
|
"CVTPH2PS input type should be 16-bit integer vector");
|
|
|
|
assert(RetType->getScalarType()->isFloatTy() &&
|
|
|
|
"CVTPH2PS output type should be 32-bit float vector");
|
|
|
|
|
|
|
|
// Constant folding: Convert to generic half to single conversion.
|
2015-09-12 22:00:17 +08:00
|
|
|
if (isa<ConstantAggregateZero>(Arg))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
|
2015-09-12 21:39:53 +08:00
|
|
|
|
2015-09-12 22:00:17 +08:00
|
|
|
if (isa<ConstantDataVector>(Arg)) {
|
2015-09-12 21:39:53 +08:00
|
|
|
auto VectorHalfAsShorts = Arg;
|
|
|
|
if (RetWidth < ArgWidth) {
|
2016-06-12 08:41:19 +08:00
|
|
|
SmallVector<uint32_t, 8> SubVecMask;
|
2015-09-12 21:39:53 +08:00
|
|
|
for (unsigned i = 0; i != RetWidth; ++i)
|
|
|
|
SubVecMask.push_back((int)i);
|
|
|
|
VectorHalfAsShorts = Builder->CreateShuffleVector(
|
|
|
|
Arg, UndefValue::get(ArgType), SubVecMask);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto VectorHalfType =
|
|
|
|
VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
|
|
|
|
auto VectorHalfs =
|
|
|
|
Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
|
|
|
|
auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, VectorFloats);
|
2015-09-12 21:39:53 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// We only use the lowest lanes of the argument.
|
2015-09-19 19:41:53 +08:00
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
|
2015-09-12 21:39:53 +08:00
|
|
|
II->setArgOperand(0, V);
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-01-10 15:19:37 +08:00
|
|
|
case Intrinsic::x86_sse_cvtss2si:
|
|
|
|
case Intrinsic::x86_sse_cvtss2si64:
|
|
|
|
case Intrinsic::x86_sse_cvttss2si:
|
|
|
|
case Intrinsic::x86_sse_cvttss2si64:
|
|
|
|
case Intrinsic::x86_sse2_cvtsd2si:
|
|
|
|
case Intrinsic::x86_sse2_cvtsd2si64:
|
|
|
|
case Intrinsic::x86_sse2_cvttsd2si:
|
|
|
|
case Intrinsic::x86_sse2_cvttsd2si64: {
|
|
|
|
// These intrinsics only demand the 0th element of their input vectors. If
|
2010-01-05 15:32:13 +08:00
|
|
|
// we can simplify the input based on that, do so now.
|
2015-09-19 19:41:53 +08:00
|
|
|
Value *Arg = II->getArgOperand(0);
|
|
|
|
unsigned VWidth = Arg->getType()->getVectorNumElements();
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
|
2010-06-29 00:50:57 +08:00
|
|
|
II->setArgOperand(0, V);
|
2010-01-05 15:32:13 +08:00
|
|
|
return II;
|
|
|
|
}
|
2015-08-05 16:18:00 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-06-07 16:18:35 +08:00
|
|
|
case Intrinsic::x86_mmx_pmovmskb:
|
|
|
|
case Intrinsic::x86_sse_movmsk_ps:
|
|
|
|
case Intrinsic::x86_sse2_movmsk_pd:
|
|
|
|
case Intrinsic::x86_sse2_pmovmskb_128:
|
|
|
|
case Intrinsic::x86_avx_movmsk_pd_256:
|
|
|
|
case Intrinsic::x86_avx_movmsk_ps_256:
|
|
|
|
case Intrinsic::x86_avx2_pmovmskb: {
|
|
|
|
if (Value *V = simplifyX86movmsk(*II, *Builder))
|
|
|
|
return replaceInstUsesWith(*II, V);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-02-21 07:17:35 +08:00
|
|
|
case Intrinsic::x86_sse_comieq_ss:
|
|
|
|
case Intrinsic::x86_sse_comige_ss:
|
|
|
|
case Intrinsic::x86_sse_comigt_ss:
|
|
|
|
case Intrinsic::x86_sse_comile_ss:
|
|
|
|
case Intrinsic::x86_sse_comilt_ss:
|
|
|
|
case Intrinsic::x86_sse_comineq_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomieq_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomige_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomigt_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomile_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomilt_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomineq_ss:
|
|
|
|
case Intrinsic::x86_sse2_comieq_sd:
|
|
|
|
case Intrinsic::x86_sse2_comige_sd:
|
|
|
|
case Intrinsic::x86_sse2_comigt_sd:
|
|
|
|
case Intrinsic::x86_sse2_comile_sd:
|
|
|
|
case Intrinsic::x86_sse2_comilt_sd:
|
|
|
|
case Intrinsic::x86_sse2_comineq_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomieq_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomige_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomigt_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomile_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomilt_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomineq_sd: {
|
|
|
|
// These intrinsics only demand the 0th element of their input vectors. If
|
|
|
|
// we can simplify the input based on that, do so now.
|
2016-04-25 01:57:27 +08:00
|
|
|
bool MadeChange = false;
|
2016-02-21 07:17:35 +08:00
|
|
|
Value *Arg0 = II->getArgOperand(0);
|
|
|
|
Value *Arg1 = II->getArgOperand(1);
|
|
|
|
unsigned VWidth = Arg0->getType()->getVectorNumElements();
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
|
|
|
|
II->setArgOperand(0, V);
|
2016-04-25 01:57:27 +08:00
|
|
|
MadeChange = true;
|
2016-02-21 07:17:35 +08:00
|
|
|
}
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
|
|
|
|
II->setArgOperand(1, V);
|
2016-04-25 01:57:27 +08:00
|
|
|
MadeChange = true;
|
2016-02-21 07:17:35 +08:00
|
|
|
}
|
2016-04-25 01:57:27 +08:00
|
|
|
if (MadeChange)
|
|
|
|
return II;
|
2016-02-21 07:17:35 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-04-25 02:12:42 +08:00
|
|
|
case Intrinsic::x86_sse_min_ss:
|
|
|
|
case Intrinsic::x86_sse_max_ss:
|
|
|
|
case Intrinsic::x86_sse_cmp_ss:
|
|
|
|
case Intrinsic::x86_sse2_min_sd:
|
|
|
|
case Intrinsic::x86_sse2_max_sd:
|
|
|
|
case Intrinsic::x86_sse2_cmp_sd: {
|
|
|
|
// These intrinsics only demand the lowest element of the second input
|
|
|
|
// vector.
|
|
|
|
Value *Arg1 = II->getArgOperand(1);
|
|
|
|
unsigned VWidth = Arg1->getType()->getVectorNumElements();
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
|
|
|
|
II->setArgOperand(1, V);
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::x86_sse41_round_ss:
|
|
|
|
case Intrinsic::x86_sse41_round_sd: {
|
|
|
|
// These intrinsics demand the upper elements of the first input vector and
|
|
|
|
// the lowest element of the second input vector.
|
|
|
|
bool MadeChange = false;
|
|
|
|
Value *Arg0 = II->getArgOperand(0);
|
|
|
|
Value *Arg1 = II->getArgOperand(1);
|
|
|
|
unsigned VWidth = Arg0->getType()->getVectorNumElements();
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsHigh(Arg0, VWidth, VWidth - 1)) {
|
|
|
|
II->setArgOperand(0, V);
|
|
|
|
MadeChange = true;
|
|
|
|
}
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
|
|
|
|
II->setArgOperand(1, V);
|
|
|
|
MadeChange = true;
|
|
|
|
}
|
|
|
|
if (MadeChange)
|
|
|
|
return II;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-08-11 04:21:15 +08:00
|
|
|
// Constant fold ashr( <A x Bi>, Ci ).
|
2015-08-13 15:39:03 +08:00
|
|
|
// Constant fold lshr( <A x Bi>, Ci ).
|
|
|
|
// Constant fold shl( <A x Bi>, Ci ).
|
2015-08-11 04:21:15 +08:00
|
|
|
case Intrinsic::x86_sse2_psrai_d:
|
|
|
|
case Intrinsic::x86_sse2_psrai_w:
|
|
|
|
case Intrinsic::x86_avx2_psrai_d:
|
|
|
|
case Intrinsic::x86_avx2_psrai_w:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx512_psrai_q_128:
|
|
|
|
case Intrinsic::x86_avx512_psrai_q_256:
|
|
|
|
case Intrinsic::x86_avx512_psrai_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psrai_q_512:
|
|
|
|
case Intrinsic::x86_avx512_psrai_w_512:
|
2015-08-05 16:18:00 +08:00
|
|
|
case Intrinsic::x86_sse2_psrli_d:
|
|
|
|
case Intrinsic::x86_sse2_psrli_q:
|
|
|
|
case Intrinsic::x86_sse2_psrli_w:
|
|
|
|
case Intrinsic::x86_avx2_psrli_d:
|
|
|
|
case Intrinsic::x86_avx2_psrli_q:
|
|
|
|
case Intrinsic::x86_avx2_psrli_w:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx512_psrli_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psrli_q_512:
|
|
|
|
case Intrinsic::x86_avx512_psrli_w_512:
|
2015-08-13 15:39:03 +08:00
|
|
|
case Intrinsic::x86_sse2_pslli_d:
|
|
|
|
case Intrinsic::x86_sse2_pslli_q:
|
|
|
|
case Intrinsic::x86_sse2_pslli_w:
|
|
|
|
case Intrinsic::x86_avx2_pslli_d:
|
|
|
|
case Intrinsic::x86_avx2_pslli_q:
|
|
|
|
case Intrinsic::x86_avx2_pslli_w:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx512_pslli_d_512:
|
|
|
|
case Intrinsic::x86_avx512_pslli_q_512:
|
|
|
|
case Intrinsic::x86_avx512_pslli_w_512:
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86immShift(*II, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-08-05 16:18:00 +08:00
|
|
|
break;
|
|
|
|
|
2015-08-13 15:39:03 +08:00
|
|
|
case Intrinsic::x86_sse2_psra_d:
|
|
|
|
case Intrinsic::x86_sse2_psra_w:
|
|
|
|
case Intrinsic::x86_avx2_psra_d:
|
|
|
|
case Intrinsic::x86_avx2_psra_w:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx512_psra_q_128:
|
|
|
|
case Intrinsic::x86_avx512_psra_q_256:
|
|
|
|
case Intrinsic::x86_avx512_psra_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psra_q_512:
|
|
|
|
case Intrinsic::x86_avx512_psra_w_512:
|
2015-08-13 15:39:03 +08:00
|
|
|
case Intrinsic::x86_sse2_psrl_d:
|
|
|
|
case Intrinsic::x86_sse2_psrl_q:
|
|
|
|
case Intrinsic::x86_sse2_psrl_w:
|
|
|
|
case Intrinsic::x86_avx2_psrl_d:
|
|
|
|
case Intrinsic::x86_avx2_psrl_q:
|
|
|
|
case Intrinsic::x86_avx2_psrl_w:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx512_psrl_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psrl_q_512:
|
|
|
|
case Intrinsic::x86_avx512_psrl_w_512:
|
2015-08-05 16:18:00 +08:00
|
|
|
case Intrinsic::x86_sse2_psll_d:
|
|
|
|
case Intrinsic::x86_sse2_psll_q:
|
|
|
|
case Intrinsic::x86_sse2_psll_w:
|
2014-04-24 08:58:18 +08:00
|
|
|
case Intrinsic::x86_avx2_psll_d:
|
|
|
|
case Intrinsic::x86_avx2_psll_q:
|
2016-11-13 09:51:55 +08:00
|
|
|
case Intrinsic::x86_avx2_psll_w:
|
|
|
|
case Intrinsic::x86_avx512_psll_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psll_q_512:
|
|
|
|
case Intrinsic::x86_avx512_psll_w_512: {
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86immShift(*II, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-08-13 15:39:03 +08:00
|
|
|
|
|
|
|
// SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
|
|
|
|
// operand to compute the shift amount.
|
2015-09-19 19:41:53 +08:00
|
|
|
Value *Arg1 = II->getArgOperand(1);
|
|
|
|
assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
|
2015-08-13 15:39:03 +08:00
|
|
|
"Unexpected packed shift size");
|
2015-09-19 19:41:53 +08:00
|
|
|
unsigned VWidth = Arg1->getType()->getVectorNumElements();
|
2015-08-13 15:39:03 +08:00
|
|
|
|
2015-09-19 19:41:53 +08:00
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
|
2015-08-13 15:39:03 +08:00
|
|
|
II->setArgOperand(1, V);
|
|
|
|
return II;
|
|
|
|
}
|
2015-08-05 16:18:00 +08:00
|
|
|
break;
|
2015-08-13 15:39:03 +08:00
|
|
|
}
|
2015-08-05 16:18:00 +08:00
|
|
|
|
2016-06-07 18:27:15 +08:00
|
|
|
case Intrinsic::x86_avx2_psllv_d:
|
|
|
|
case Intrinsic::x86_avx2_psllv_d_256:
|
|
|
|
case Intrinsic::x86_avx2_psllv_q:
|
|
|
|
case Intrinsic::x86_avx2_psllv_q_256:
|
2016-11-13 15:26:19 +08:00
|
|
|
case Intrinsic::x86_avx512_psllv_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psllv_q_512:
|
2016-11-18 14:04:33 +08:00
|
|
|
case Intrinsic::x86_avx512_psllv_w_128:
|
|
|
|
case Intrinsic::x86_avx512_psllv_w_256:
|
|
|
|
case Intrinsic::x86_avx512_psllv_w_512:
|
2016-06-07 18:27:15 +08:00
|
|
|
case Intrinsic::x86_avx2_psrav_d:
|
|
|
|
case Intrinsic::x86_avx2_psrav_d_256:
|
2016-11-13 15:26:19 +08:00
|
|
|
case Intrinsic::x86_avx512_psrav_q_128:
|
|
|
|
case Intrinsic::x86_avx512_psrav_q_256:
|
|
|
|
case Intrinsic::x86_avx512_psrav_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psrav_q_512:
|
2016-11-18 14:04:33 +08:00
|
|
|
case Intrinsic::x86_avx512_psrav_w_128:
|
|
|
|
case Intrinsic::x86_avx512_psrav_w_256:
|
|
|
|
case Intrinsic::x86_avx512_psrav_w_512:
|
2016-06-07 18:27:15 +08:00
|
|
|
case Intrinsic::x86_avx2_psrlv_d:
|
|
|
|
case Intrinsic::x86_avx2_psrlv_d_256:
|
|
|
|
case Intrinsic::x86_avx2_psrlv_q:
|
|
|
|
case Intrinsic::x86_avx2_psrlv_q_256:
|
2016-11-13 15:26:19 +08:00
|
|
|
case Intrinsic::x86_avx512_psrlv_d_512:
|
|
|
|
case Intrinsic::x86_avx512_psrlv_q_512:
|
2016-11-18 14:04:33 +08:00
|
|
|
case Intrinsic::x86_avx512_psrlv_w_128:
|
|
|
|
case Intrinsic::x86_avx512_psrlv_w_256:
|
|
|
|
case Intrinsic::x86_avx512_psrlv_w_512:
|
2016-06-07 18:27:15 +08:00
|
|
|
if (Value *V = simplifyX86varShift(*II, *Builder))
|
|
|
|
return replaceInstUsesWith(*II, V);
|
|
|
|
break;
|
|
|
|
|
2015-04-17 01:52:13 +08:00
|
|
|
case Intrinsic::x86_sse41_insertps:
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86insertps(*II, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-04-17 01:52:13 +08:00
|
|
|
break;
|
2015-07-26 04:41:00 +08:00
|
|
|
|
2015-09-18 04:32:45 +08:00
|
|
|
case Intrinsic::x86_sse4a_extrq: {
|
|
|
|
Value *Op0 = II->getArgOperand(0);
|
|
|
|
Value *Op1 = II->getArgOperand(1);
|
|
|
|
unsigned VWidth0 = Op0->getType()->getVectorNumElements();
|
|
|
|
unsigned VWidth1 = Op1->getType()->getVectorNumElements();
|
2015-10-17 19:40:05 +08:00
|
|
|
assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
|
|
|
|
Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
|
|
|
|
VWidth1 == 16 && "Unexpected operand sizes");
|
|
|
|
|
|
|
|
// See if we're dealing with constant values.
|
|
|
|
Constant *C1 = dyn_cast<Constant>(Op1);
|
|
|
|
ConstantInt *CILength =
|
2016-09-07 20:03:03 +08:00
|
|
|
C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
|
2015-10-17 19:40:05 +08:00
|
|
|
: nullptr;
|
|
|
|
ConstantInt *CIIndex =
|
2016-09-07 20:03:03 +08:00
|
|
|
C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
|
2015-10-17 19:40:05 +08:00
|
|
|
: nullptr;
|
|
|
|
|
|
|
|
// Attempt to simplify to a constant, shuffle vector or EXTRQI call.
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-09-18 04:32:45 +08:00
|
|
|
|
2015-10-17 19:40:05 +08:00
|
|
|
// EXTRQ only uses the lowest 64-bits of the first 128-bit vector
|
|
|
|
// operands and the lowest 16-bits of the second.
|
2016-04-25 01:57:27 +08:00
|
|
|
bool MadeChange = false;
|
2015-09-18 04:32:45 +08:00
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
|
|
|
|
II->setArgOperand(0, V);
|
2016-04-25 01:57:27 +08:00
|
|
|
MadeChange = true;
|
2015-09-18 04:32:45 +08:00
|
|
|
}
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
|
|
|
|
II->setArgOperand(1, V);
|
2016-04-25 01:57:27 +08:00
|
|
|
MadeChange = true;
|
2015-09-18 04:32:45 +08:00
|
|
|
}
|
2016-04-25 01:57:27 +08:00
|
|
|
if (MadeChange)
|
|
|
|
return II;
|
2015-09-18 04:32:45 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::x86_sse4a_extrqi: {
|
2015-10-17 19:40:05 +08:00
|
|
|
// EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
|
|
|
|
// bits of the lower 64-bits. The upper 64-bits are undefined.
|
|
|
|
Value *Op0 = II->getArgOperand(0);
|
|
|
|
unsigned VWidth = Op0->getType()->getVectorNumElements();
|
|
|
|
assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
|
|
|
|
"Unexpected operand size");
|
|
|
|
|
|
|
|
// See if we're dealing with constant values.
|
|
|
|
ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
|
|
|
|
ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
|
2015-09-18 04:32:45 +08:00
|
|
|
|
2015-10-17 19:40:05 +08:00
|
|
|
// Attempt to simplify to a constant or shuffle vector.
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-10-17 19:40:05 +08:00
|
|
|
|
|
|
|
// EXTRQI only uses the lowest 64-bits of the first 128-bit vector
|
|
|
|
// operand.
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
|
2015-09-18 04:32:45 +08:00
|
|
|
II->setArgOperand(0, V);
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Intrinsic::x86_sse4a_insertq: {
|
2015-10-17 19:40:05 +08:00
|
|
|
Value *Op0 = II->getArgOperand(0);
|
|
|
|
Value *Op1 = II->getArgOperand(1);
|
|
|
|
unsigned VWidth = Op0->getType()->getVectorNumElements();
|
|
|
|
assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
|
|
|
|
Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
|
|
|
|
Op1->getType()->getVectorNumElements() == 2 &&
|
|
|
|
"Unexpected operand size");
|
|
|
|
|
|
|
|
// See if we're dealing with constant values.
|
|
|
|
Constant *C1 = dyn_cast<Constant>(Op1);
|
|
|
|
ConstantInt *CI11 =
|
2016-09-07 20:47:53 +08:00
|
|
|
C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
|
2015-10-17 19:40:05 +08:00
|
|
|
: nullptr;
|
|
|
|
|
|
|
|
// Attempt to simplify to a constant, shuffle vector or INSERTQI call.
|
|
|
|
if (CI11) {
|
2016-06-08 18:01:20 +08:00
|
|
|
const APInt &V11 = CI11->getValue();
|
2015-10-17 19:40:05 +08:00
|
|
|
APInt Len = V11.zextOrTrunc(6);
|
|
|
|
APInt Idx = V11.lshr(8).zextOrTrunc(6);
|
2016-01-30 07:27:03 +08:00
|
|
|
if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
|
2016-02-02 06:23:39 +08:00
|
|
|
return replaceInstUsesWith(*II, V);
|
2015-10-17 19:40:05 +08:00
|
|
|
}
|
2015-09-18 04:32:45 +08:00
|
|
|
|
2015-10-17 19:40:05 +08:00
|
|
|
// INSERTQ only uses the lowest 64-bits of the first 128-bit vector
|
|
|
|
// operand.
|
|
|
|
if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
|
2015-09-18 04:32:45 +08:00
|
|
|
II->setArgOperand(0, V);
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
  case Intrinsic::x86_sse4a_insertqi: {
    // INSERTQI: Extract lowest Length bits from lower half of second source
    // and insert over first source starting at Index bit. The upper 64-bits
    // are undefined.
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 2 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));

    // Attempt to simplify to a constant or shuffle vector.
    if (CILength && CIIndex) {
      APInt Len = CILength->getValue().zextOrTrunc(6);
      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
        return replaceInstUsesWith(*II, V);
    }

    // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
    // operands.
    bool MadeChange = false;
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
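    // Illustrative sketch (hypothetical mask): for blendvps with a constant
    // mask of <0x80000000, 0, 0, 0>, only lane 0 has its sign bit set, so the
    // call becomes a select on <true, false, false, false>, picking Op1 for
    // lane 0 and Op0 elsewhere.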
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    Value *Mask = II->getArgOperand(2);

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return replaceInstUsesWith(CI, Op0);

    // Zero Mask - select 1st argument.
    if (isa<ConstantAggregateZero>(Mask))
      return replaceInstUsesWith(CI, Op0);

    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
    if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
      Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
    }

    break;
  }

  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
    if (Value *V = simplifyX86pshufb(*II, *Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256:
    if (Value *V = simplifyX86vpermilvar(*II, *Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps:
    if (Value *V = simplifyX86vpermv(*II, *Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    if (Value *V = simplifyX86vperm2(*II, *Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_maskload_ps:
  case Intrinsic::x86_avx_maskload_pd:
  case Intrinsic::x86_avx_maskload_ps_256:
  case Intrinsic::x86_avx_maskload_pd_256:
  case Intrinsic::x86_avx2_maskload_d:
  case Intrinsic::x86_avx2_maskload_q:
  case Intrinsic::x86_avx2_maskload_d_256:
  case Intrinsic::x86_avx2_maskload_q_256:
    if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
      return I;
    break;

  case Intrinsic::x86_sse2_maskmov_dqu:
  case Intrinsic::x86_avx_maskstore_ps:
  case Intrinsic::x86_avx_maskstore_pd:
  case Intrinsic::x86_avx_maskstore_ps_256:
  case Intrinsic::x86_avx_maskstore_pd_256:
  case Intrinsic::x86_avx2_maskstore_d:
  case Intrinsic::x86_avx2_maskstore_q:
  case Intrinsic::x86_avx2_maskstore_d_256:
  case Intrinsic::x86_avx2_maskstore_q_256:
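    // For example, a constant all-zero mask stores nothing, so the intrinsic
    // is dead; other constant masks let the store be rewritten in terms of
    // the generic masked-store support (see simplifyX86MaskedStore).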
    if (simplifyX86MaskedStore(*II, *this))
      return nullptr;
    break;

  case Intrinsic::x86_xop_vpcomb:
  case Intrinsic::x86_xop_vpcomd:
  case Intrinsic::x86_xop_vpcomq:
  case Intrinsic::x86_xop_vpcomw:
    if (Value *V = simplifyX86vpcom(*II, *Builder, true))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_xop_vpcomub:
  case Intrinsic::x86_xop_vpcomud:
  case Intrinsic::x86_xop_vpcomuq:
  case Intrinsic::x86_xop_vpcomuw:
    if (Value *V = simplifyX86vpcom(*II, *Builder, false))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
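    // Illustrative sketch (hypothetical mask byte): on a little-endian target
    // a mask element of 3 becomes 31 - 3 == 28, and V1/V2 are swapped below,
    // so the extract still reads the byte the big-endian encoding intended.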
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign =
        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
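    // e.g. a multiply by a splat of 1 reduces to a widening cast of the other
    // operand: sext for the signed variants, zext for the unsigned ones.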
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }

  case Intrinsic::amdgcn_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::amdgcn_frexp_mant:
  case Intrinsic::amdgcn_frexp_exp: {
    Value *Src = II->getArgOperand(0);
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      int Exp;
      APFloat Significand = frexp(C->getValueAPF(), Exp,
                                  APFloat::rmNearestTiesToEven);

      if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
                                                       Significand));
      }

      // Match instruction special case behavior.
      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
        Exp = 0;

      return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
    }

    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, UndefValue::get(II->getType()));

    break;
  }
  case Intrinsic::amdgcn_class: {
    enum  {
      S_NAN = 1 << 0,        // Signaling NaN
      Q_NAN = 1 << 1,        // Quiet NaN
      N_INFINITY = 1 << 2,   // Negative infinity
      N_NORMAL = 1 << 3,     // Negative normal
      N_SUBNORMAL = 1 << 4,  // Negative subnormal
      N_ZERO = 1 << 5,       // Negative zero
      P_ZERO = 1 << 6,       // Positive zero
      P_SUBNORMAL = 1 << 7,  // Positive subnormal
      P_NORMAL = 1 << 8,     // Positive normal
      P_INFINITY = 1 << 9    // Positive infinity
    };

    const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
      N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;

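    // llvm.amdgcn.class(x, mask) tests whether x belongs to any class selected
    // by mask; for instance a mask of (S_NAN | Q_NAN) is just an isnan test,
    // which is folded to an fcmp uno below.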
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
    if (!CMask) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      if (isa<UndefValue>(Src1))
        return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
      break;
    }

    uint32_t Mask = CMask->getZExtValue();

    // If all tests are made, it doesn't matter what the value is.
    if ((Mask & FullMask) == FullMask)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));

    if ((Mask & FullMask) == 0)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));

    if (Mask == (S_NAN | Q_NAN)) {
      // Equivalent of isnan. Replace with standard fcmp.
      Value *FCmp = Builder->CreateFCmpUNO(Src0, Src0);
      FCmp->takeName(II);
      return replaceInstUsesWith(*II, FCmp);
    }

    const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
    if (!CVal) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      // Clamp mask to used bits
      if ((Mask & FullMask) != Mask) {
        CallInst *NewCall = Builder->CreateCall(II->getCalledFunction(),
          { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
        );

        NewCall->takeName(II);
        return replaceInstUsesWith(*II, NewCall);
      }

      break;
    }

    const APFloat &Val = CVal->getValueAPF();

    bool Result =
      ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
      ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
      ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
      ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
      ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
      ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
      ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
      ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
      ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
      ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());

    return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
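    // Minimal illustrative IR (hypothetical values):
    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp)
    // The restore is removable because nothing touches the stack in between.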
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        if (&*++SS->getIterator() == II)
          return eraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI(II);
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return eraseInstFromFunction(CI);

          // Bail if we cross over an intrinsic with side effects, such as
          // llvm.stacksave, llvm.read_register, or llvm.setjmp.
          if (II->mayHaveSideEffects()) {
            CannotRemove = true;
            break;
          }
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if
    // there are no allocas or calls between the restore and the return, nuke
    // the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return eraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::lifetime_start:
    // Asan needs to poison memory to detect invalid accesses, which is
    // possible even for an empty lifetime range.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
      break;

    if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
                                  Intrinsic::lifetime_end, *this))
      return nullptr;
    break;

  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    // Remove an assume if it is immediately followed by an identical assume.
    if (match(II->getNextNode(),
              m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return eraseInstFromFunction(CI);

    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return eraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return eraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
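    // Sketch of the pattern (hypothetical IR): given "%v = load i32*, i32** %p"
    // followed by "call void @llvm.assume(i1 %cmp)" where %cmp is
    // "icmp ne i32* %v, null", the load can be tagged with !nonnull metadata
    // and the assume erased.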
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, &DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return eraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }
    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return eraseInstFromFunction(*II);

    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return eraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy.  This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr))
      // Use undef of gc_relocate's type to replace it.
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

    if (auto *PT = dyn_cast<PointerType>(II->getType())) {
      // The relocation of null will be null for most any collector.
      // TODO: provide a hook for this in GCStrategy.  There might be some
      // weird collector this property does not hold for.
      if (isa<ConstantPointerNull>(DerivedPtr))
        // Use null-pointer of gc_relocate's type to replace it.
        return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));

      // isKnownNonNull -> nonnull attribute
      if (isKnownNonNullAt(DerivedPtr, II, &DT))
        II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
    }

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
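/// For example (hypothetical caller), an i8*-to-i32* bitcast applied to an
/// argument in the variadic portion of a printf-style call is lossless and
/// size-preserving, so the cast can be stripped and the original pointer
/// passed directly.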
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}

Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    replaceInstUsesWith(*From, With);
  };
  LibCallSimplifier Simplifier(DL, &TLI, InstCombineRAUW);
  if (Value *With = Simplifier.optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *findInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

/// Improvements for call and invoke instructions.
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), &TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute.  This is helpful for inlining calls to functions with null
  // checks on their arguments.
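  // For instance (hypothetical IR), in "call void @foo(i8* %p)" where %p is
  // the address of an alloca, %p is provably non-null at the call site, so
  // the argument gets the nonnull attribute and foo's null check can be
  // eliminated after inlining.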
  SmallVector<unsigned, 4> Indices;
  unsigned ArgNo = 0;

  for (Value *V : CS.args()) {
    if (V->getType()->isPointerTy() &&
        !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
        isKnownNonNullAt(V, CS.getInstruction(), &DT))
      Indices.push_back(ArgNo + 1);
    ArgNo++;
  }

  assert(ArgNo == CS.arg_size() && "sanity check");

  if (!Indices.empty()) {
    AttributeSet AS = CS.getAttributes();
    LLVMContext &Ctx = CS.getInstruction()->getContext();
    AS = AS.addAttribute(Ctx, Indices,
                         Attribute::get(Ctx, Attribute::NonNull));
    CS.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (CS.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      DEBUG(dbgs() << "Removing convergent attr from instr "
                   << CS.getInstruction() << "\n");
      CS.setNotConvergent();
      return CS.getInstruction();
    }

    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      replaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable; just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return eraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through.
    if (I) return eraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}

2016-01-21 06:24:38 +08:00
|
|
|
/// If the callee is a constexpr cast of a function, attempt to move the cast to
|
|
|
|
/// the arguments of the call/invoke.
|
2010-01-05 15:32:13 +08:00
|
|
|
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
|
2016-08-11 23:21:21 +08:00
|
|
|
auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
|
2014-04-25 13:29:35 +08:00
|
|
|
if (!Callee)
|
2010-01-05 15:32:13 +08:00
|
|
|
return false;
|
2016-08-11 23:23:56 +08:00
|
|
|
|
|
|
|
// The prototype of a thunk is a lie. Don't directly call such a function.
|
2015-01-22 06:32:04 +08:00
|
|
|
if (Callee->hasFnAttribute("thunk"))
|
|
|
|
return false;
|
2016-08-11 23:23:56 +08:00
|
|
|
|
2010-01-05 15:32:13 +08:00
|
|
|
Instruction *Caller = CS.getInstruction();
|
2012-12-08 07:16:57 +08:00
|
|
|
const AttributeSet &CallerPAL = CS.getAttributes();
|
2010-01-05 15:32:13 +08:00
|
|
|
|
|
|
|
// Okay, this is a cast from a function to a different type. Unless doing so
|
|
|
|
// would cause a type conversion of one of our arguments, change this call to
|
|
|
|
// be a direct call with arguments casted to the appropriate types.
|
|
|
|
//
|
2011-07-18 12:54:35 +08:00
|
|
|
FunctionType *FT = Callee->getFunctionType();
|
|
|
|
Type *OldRetTy = Caller->getType();
|
|
|
|
Type *NewRetTy = FT->getReturnType();
|
2010-01-05 15:32:13 +08:00
|
|
|
|
|
|
|
// Check to see if we are changing the return type...
|
|
|
|
if (OldRetTy != NewRetTy) {
|
2014-01-19 06:47:12 +08:00
|
|
|
|
|
|
|
if (NewRetTy->isStructTy())
|
|
|
|
return false; // TODO: Handle multiple return values.
|
|
|
|
|
2015-01-06 16:41:31 +08:00
|
|
|
if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
|
2013-09-18 05:10:14 +08:00
|
|
|
if (Callee->isDeclaration())
|
|
|
|
return false; // Cannot transform this return value.
|
2010-01-05 15:32:13 +08:00
|
|
|
|
2013-09-18 05:10:14 +08:00
|
|
|
if (!Caller->use_empty() &&
|
|
|
|
// void -> non-void is handled specially
|
|
|
|
!NewRetTy->isVoidTy())
|
2014-10-23 12:08:42 +08:00
|
|
|
return false; // Cannot transform this return value.
|
2013-09-18 05:10:14 +08:00
|
|
|
}
|
2010-01-05 15:32:13 +08:00
|
|
|
|
|
|
|
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
|
2013-01-19 05:53:16 +08:00
|
|
|
AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
|
2015-05-07 07:19:56 +08:00
|
|
|
if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
|
2010-01-05 15:32:13 +08:00
|
|
|
return false; // Attribute not compatible with transformed value.
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the callsite is an invoke instruction, and the return value is used by
|
|
|
|
// a PHI node in a successor, we cannot change the return type of the call
|
|
|
|
// because there is no place to put the cast instruction (without breaking
|
|
|
|
// the critical edge). Bail out in this case.
|
|
|
|
if (!Caller->use_empty())
|
|
|
|
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
|
2014-03-09 11:16:01 +08:00
|
|
|
for (User *U : II->users())
|
|
|
|
if (PHINode *PN = dyn_cast<PHINode>(U))
|
2010-01-05 15:32:13 +08:00
|
|
|
if (PN->getParent() == II->getNormalDest() ||
|
|
|
|
PN->getParent() == II->getUnwindDest())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-06-28 08:25:40 +08:00
|
|
|
unsigned NumActualArgs = CS.arg_size();
|
2010-01-05 15:32:13 +08:00
|
|
|
unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
|
|
|
|
|
2015-01-06 16:41:31 +08:00
|
|
|
// Prevent us turning:
|
|
|
|
// declare void @takes_i32_inalloca(i32* inalloca)
|
|
|
|
// call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
|
|
|
|
//
|
|
|
|
// into:
|
|
|
|
// call void @takes_i32_inalloca(i32* null)
|
2015-03-12 02:03:05 +08:00
|
|
|
//
|
|
|
|
// Similarly, avoid folding away bitcasts of byval calls.
|
|
|
|
if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
|
|
|
|
Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
|
2015-01-06 16:41:31 +08:00
|
|
|
return false;
|
|
|
|
|
2010-01-05 15:32:13 +08:00
|
|
|
CallSite::arg_iterator AI = CS.arg_begin();
|
|
|
|
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
|
2011-07-18 12:54:35 +08:00
|
|
|
Type *ParamTy = FT->getParamType(i);
|
|
|
|
Type *ActTy = (*AI)->getType();
|
2010-01-05 15:32:13 +08:00
|
|
|
|
2015-01-06 16:41:31 +08:00
|
|
|
if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
|
2010-01-05 15:32:13 +08:00
|
|
|
return false; // Cannot transform this parameter value.
|
|
|
|
|
2013-01-23 14:14:59 +08:00
|
|
|
if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
|
2015-05-07 07:19:56 +08:00
|
|
|
overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
|
2010-01-05 15:32:13 +08:00
|
|
|
return false; // Attribute not compatible with transformed value.
|
2012-02-03 08:07:04 +08:00
|
|
|
|
2014-01-28 10:38:36 +08:00
|
|
|
if (CS.isInAllocaArgument(i))
|
|
|
|
return false; // Cannot transform to and from inalloca.
|
|
|
|
|
2010-12-20 16:36:38 +08:00
|
|
|
// If the parameter is passed as a byval argument, then we have to have a
|
|
|
|
// sized type and the sized type has to have the same size as the old type.
|
2013-01-23 14:14:59 +08:00
|
|
|
if (ParamTy != ActTy &&
|
|
|
|
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
|
|
|
|
Attribute::ByVal)) {
|
2011-07-18 12:54:35 +08:00
|
|
|
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
|
2015-03-10 10:37:25 +08:00
|
|
|
if (!ParamPTy || !ParamPTy->getElementType()->isSized())
|
2010-12-20 16:36:38 +08:00
|
|
|
return false;
|
2012-02-03 08:07:04 +08:00
|
|
|
|
2013-09-28 06:18:51 +08:00
|
|
|
Type *CurElTy = ActTy->getPointerElementType();
|
2015-03-10 10:37:25 +08:00
|
|
|
if (DL.getTypeAllocSize(CurElTy) !=
|
|
|
|
DL.getTypeAllocSize(ParamPTy->getElementType()))
|
2010-12-20 16:36:38 +08:00
|
|
|
return false;
|
|
|
|
}
|
2010-01-05 15:32:13 +08:00
|
|
|
}
|
|
|
|
|
2011-02-24 13:10:56 +08:00
|
|
|
if (Callee->isDeclaration()) {
|
|
|
|
// Do not delete arguments unless we have a function body.
|
|
|
|
if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If the callee is just a declaration, don't change the varargsness of the
|
|
|
|
// call. We don't want to introduce a varargs call where one doesn't
|
|
|
|
// already exist.
|
2011-07-18 12:54:35 +08:00
|
|
|
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters are the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
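      // For example (illustrative), an i16 argument passed in the variadic
      // portion of the call is widened to i32 here, matching what
      // getPromotedType returns for small integer types.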
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  SmallVector<OperandBundleDef, 1> OpBundles;
  CS.getOperandBundlesAsDefs(OpBundles);

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(), II->getUnwindDest(),
                               Args, OpBundles);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args, OpBundles);
    NC->takeName(CI);
    cast<CallInst>(NC)->setTailCallKind(CI->getTailCallKind());
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
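  // For example (illustrative), if the original call produced an i8* but the
  // callee actually returns i32 on a target with 32-bit pointers, an inttoptr
  // of the new call's result takes the place of the old value.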
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
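/// For example (illustrative): if %tramp was set up by a call to
/// @llvm.init.trampoline naming the function @f and the nest value %nval, and
/// %p was produced from %tramp by @llvm.adjust.trampoline, then a call made
/// through %p becomes a direct call to @f with %nval spliced in as the 'nest'
/// argument.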
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());

  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.
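      // For example (illustrative), if @f is declared as
      //   declare void @f(i32, i8* nest)
      // then a one-argument call made through the trampoline becomes
      //   call void @f(i32 1, i8* %nval)
      // i.e. the chain value is appended after the original arguments.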

      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (true);
      }

      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.
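      // For example (illustrative), if FTy is void (i32) and the nest
      // parameter is the callee's first parameter with type i8*, the
      // synthesized type is void (i8*, i32).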

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      CS.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Caller)->getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}