Revert r312154 "Re-enable "[MachineCopyPropagation] Extend pass to do COPY source forwarding""
It caused PR34387: Assertion failed: (RegNo < NumRegs && "Attempting to access record for invalid register number!")

> Issues identified by buildbots addressed since original review:
> - Fixed ARMLoadStoreOptimizer bug exposed by this change in r311907.
> - The pass no longer forwards COPYs to physical register uses, since
>   doing so can break code that implicitly relies on the physical
>   register number of the use.
> - The pass no longer forwards COPYs to undef uses, since doing so
>   can break the machine verifier by creating LiveRanges that don't
>   end on a use (since the undef operand is not considered a use).
>
> [MachineCopyPropagation] Extend pass to do COPY source forwarding
>
> This change extends MachineCopyPropagation to do COPY source forwarding.
>
> This change also extends the MachineCopyPropagation pass to be able to
> be run during register allocation, after physical registers have been
> assigned, but before the virtual registers have been re-written, which
> allows it to remove virtual register COPY LiveIntervals that become dead
> through the forwarding of all of their uses.

llvm-svn: 312178
This commit is contained in:
parent 01d0265106
commit 24775a0a6c
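For context when reading the diff below: the reverted pass rewrote uses of a COPY's destination to read the COPY's source directly. A minimal machine-IR sketch (restating the example from the pass's own header comment further down; the register names are illustrative only):

    %vreg1 = COPY %vreg0
    ...
    ... = OP %vreg1    ; before forwarding
    ... = OP %vreg0    ; after forwarding; if every use of %vreg1 is
                       ; forwarded this way, the COPY and %vreg1's live
                       ; interval become dead and can be removed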
@@ -278,11 +278,6 @@ namespace llvm {
  /// MachineSinking - This pass performs sinking on machine instructions.
  extern char &MachineSinkingID;

  /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
  /// on machine instructions after register allocation but before virtual
  /// register re-writing.
  extern char &MachineCopyPropagationPreRegRewriteID;

  /// MachineCopyPropagation - This pass performs copy propagation on
  /// machine instructions.
  extern char &MachineCopyPropagationID;

@@ -232,7 +232,6 @@ void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineCombinerPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);

@@ -53,7 +53,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
  initializeMachineCSEPass(Registry);
  initializeMachineCombinerPass(Registry);
  initializeMachineCopyPropagationPass(Registry);
  initializeMachineCopyPropagationPreRegRewritePass(Registry);
  initializeMachineDominatorTreePass(Registry);
  initializeMachineFunctionPrinterPassPass(Registry);
  initializeMachineLICMPass(Registry);

@@ -7,71 +7,25 @@
//
//===----------------------------------------------------------------------===//
//
// This is a simple MachineInstr-level copy forwarding pass. It may be run at
// two places in the codegen pipeline:
//   - After register allocation but before virtual registers have been remapped
//     to physical registers.
//   - After physical register remapping.
//
// The optimizations done vary slightly based on whether virtual registers are
// still present. In both cases, this pass forwards the source of COPYs to the
// users of their destinations when doing so is legal. For example:
//
//   %vreg1 = COPY %vreg0
//   ...
//   ... = OP %vreg1
//
// If
//   - the physical register assigned to %vreg0 has not been clobbered by the
//     time of the use of %vreg1
//   - the register class constraints are satisfied
//   - the COPY def is the only value that reaches OP
// then this pass replaces the above with:
//
//   %vreg1 = COPY %vreg0
//   ...
//   ... = OP %vreg0
//
// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
// COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
// all uses of %vreg1 are changed to %vreg0) are removed.
//
// When being run with only physical registers, this pass will also remove some
// redundant COPYs. For example:
//
//   %R1 = COPY %R0
//   ... // No clobber of %R1
//   %R0 = COPY %R1 <<< Removed
//
// or
//
//   %R1 = COPY %R0
//   ... // No clobber of %R0
//   %R1 = COPY %R0 <<< Removed
// This is an extremely simple MachineInstr-level copy propagation pass.
//
//===----------------------------------------------------------------------===//

#include "LiveDebugVariables.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -84,9 +38,6 @@ using namespace llvm;
#define DEBUG_TYPE "machine-cp"

STATISTIC(NumDeletes, "Number of dead copies deleted");
STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
DEBUG_COUNTER(FwdCounter, "machine-cp-fwd",
              "Controls which register COPYs are forwarded");

namespace {
@@ -94,42 +45,19 @@ using RegList = SmallVector<unsigned, 4>;
using SourceMap = DenseMap<unsigned, RegList>;
using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;

class MachineCopyPropagation : public MachineFunctionPass,
                               private LiveRangeEdit::Delegate {
class MachineCopyPropagation : public MachineFunctionPass {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  MachineFunction *MF;
  SlotIndexes *Indexes;
  LiveIntervals *LIS;
  const VirtRegMap *VRM;
  // True if this pass is being run before virtual registers are remapped to
  // physical ones.
  bool PreRegRewrite;
  bool NoSubRegLiveness;

protected:
  MachineCopyPropagation(char &ID, bool PreRegRewrite)
      : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
  const MachineRegisterInfo *MRI;

public:
  static char ID; // Pass identification, replacement for typeid

  MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
  MachineCopyPropagation() : MachineFunctionPass(ID) {
    initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (PreRegRewrite) {
      AU.addRequired<SlotIndexes>();
      AU.addPreserved<SlotIndexes>();
      AU.addRequired<LiveIntervals>();
      AU.addPreserved<LiveIntervals>();
      AU.addRequired<VirtRegMap>();
      AU.addPreserved<VirtRegMap>();
      AU.addPreserved<LiveDebugVariables>();
      AU.addPreserved<LiveStacks>();
    }
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
@@ -137,10 +65,6 @@ using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    if (PreRegRewrite)
      return MachineFunctionProperties()
          .set(MachineFunctionProperties::Property::NoPHIs)
          .set(MachineFunctionProperties::Property::TracksLiveness);
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
@@ -150,28 +74,6 @@ using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
  void ReadRegister(unsigned Reg);
  void CopyPropagateBlock(MachineBasicBlock &MBB);
  bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
  unsigned getPhysReg(unsigned Reg, unsigned SubReg);
  unsigned getPhysReg(const MachineOperand &Opnd) {
    return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
  }
  unsigned getFullPhysReg(const MachineOperand &Opnd) {
    return getPhysReg(Opnd.getReg(), 0);
  }
  void forwardUses(MachineInstr &MI);
  bool isForwardableRegClassCopy(const MachineInstr &Copy,
                                 const MachineInstr &UseI);
  std::tuple<unsigned, unsigned, bool>
  checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
  bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
  void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
                      unsigned NewUseReg, unsigned NewUseSubReg);
  void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
                                       const MachineInstr &UseMI,
                                       unsigned OrigUseReg,
                                       unsigned NewUseReg,
                                       unsigned NewUseSubReg);
  /// LiveRangeEdit callback for eliminateDeadDefs().
  void LRE_WillEraseInstruction(MachineInstr *MI) override;

  /// Candidates for deletion.
  SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;
@@ -188,14 +90,6 @@ using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
  bool Changed;
};

class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
public:
  static char ID; // Pass identification, replacement for typeid
  MachineCopyPropagationPreRegRewrite()
      : MachineCopyPropagation(ID, true) {
    initializeMachineCopyPropagationPreRegRewritePass(*PassRegistry::getPassRegistry());
  }
};
} // end anonymous namespace

char MachineCopyPropagation::ID = 0;
@@ -205,29 +99,6 @@ char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
                "Machine Copy Propagation Pass", false, false)

/// We have two separate passes that are very similar, the only difference being
/// where they are meant to be run in the pipeline. This is done for several
/// reasons:
/// - the two passes have different dependencies
/// - some targets want to disable the later run of this pass, but not the
///   earlier one (e.g. NVPTX and WebAssembly)
/// - it allows for easier debugging via llc

char MachineCopyPropagationPreRegRewrite::ID = 0;
char &llvm::MachineCopyPropagationPreRegRewriteID = MachineCopyPropagationPreRegRewrite::ID;

INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
                      "machine-cp-prerewrite",
                      "Machine Copy Propagation Pre-Register Rewrite Pass",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
                    "machine-cp-prerewrite",
                    "Machine Copy Propagation Pre-Register Rewrite Pass", false,
                    false)

/// Remove any entry in \p Map where the register is a subregister or equal to
/// a register contained in \p Regs.
static void removeRegsFromMap(Reg2MIMap &Map, const RegList &Regs,
@@ -268,10 +139,6 @@ void MachineCopyPropagation::ClobberRegister(unsigned Reg) {
}

void MachineCopyPropagation::ReadRegister(unsigned Reg) {
  // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
  if (PreRegRewrite)
    return;

  // If 'Reg' is defined by a copy, the copy is no longer a candidate
  // for elimination.
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -303,46 +170,6 @@ static bool isNopCopy(const MachineInstr &PreviousCopy, unsigned Src,
  return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
}

/// Return the physical register assigned to \p Reg if it is a virtual register,
/// otherwise just return the physical reg from the operand itself.
///
/// If \p SubReg is 0 then return the full physical register assigned to the
/// virtual register ignoring subregs. If we aren't tracking sub-reg liveness
/// then we need to use this to be more conservative with clobbers by killing
/// all super reg and their sub reg COPYs as well. This is to prevent COPY
/// forwarding in cases like the following:
///
///   %vreg2 = COPY %vreg1:sub1
///   %vreg3 = COPY %vreg1:sub0
///   ... = OP1 %vreg2
///   ... = OP2 %vreg3
///
/// After forwarding %vreg2 (assuming this is the last use of %vreg1) and
/// VirtRegRewriter adding kill markers we have:
///
///   %vreg3 = COPY %vreg1:sub0
///   ... = OP1 %vreg1:sub1<kill>
///   ... = OP2 %vreg3
///
/// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
///
///   ... = OP1 R0:sub1, R0<imp-use,kill>
///   ... = OP2 R0:sub0
///
/// and the use of R0 by OP2 will not have a valid definition.
unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {

  // Physical registers cannot have subregs.
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return Reg;

  assert(PreRegRewrite && "Unexpected virtual register encountered");
  Reg = VRM->getPhys(Reg);
  if (SubReg && !NoSubRegLiveness)
    Reg = TRI->getSubReg(Reg, SubReg);
  return Reg;
}

/// Remove instruction \p Copy if there exists a previous copy that copies the
/// register \p Src to the register \p Def; this may happen indirectly by
/// copying the super registers.
@@ -380,350 +207,6 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
  return true;
}


/// Decide whether we should forward the destination of \param Copy to its use
/// in \param UseI based on the register class of the Copy operands. Same-class
/// COPYs are always accepted by this function, but cross-class COPYs are only
/// accepted if they are forwarded to another COPY with the operand register
/// classes reversed. For example:
///
///   RegClassA = COPY RegClassB  // Copy parameter
///   ...
///   RegClassB = COPY RegClassA  // UseI parameter
///
/// which after forwarding becomes
///
///   RegClassA = COPY RegClassB
///   ...
///   RegClassB = COPY RegClassB
///
/// so we have reduced the number of cross-class COPYs and potentially
/// introduced a nop COPY that can be removed.
bool MachineCopyPropagation::isForwardableRegClassCopy(
    const MachineInstr &Copy, const MachineInstr &UseI) {
  auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
    unsigned DstReg = Dst.getReg();
    unsigned SrcPhysReg = getPhysReg(Src);
    const TargetRegisterClass *DstRC;
    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
      DstRC = MRI->getRegClass(DstReg);
      unsigned DstSubReg = Dst.getSubReg();
      if (DstSubReg)
        SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
    } else
      DstRC = TRI->getMinimalPhysRegClass(DstReg);

    return !DstRC->contains(SrcPhysReg);
  };

  const MachineOperand &CopyDst = Copy.getOperand(0);
  const MachineOperand &CopySrc = Copy.getOperand(1);

  if (!isCross(CopyDst, CopySrc))
    return true;

  if (!UseI.isCopy())
    return false;

  assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
  return !isCross(UseI.getOperand(0), CopySrc);
}

/// Check that the subregs on the copy source operand (\p CopySrc) and the use
/// operand to be forwarded to (\p MOUse) are compatible with doing the
/// forwarding. Also computes the new register and subregister to be used in
/// the forwarded-to instruction.
std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
    const MachineOperand &CopySrc, const MachineOperand &MOUse) {
  unsigned NewUseReg = CopySrc.getReg();
  unsigned NewUseSubReg;

  if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
    // If MOUse is a virtual reg, we need to apply it to the new physical reg
    // we're going to replace it with.
    if (MOUse.getSubReg())
      NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
    // If the original use subreg isn't valid on the new src reg, we can't
    // forward it here.
    if (!NewUseReg)
      return std::make_tuple(0, 0, false);
    NewUseSubReg = 0;
  } else {
    //   %v1 = COPY %v2:sub1
    //   USE %v1:sub2
    // The new use is %v2:sub1:sub2
    NewUseSubReg =
        TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
    // Check that NewUseSubReg is valid on NewUseReg
    if (NewUseSubReg &&
        !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
      return std::make_tuple(0, 0, false);
  }

  return std::make_tuple(NewUseReg, NewUseSubReg, true);
}

/// Check that \p MI does not have implicit uses that overlap with its \p Use
/// operand (the register being replaced), since these can sometimes be
/// implicitly tied to other operands. For example, on AMDGPU:
///
///   V_MOVRELS_B32_e32 %VGPR2, %M0<imp-use>, %EXEC<imp-use>, %VGPR2_VGPR3_VGPR4_VGPR5<imp-use>
///
/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
/// way of knowing we need to update the latter when updating the former.
bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
                                                const MachineOperand &Use) {
  if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
    return false;

  for (const MachineOperand &MIUse : MI.uses())
    if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
        TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
      return true;

  return false;
}

/// Narrow the register class of the forwarded vreg so it matches any
/// instruction constraints. \p MI is the instruction being forwarded to. \p
/// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
/// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
/// what the \p MOUse will be changed to after forwarding.
///
/// If we are forwarding
///   A:RCA = COPY B:RCB
/// into
///   ... = OP A:RCA
///
/// then we need to narrow the register class of B so that it is a subclass
/// of RCA so that it meets the instruction register class constraints.
void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
                                            const MachineOperand &MOUse,
                                            unsigned NewUseReg,
                                            unsigned NewUseSubReg) {
  if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
    return;

  // Make sure the virtual reg class allows the subreg.
  if (NewUseSubReg) {
    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
    const TargetRegisterClass *NewUseRC =
        TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
    if (CurUseRC != NewUseRC) {
      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
      MRI->setRegClass(NewUseReg, NewUseRC);
    }
  }

  unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
  const TargetRegisterClass *InstRC =
      TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
  if (InstRC) {
    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
    if (NewUseSubReg)
      InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
    if (!InstRC->hasSubClassEq(CurUseRC)) {
      const TargetRegisterClass *NewUseRC =
          TRI->getCommonSubClass(InstRC, CurUseRC);
      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
      MRI->setRegClass(NewUseReg, NewUseRC);
    }
  }
}

/// Update the LiveInterval information to reflect the destination of \p Copy
/// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
/// forwarded through. It should be the destination register of \p Copy and has
/// already been replaced in \p UseMI at the point this function is called. \p
/// NewUseReg and \p NewUseSubReg are the register and subregister being
/// forwarded. They should be the source register of the \p Copy and should be
/// the value of the \p UseMI operand being forwarded at the point this function
/// is called.
void MachineCopyPropagation::updateForwardedCopyLiveInterval(
    const MachineInstr &Copy, const MachineInstr &UseMI, unsigned OrigUseReg,
    unsigned NewUseReg, unsigned NewUseSubReg) {

  assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
                              getFullPhysReg(Copy.getOperand(0))) &&
         "OrigUseReg mismatch");
  assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
                              getPhysReg(NewUseReg, 0)) &&
         "NewUseReg mismatch");

  // Extend live range starting from COPY early-clobber slot, since that
  // is where the original src live range ends.
  SlotIndex CopyUseIdx =
      Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
  SlotIndex UseIdx = Indexes->getInstructionIndex(UseMI).getRegSlot();
  if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
    LiveInterval &LI = LIS->getInterval(NewUseReg);
    LI.extendInBlock(CopyUseIdx, UseIdx);
    LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
    for (auto &S : LI.subranges())
      if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
        S.extendInBlock(CopyUseIdx, UseIdx);
  } else {
    assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
    for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
      LiveRange &LR = LIS->getRegUnit(*UI);
      LR.extendInBlock(CopyUseIdx, UseIdx);
    }
  }

  if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
    return;

  LiveInterval &LI = LIS->getInterval(OrigUseReg);

  // Can happen for undef uses.
  if (LI.empty())
    return;

  SlotIndex UseIndex = Indexes->getInstructionIndex(UseMI);
  const LiveRange::Segment *UseSeg = LI.getSegmentContaining(UseIndex);

  // Only shrink if forwarded use is the end of a segment.
  if (UseSeg->end != UseIndex.getRegSlot())
    return;

  SmallVector<MachineInstr *, 4> DeadInsts;
  LIS->shrinkToUses(&LI, &DeadInsts);
  if (!DeadInsts.empty()) {
    SmallVector<unsigned, 8> NewRegs;
    LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
        .eliminateDeadDefs(DeadInsts);
  }
}

void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
  // Remove this COPY from further consideration for forwarding.
  ClobberRegister(getFullPhysReg(MI->getOperand(0)));
  Changed = true;
}

/// Look for available copies whose destination register is used by \p MI and
/// replace the use in \p MI with the copy's source register.
void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
  // We can't generally forward uses after virtual registers have been renamed
  // because some targets generate code that has implicit dependencies on the
  // physical register numbers. For example, in PowerPC, when spilling
  // condition code registers, the following code pattern is generated:
  //
  //   %CR7 = COPY %CR0
  //   %R6 = MFOCRF %CR7
  //   %R6 = RLWINM %R6, 29, 31, 31
  //
  // where the shift amount in the RLWINM instruction depends on the source
  // register number of the MFOCRF instruction. If we were to forward %CR0 to
  // the MFOCRF instruction, the shift amount would no longer be correct.
  //
  // FIXME: It may be possible to define a target hook that checks the register
  // class or user opcode and allows some cases, but prevents cases like the
  // above from being broken to enable later register copy forwarding.
  if (!PreRegRewrite)
    return;

  if (AvailCopyMap.empty())
    return;

  // Look for non-tied explicit vreg uses that have an active COPY
  // instruction that defines the physical register allocated to them.
  // Replace the vreg with the source of the active COPY.
  for (MachineOperand &MOUse : MI.explicit_uses()) {
    // Don't forward into undef use operands since doing so can cause problems
    // with the machine verifier, since it doesn't treat undef reads as reads,
    // so we can end up with a live range that ends on an undef read, leading to
    // an error that the live range doesn't end on a read of the live range
    // register.
    if (!MOUse.isReg() || MOUse.isTied() || MOUse.isUndef())
      continue;

    unsigned UseReg = MOUse.getReg();
    if (!UseReg)
      continue;

    // See comment above check for !PreRegRewrite regarding forwarding changing
    // physical registers.
    if (!TargetRegisterInfo::isVirtualRegister(UseReg))
      continue;

    UseReg = VRM->getPhys(UseReg);

    // Don't forward COPYs via non-allocatable regs since they can have
    // non-standard semantics.
    if (!MRI->isAllocatable(UseReg))
      continue;

    auto CI = AvailCopyMap.find(UseReg);
    if (CI == AvailCopyMap.end())
      continue;

    MachineInstr &Copy = *CI->second;
    MachineOperand &CopyDst = Copy.getOperand(0);
    MachineOperand &CopySrc = Copy.getOperand(1);

    // Don't forward COPYs that are already NOPs due to register assignment.
    if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
      continue;

    // FIXME: Don't handle partial uses of wider COPYs yet.
    if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
      continue;

    // Don't forward COPYs of non-allocatable regs unless they are constant.
    unsigned CopySrcReg = CopySrc.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
        !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
      continue;

    if (!isForwardableRegClassCopy(Copy, MI))
      continue;

    unsigned NewUseReg, NewUseSubReg;
    bool SubRegOK;
    std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
        checkUseSubReg(CopySrc, MOUse);
    if (!SubRegOK)
      continue;

    if (hasImplicitOverlap(MI, MOUse))
      continue;

    if (!DebugCounter::shouldExecute(FwdCounter))
      continue;

    DEBUG(dbgs() << "MCP: Replacing "
                 << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
                 << "\n with "
                 << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
                 << "\n in "
                 << MI
                 << " from "
                 << Copy);

    narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);

    unsigned OrigUseReg = MOUse.getReg();
    MOUse.setReg(NewUseReg);
    MOUse.setSubReg(NewUseSubReg);

    DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");

    if (PreRegRewrite)
      updateForwardedCopyLiveInterval(Copy, MI, OrigUseReg, NewUseReg,
                                      NewUseSubReg);
    else
      for (MachineInstr &KMI :
           make_range(Copy.getIterator(), std::next(MI.getIterator())))
        KMI.clearRegisterKills(NewUseReg, TRI);

    ++NumCopyForwards;
    Changed = true;
  }
}

void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
@@ -732,8 +215,12 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
    ++I;

    if (MI->isCopy()) {
      unsigned Def = getPhysReg(MI->getOperand(0));
      unsigned Src = getPhysReg(MI->getOperand(1));
      unsigned Def = MI->getOperand(0).getReg();
      unsigned Src = MI->getOperand(1).getReg();

      assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
             !TargetRegisterInfo::isVirtualRegister(Src) &&
             "MachineCopyPropagation should be run after register allocation!");

      // The two copies cancel out and the source of the first copy
      // hasn't been overridden, eliminate the second one. e.g.
@@ -750,16 +237,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      //   %ECX<def> = COPY %EAX
      // =>
      //   %ECX<def> = COPY %EAX
      if (!PreRegRewrite)
        if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
          continue;

      forwardUses(*MI);

      // Src may have been changed by forwardUses()
      Src = getPhysReg(MI->getOperand(1));
      unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
      unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
      if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
        continue;

      // If Src is defined by a previous copy, the previous copy cannot be
      // eliminated.
@@ -776,10 +255,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());

      // Copy is now a candidate for deletion.
      // Only look for dead COPYs if we're not running just before
      // VirtRegRewriter, since presumably these COPYs will have already been
      // removed.
      if (!PreRegRewrite && !MRI->isReserved(Def))
      if (!MRI->isReserved(Def))
        MaybeDeadCopies.insert(MI);

      // If 'Def' is previously source of another copy, then this earlier copy's
@@ -789,11 +265,11 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      //   %xmm2<def> = copy %xmm0
      //   ...
      //   %xmm2<def> = copy %xmm9
      ClobberRegister(DefClobber);
      ClobberRegister(Def);
      for (const MachineOperand &MO : MI->implicit_operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        unsigned Reg = getFullPhysReg(MO);
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        ClobberRegister(Reg);
@@ -808,27 +284,13 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {

      // Remember source that's copied to Def. Once it's clobbered, then
      // it's no longer available for copy propagation.
      RegList &DestList = SrcMap[SrcClobber];
      if (!is_contained(DestList, DefClobber))
        DestList.push_back(DefClobber);
      RegList &DestList = SrcMap[Src];
      if (!is_contained(DestList, Def))
        DestList.push_back(Def);

      continue;
    }

    // Clobber any earlyclobber regs first.
    for (const MachineOperand &MO : MI->operands())
      if (MO.isReg() && MO.isEarlyClobber()) {
        unsigned Reg = getFullPhysReg(MO);
        // If we have a tied earlyclobber, that means it is also read by this
        // instruction, so we need to make sure we don't remove it as dead
        // later.
        if (MO.isTied())
          ReadRegister(Reg);
        ClobberRegister(Reg);
      }

    forwardUses(*MI);

    // Not a copy.
    SmallVector<unsigned, 2> Defs;
    const MachineOperand *RegMask = nullptr;
@@ -837,11 +299,14 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
        RegMask = &MO;
      if (!MO.isReg())
        continue;
      unsigned Reg = getFullPhysReg(MO);
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;

      if (MO.isDef() && !MO.isEarlyClobber()) {
      assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
             "MachineCopyPropagation should be run after register allocation!");

      if (MO.isDef()) {
        Defs.push_back(Reg);
        continue;
      } else if (MO.readsReg())
@@ -898,8 +363,6 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  // since we don't want to trust live-in lists.
  if (MBB.succ_empty()) {
    for (MachineInstr *MaybeDead : MaybeDeadCopies) {
      DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
            MaybeDead->dump());
      assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
      MaybeDead->eraseFromParent();
      Changed = true;
@@ -922,13 +385,6 @@ bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
  this->MF = &MF;
  if (PreRegRewrite) {
    Indexes = &getAnalysis<SlotIndexes>();
    LIS = &getAnalysis<LiveIntervals>();
    VRM = &getAnalysis<VirtRegMap>();
  }
  NoSubRegLiveness = !MRI->subRegLivenessEnabled();

  for (MachineBasicBlock &MBB : MF)
    CopyPropagateBlock(MBB);

@@ -88,8 +88,6 @@ static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
    cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
    cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisableCopyPropPreRegRewrite("disable-copyprop-prerewrite", cl::Hidden,
    cl::desc("Disable Copy Propagation Pre-Register Re-write pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
    cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks(
@@ -250,9 +248,6 @@ static IdentifyingPassPtr overridePass(AnalysisID StandardID,
  if (StandardID == &MachineCopyPropagationID)
    return applyDisable(TargetID, DisableCopyProp);

  if (StandardID == &MachineCopyPropagationPreRegRewriteID)
    return applyDisable(TargetID, DisableCopyPropPreRegRewrite);

  return TargetID;
}
@@ -1061,10 +1056,6 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
    // Allow targets to change the register assignments before rewriting.
    addPreRewrite();

    // Copy propagate to forward register uses and try to eliminate COPYs that
    // were not coalesced.
    addPass(&MachineCopyPropagationPreRegRewriteID);

    // Finally rewrite virtual registers.
    addPass(&VirtRegRewriterID);

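A usage sketch before the test-file hunks, assuming an assertions-enabled llc build of this era (the flag, pass name, and counter name come from the hunks above; the -debug-counter syntax is the generic DebugCounter convention, not something this commit defines):

    llc -disable-copyprop-prerewrite foo.ll    # skip only the pre-rewrite run
    llc -debug-only=machine-cp foo.ll          # trace both runs of the pass
    llc -debug-counter=machine-cp-fwd-skip=9,machine-cp-fwd-count=1 foo.ll
                                               # bisect individual COPY forwardings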
@@ -9,8 +9,7 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: halfword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255
@@ -25,8 +24,7 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: word:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255
@@ -41,8 +39,7 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: doubleword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255

@@ -8,9 +8,15 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
; CHECK-NOT: fmov
; Without advanced copy optimization, we end up with cross register
; banks copies that cannot be coalesced.
; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
; With advanced copy optimization, we end up with just one copy
; to insert the computed high part into the V register.
; CHECK-OPT-NOT: fmov
; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
; CHECK-NOT: fmov
; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
; CHECK-OPT-NOT: fmov
; CHECK: ins.d v0[1], [[COPY_REG2]]
; CHECK-NEXT: ret
;
@@ -18,9 +24,11 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
; GENERIC-NOT: fmov
; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
; GENERIC-OPT-NOT: fmov
; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
; GENERIC-NOT: fmov
; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
; GENERIC-OPT-NOT: fmov
; GENERIC: ins v0.d[1], [[COPY_REG2]]
; GENERIC-NEXT: ret
  %add = add <2 x i64> %a, %b

@@ -4,10 +4,8 @@
define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
entry:
; CHECK-LABEL: t:
; CHECK: mov [[REG2:x[0-9]+]], x3
; CHECK: mov [[REG1:x[0-9]+]], x2
; CHECK: mov x0, x2
; CHECK: mov x1, x3
; CHECK: mov x0, [[REG1:x[0-9]+]]
; CHECK: mov x1, [[REG2:x[0-9]+]]
; CHECK: bl _foo
; CHECK: mov x0, [[REG1]]
; CHECK: mov x1, [[REG2]]

@@ -489,7 +489,7 @@ else:

; CHECK-COMMON-LABEL: test_phi:
; CHECK-COMMON: mov x[[PTR:[0-9]+]], x0
; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x0]
; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x[[PTR]]]
; CHECK-COMMON: [[LOOP:LBB[0-9_]+]]:
; CHECK-COMMON: mov.16b v[[R:[0-9]+]], v[[AB]]
; CHECK-COMMON: ldr h[[AB]], [x[[PTR]]]

@@ -17,9 +17,6 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
  %val = zext i1 %test to i32
; CHECK: cset {{[xw][0-9]+}}, ne

; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]

  store i32 %val, i32* @var

  call void @bar()
@@ -28,7 +25,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
; Currently, the comparison is emitted again. An MSR/MRS pair would also be
; acceptable, but assuming the call preserves NZCV is not.
  br i1 %test, label %iftrue, label %iffalse
; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
; CHECK: cmp [[LHS]], [[RHS]]
; CHECK: b.eq

iftrue:

@@ -8,9 +8,10 @@
define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
;CHECK-LABEL: test
entry:
; A53: mov [[DATA:w[0-9]+]], w1
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str w1, {{.*}}
; A53: str [[DATA]], {{.*}}

  %0 = bitcast %struct1* %fde to i8*
  tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)

@@ -7,8 +7,8 @@ declare void @foo(i32)
define void @test(i32 %px) {
; CHECK_LABEL: test:
; CHECK_LABEL: %entry
; CHECK: subs [[REG0:w[0-9]+]],
; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
; CHECK: subs
; CHECK-NEXT: csel
entry:
  %sub = add nsw i32 %px, -1
  %cmp = icmp slt i32 %px, 1

@@ -547,16 +547,16 @@ define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz() #1 {
; GCN: s_mov_b32 s5, s32
; GCN: s_add_u32 s32, s32, 0x300

; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-57-9][0-9]*]], s14
; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-68-9][0-9]*]], s15
; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-79][0-9]*]], s16
; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]

; GCN-DAG: s_mov_b32 s6, s14
; GCN-DAG: s_mov_b32 s7, s15
; GCN-DAG: s_mov_b32 s8, s16
; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
; GCN: s_swappc_b64

; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4

@@ -2,10 +2,10 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}vgpr:
; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN: v_mov_b32_e32 v1, v0
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN: s_waitcnt expcnt(0)
; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN-NOT: s_endpgm
define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
@@ -204,13 +204,13 @@ bb:
}

; GCN-LABEL: {{^}}both:
; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: s_mov_b32 s1, s2
; GCN: s_waitcnt expcnt(0)
; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN: v_mov_b32_e32 v1, v0
; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
; GCN-DAG: s_add_i32 s0, s3, 2
; GCN-DAG: s_mov_b32 s2, s3
; GCN-DAG: s_mov_b32 s1, s2
; GCN: s_mov_b32 s2, s3
; GCN: s_waitcnt expcnt(0)
; GCN-NOT: s_endpgm
define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:

@@ -287,8 +287,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {

  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
  %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK-ARMV7: cmp [[OLDVAL]], r1
; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK-ARMV7: dmb ish
@@ -306,8 +305,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: bx lr

; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK-T2: cmp [[OLDVAL]], r1
; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
; CHECK-T2: dmb ish

@@ -182,7 +182,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
; CHECK-APPLE: beq
; CHECK-APPLE: mov r0, #16
; CHECK-APPLE: malloc
; CHECK-APPLE: strb r{{.*}}, [r0, #8]
; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
; CHECK-APPLE: ble
; CHECK-APPLE: mov r8, [[ID]]

@@ -165,7 +165,7 @@ entry:
; MMR3: subu16 $5, $[[T19]], $[[T20]]

; MMR6: move $[[T0:[0-9]+]], $7
; MMR6: sw $7, 8($sp)
; MMR6: sw $[[T0]], 8($sp)
; MMR6: move $[[T1:[0-9]+]], $5
; MMR6: sw $4, 12($sp)
; MMR6: lw $[[T2:[0-9]+]], 48($sp)

@@ -14,8 +14,7 @@ define double @foo3(double %a) nounwind {
  ret double %r

; CHECK: @foo3
; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
; CHECK: xsmaddmdp
; CHECK: xsmaddadp
}

@@ -75,7 +75,7 @@ entry:

; CHECK-DAG: mr [[REG:[0-9]+]], 3
; CHECK-DAG: li 0, 1076
; CHECK-DAG: stw 3,
; CHECK: stw [[REG]],

; CHECK: #APP
; CHECK: sc

@@ -23,7 +23,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30

@@ -235,9 +235,8 @@ entry:

; CHECK-LABEL: test_load_add_i32
; CHECK: membar
; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[V2]]
; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[U]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:

|
@ -598,7 +598,7 @@ declare void @abort() #0
|
|||
define i32 @b_to_bx(i32 %value) {
|
||||
; CHECK-LABEL: b_to_bx:
|
||||
; DISABLE: push {r7, lr}
|
||||
; CHECK: cmp r0, #49
|
||||
; CHECK: cmp r1, #49
|
||||
; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
|
||||
; ENABLE: push {r7, lr}
|
||||
|
||||
|
|
|
@@ -7,7 +7,7 @@ define i32 @f(i32 %a, i32 %b) {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: imull %ecx, %edx
; CHECK-NEXT: imull %edx, %edx
; CHECK-NEXT: imull %eax, %ecx
; CHECK-NEXT: imull %eax, %eax
; CHECK-NEXT: addl %edx, %eax

@@ -106,7 +106,7 @@ entry:
; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
; CHECK-DAG: movl %[[r2]], 4(%esp)
; CHECK-DAG: movl %edx, (%esp)
; CHECK-DAG: movl %[[r1]], (%esp)
; CHECK: movl %esp, %[[reg:[^ ]*]]
; CHECK: pushl %[[reg]]
; CHECK: calll _addrof_i64

@@ -407,6 +407,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: packuswb %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: psrld $1, %xmm12
; SSE2-NEXT: pand %xmm0, %xmm12
@@ -443,7 +444,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-NEXT: movdqu %xmm7, (%rax)
; SSE2-NEXT: movdqu %xmm11, (%rax)
; SSE2-NEXT: movdqu %xmm13, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8:

@@ -12,11 +12,11 @@ define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>*
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovaps (%rbx), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovaps (%r15), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%rdx), %ymm2
; CHECK-NEXT: vmovaps (%r14), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload

@@ -9,10 +9,10 @@ define void @bar__512(<16 x i32>* %var) #0 {
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: vmovups (%rbx), %zmm0
; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
; CHECK-NEXT: vmovaps %zmm1, (%rdi)
; CHECK-NEXT: vmovaps %zmm1, (%rbx)
; CHECK-NEXT: callq _Print__512
; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
; CHECK-NEXT: callq _Print__512

@@ -466,7 +466,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL_X32-NEXT: calll _test11
; KNL_X32-NEXT: movl %eax, %ebx
; KNL_X32-NEXT: movzbl %al, %eax
; KNL_X32-NEXT: movzbl %bl, %eax
; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %edi, (%esp)

@@ -1171,6 +1171,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: kmovw %esi, %k0
; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: kshiftlw $6, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %ecx
@@ -1183,7 +1184,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k0
; KNL-NEXT: kshiftlw $7, %k2, %k1
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: kshiftlw $7, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
@@ -1195,16 +1197,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vpmovm2q %k1, %zmm1
; SKX-NEXT: kmovd %ecx, %k0
; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
; SKX-NEXT: kshiftlb $7, %k2, %k1
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kshiftlb $7, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1216,6 +1222,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-NEXT: kmovd %esi, %k0
; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
; AVX512BW-NEXT: kmovd %k2, %eax
; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %ecx
@@ -1228,7 +1235,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
@@ -1241,16 +1249,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
; AVX512DQ-NEXT: kmovw %ecx, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0

@@ -2003,7 +2003,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: vpblendvb %ymm3, %ymm2, %ymm7, %ymm7
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: movl %esi, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
@@ -2014,7 +2014,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: movl %esi, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
@@ -2887,7 +2887,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: vpblendvb %ymm3, %ymm2, %ymm7, %ymm7
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: movl %esi, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
@@ -2898,7 +2898,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: movl %esi, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2

@@ -38,7 +38,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
; SSE2-LABEL: test_negative_zero_1:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: xorps %xmm2, %xmm2

@ -231,8 +231,8 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
|
|||
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
|
||||
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
|
||||
; SSE-NEXT: movaps %xmm2, %xmm6
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
|
||||
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
|
||||
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
|
||||
; SSE-NEXT: movaps {{.*#+}} xmm7
|
||||
; SSE-NEXT: movaps %xmm0, %xmm2
|
||||
; SSE-NEXT: andps %xmm7, %xmm2
|
||||
|
@ -247,7 +247,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
|
|||
; SSE-NEXT: orps %xmm0, %xmm4
|
||||
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
|
||||
; SSE-NEXT: movaps %xmm1, %xmm0
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
|
||||
; SSE-NEXT: andps %xmm7, %xmm0
|
||||
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
|
||||
; SSE-NEXT: andps %xmm8, %xmm3
|
||||
|
@ -294,7 +294,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
|
|||
; SSE-NEXT: orps %xmm6, %xmm1
|
||||
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
|
||||
; SSE-NEXT: movaps %xmm3, %xmm1
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
|
||||
; SSE-NEXT: andps %xmm5, %xmm1
|
||||
; SSE-NEXT: xorps %xmm6, %xmm6
|
||||
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
|
||||
|
|
|
@ -14,7 +14,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
|
|||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
|
||||
; SSE-NEXT: movaps %xmm0, %xmm2
|
||||
; SSE-NEXT: addss %xmm0, %xmm2
|
||||
; SSE-NEXT: addss %xmm2, %xmm2
|
||||
; SSE-NEXT: mulss %xmm1, %xmm2
|
||||
; SSE-NEXT: mulss %xmm0, %xmm0
|
||||
; SSE-NEXT: mulss %xmm1, %xmm1
|
||||
|
@ -58,9 +58,9 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
|
|||
; SSE-LABEL: complex_square_f64:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movaps %xmm0, %xmm1
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
|
||||
; SSE-NEXT: movaps %xmm0, %xmm2
|
||||
; SSE-NEXT: addsd %xmm0, %xmm2
|
||||
; SSE-NEXT: addsd %xmm2, %xmm2
|
||||
; SSE-NEXT: mulsd %xmm1, %xmm2
|
||||
; SSE-NEXT: mulsd %xmm0, %xmm0
|
||||
; SSE-NEXT: mulsd %xmm1, %xmm1
|
||||
|
@ -161,9 +161,9 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
|
|||
; SSE-LABEL: complex_mul_f64:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movaps %xmm0, %xmm2
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
|
||||
; SSE-NEXT: movaps %xmm1, %xmm3
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
|
||||
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
|
||||
; SSE-NEXT: movaps %xmm3, %xmm4
|
||||
; SSE-NEXT: mulsd %xmm0, %xmm4
|
||||
; SSE-NEXT: mulsd %xmm1, %xmm0
|
||||
|
|
|
@@ -318,7 +318,7 @@ define i64 @PR23590(i64 %x) nounwind {
; X64: # BB#0: # %entry
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: shrq $12, %rdx
; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039

@@ -18,7 +18,7 @@ declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)

; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1

@@ -47,7 +47,7 @@ define float @test_fmaxf_minsize(float %x, float %y) minsize {

; CHECK-LABEL: @test_fmax
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1

@@ -74,7 +74,7 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x, x86_fp80 %y) {

; CHECK-LABEL: @test_intrinsic_fmaxf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1

@@ -95,7 +95,7 @@ define float @test_intrinsic_fmaxf(float %x, float %y) {

; CHECK-LABEL: @test_intrinsic_fmax
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
@@ -18,7 +18,7 @@ declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

; CHECK-LABEL: @test_fminf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1

@@ -40,7 +40,7 @@ define float @test_fminf(float %x, float %y) {

; CHECK-LABEL: @test_fmin
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1

@@ -67,7 +67,7 @@ define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {

; CHECK-LABEL: @test_intrinsic_fminf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1

@@ -87,7 +87,7 @@ define float @test_intrinsic_fminf(float %x, float %y) {

; CHECK-LABEL: @test_intrinsic_fmin
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
@@ -227,7 +227,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)

@@ -275,7 +275,7 @@ define fp128 @acosl(fp128 %x) #0 {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
@@ -908,16 +908,16 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: not_a_hsub_2:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: subss %xmm3, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: subss %xmm3, %xmm0
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: subss %xmm4, %xmm1

@@ -965,10 +965,10 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: not_a_hsub_3:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: subsd %xmm2, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: subsd %xmm0, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movapd %xmm2, %xmm0
@@ -103,7 +103,7 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test5_undef:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq

@@ -168,7 +168,7 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -386,7 +386,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $48, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi

@@ -472,7 +472,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi

@@ -657,7 +657,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-I686-NEXT: movaps %xmm0, %xmm1
; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; CHECK-I686-NEXT: movss %xmm1, (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, %si
@@ -162,7 +162,6 @@ define void @testPR4459(x86_fp80 %a) {
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _ceil
; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistpl %st(0)
; CHECK-NEXT: ## InlineAsm End

@@ -24,7 +24,7 @@ define void @bar(i32 %X) {
call void @foo()
; CHECK-LABEL: bar:
; CHECK: callq foo
; CHECK-NEXT: movl %edi, %r15d
; CHECK-NEXT: movl %eax, %r15d
call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
ret void
}

@@ -27,7 +27,7 @@ define void @print_framealloc_from_fp(i8* %fp) {

; X64-LABEL: print_framealloc_from_fp:
; X64: movq %rcx, %[[parent_fp:[a-z]+]]
; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
; X64: movq %[[str]], %rcx
; X64: callq printf
@@ -159,7 +159,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %esi
; X32-NEXT: movl %esi, %ebx
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload

@@ -752,7 +752,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
; X32-NEXT: movl %ebx, %esi
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload

@@ -898,6 +898,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0

@@ -909,7 +910,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0

@@ -1364,7 +1365,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0

@@ -2441,7 +2442,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload

@@ -4264,6 +4265,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: adcq %rdi, %rbp
; X64-NEXT: setb %bl
; X64-NEXT: movzbl %bl, %ebx

@@ -4273,12 +4275,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rcx, %r12
; X64-NEXT: movq %rcx, %r8
; X64-NEXT: movq %r11, %r12
; X64-NEXT: movq %r11, %r8
; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill

@@ -4307,7 +4309,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax
; X64-NEXT: movq %rsi, %r13
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill

@@ -4320,7 +4322,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %r8, %rbp
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %r9, %rax
; X64-NEXT: adcq %rcx, %rax

@@ -4332,7 +4334,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %rdx, %rax
; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq 32(%r13), %rax

@@ -4348,10 +4351,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: addq %r9, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill

@@ -4369,7 +4371,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rbx, %r11
; X64-NEXT: addq %rcx, %r11
; X64-NEXT: adcq %rsi, %rbp
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: setb %bl

@@ -4390,11 +4392,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r10
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: movq %rcx, %r12
; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r9, %rdx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r11, %r8
; X64-NEXT: adcq %r11, %r15
; X64-NEXT: adcq %r8, %r15
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rax, %r14
; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill

@@ -4490,12 +4492,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %r12
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: movq %r10, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rsi, %rbx

@@ -4522,7 +4525,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
; X64-NEXT: movq %r10, %rbx
; X64-NEXT: movq %r10, %rax
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx

@@ -4539,7 +4542,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %rbx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbp, %r8

@@ -4570,7 +4573,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
; X64-NEXT: mulq %r11
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload

@@ -4650,12 +4653,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %r10
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %r11, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rbx

@@ -4785,7 +4789,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rbp
; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbx

@@ -4845,7 +4849,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r9
; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14

@@ -4902,8 +4906,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r15
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %r8, %rdi
; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbx, %r8

@@ -4986,12 +4990,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rcx, %r14
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: movq %r10, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %r11, %rbx

@@ -5019,7 +5024,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq $0, %r14
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq %r13, %rbx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r8

@@ -5032,7 +5038,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rbx

@@ -5066,12 +5072,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: movq %r10, %rsi
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rcx, %rdi

@@ -5147,7 +5154,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r10
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rsi, %rdi

@@ -5159,16 +5166,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
; X64-NEXT: addq %rax, %r12
; X64-NEXT: addq %rbx, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
; X64-NEXT: adcq %rdx, %r15
; X64-NEXT: adcq %r14, %r15
; X64-NEXT: addq %rdi, %r12
; X64-NEXT: adcq %rcx, %r15
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload

@@ -5232,7 +5239,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: addq %rax, %rbp
; X64-NEXT: addq %r9, %rbp
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rsi, %rbp

@@ -5410,7 +5417,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq 88(%rsi), %rax
; X64-NEXT: movq %rsi, %r9
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp

@@ -5446,12 +5453,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %r8, %r10
; X64-NEXT: addq %rbx, %rsi
; X64-NEXT: adcq %rbp, %r10
; X64-NEXT: movq 64(%r9), %r13
; X64-NEXT: movq %r9, %rdi
; X64-NEXT: movq 64(%rdi), %r13
; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq 72(%r9), %r9
; X64-NEXT: movq 72(%rdi), %r9
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp

@@ -5479,8 +5487,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r12, %rcx
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: adcq %rdx, %r8
; X64-NEXT: addq %r15, %rcx
; X64-NEXT: adcq %r11, %r8
; X64-NEXT: addq %rbp, %rcx
; X64-NEXT: adcq %rbx, %r8
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload

@@ -5532,13 +5540,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: setb %r10b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: movq %r8, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: movq %r8, %r12
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdi, %r12
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx

@@ -5577,7 +5586,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r12, %rsi
; X64-NEXT: mulq %r12
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 104(%rbp), %r8
@@ -909,7 +909,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq 8(%rsi), %rbp
; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: mulq %rdx
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax

@@ -932,24 +932,23 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rbp, %r14
; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq %rbx, %rsi
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: movq %r10, %rbx
; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r10, %rax
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10
; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: # kill: %RAX<kill>
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15
; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15

@@ -988,8 +987,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rax, %r11
; X64-NEXT: addq %rax, %r10
; X64-NEXT: adcq %rdx, %r13
; X64-NEXT: addq %r11, %r10
; X64-NEXT: adcq %r14, %r13
; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13
; X64-NEXT: addq %r8, %r10

@@ -1001,7 +1000,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %rcx, %r9
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %r12

@@ -1032,7 +1031,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rax, %r11
; X64-NEXT: addq %rbp, %r11
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14
@@ -7,7 +7,7 @@ define i128 @foo(i128 %t, i128 %u) {
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: imulq %rdi, %rcx
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: mulq %r8
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: imulq %r8, %rsi
; X64-NEXT: addq %rsi, %rdx
@@ -9,7 +9,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm2, %xmm1

@@ -143,10 +143,10 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]

@@ -386,7 +386,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm3, %xmm2

@@ -398,7 +398,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2

@@ -567,10 +567,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]

@@ -583,10 +583,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5

@@ -774,7 +774,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6

@@ -786,7 +786,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6

@@ -796,7 +796,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6

@@ -806,7 +806,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6

@@ -821,7 +821,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm6, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]

@@ -939,10 +939,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]

@@ -955,10 +955,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm9, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: pmullw %xmm9, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4

@@ -970,10 +970,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm6, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5

@@ -985,10 +985,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm7, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5

@@ -1006,7 +1006,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm0
@@ -5,7 +5,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
; CHECK-LABEL: pow_wrapper:
; CHECK: # BB#0:
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
@@ -25,7 +25,7 @@ define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
@@ -49,16 +49,16 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
@@ -638,7 +638,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE41-LABEL: test14:
; SSE41: ## BB#0: ## %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]
@@ -61,7 +61,7 @@ false:

; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
; CHECK: movl %ecx, %eax
; CHECK: cmpl %edx, %ecx
; CHECK: cmpl %edx, %eax
; CHECK: jge LBB1_2
; CHECK: pushl %eax
; CHECK: movl $4092, %eax
@@ -132,7 +132,7 @@ define float @f32_estimate(float %x) #1 {
; SSE: # BB#0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm2
; SSE-NEXT: addss {{.*}}(%rip), %xmm2
; SSE-NEXT: mulss {{.*}}(%rip), %xmm1

@@ -178,7 +178,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
; SSE: # BB#0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: addps {{.*}}(%rip), %xmm2
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1

@@ -228,7 +228,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
; SSE-NEXT: addps %xmm0, %xmm2

@@ -236,7 +236,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: rsqrtps %xmm1, %xmm5
; SSE-NEXT: movaps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm3, %xmm3
; SSE-NEXT: mulps %xmm1, %xmm3
; SSE-NEXT: addps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm4, %xmm3
@@ -16,7 +16,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: test4:
; X32: # BB#0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT: subss %xmm1, %xmm2

@@ -26,7 +26,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X64-LABEL: test4:
; X64: # BB#0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT: subss %xmm1, %xmm2
@@ -406,9 +406,9 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm4
@@ -126,7 +126,7 @@ define void @test6(i32 %a) gc "statepoint-example" {
; CHECK-NEXT: Lcfi11:
; CHECK-NEXT: .cfi_offset %rbx, -16
; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq _baz
; CHECK-NEXT: Ltmp6:
; CHECK-NEXT: callq _bar

@@ -153,13 +153,13 @@ entry:
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 5
; CHECK-NEXT: .short 6
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 3
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: Ltmp2-_test2
@@ -61,9 +61,9 @@ define i32 @back_to_back_deopt(i32 %a, i32 %b, i32 %c) #1
gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but there need to be three stack slots created
; CHECK-DAG: movl %edi, 12(%rsp)
; CHECK-DAG: movl %esi, 8(%rsp)
; CHECK-DAG: movl %edx, 4(%rsp)
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
@@ -1018,12 +1018,12 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]

@@ -1126,12 +1126,12 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]

@@ -1316,11 +1316,11 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]

@@ -1560,7 +1560,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]

@@ -1572,11 +1572,11 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]

@@ -1687,7 +1687,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1698,7 +1698,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1865,7 +1865,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1876,7 +1876,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
@@ -1611,7 +1611,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
 ; SSE-LABEL: uitofp_2i64_to_4f32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: movq %xmm1, %rax
 ; SSE-NEXT: testq %rax, %rax
 ; SSE-NEXT: js .LBB39_1
 ; SSE-NEXT: # BB#2:
@@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
 ; SSE-LABEL: uitofp_4i64_to_4f32_undef:
 ; SSE: # BB#0:
 ; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: movq %xmm1, %rax
 ; SSE-NEXT: testq %rax, %rax
 ; SSE-NEXT: js .LBB41_1
 ; SSE-NEXT: # BB#2:

@@ -437,7 +437,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ; SSE42: # BB#0:
 ; SSE42-NEXT: movdqa %xmm0, %xmm2
 ; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE42-NEXT: pxor %xmm3, %xmm0
 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1

@@ -35,7 +35,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 ; X32: # BB#0: # %entry
 ; X32-NEXT: movdqa %xmm0, %xmm2
 ; X32-NEXT: psllw $5, %xmm1
-; X32-NEXT: movdqa %xmm0, %xmm3
+; X32-NEXT: movdqa %xmm2, %xmm3
 ; X32-NEXT: psllw $4, %xmm3
 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
 ; X32-NEXT: movdqa %xmm1, %xmm0
@@ -47,7 +47,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 ; X32-NEXT: movdqa %xmm1, %xmm0
 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; X32-NEXT: movdqa %xmm2, %xmm3
-; X32-NEXT: paddb %xmm2, %xmm3
+; X32-NEXT: paddb %xmm3, %xmm3
 ; X32-NEXT: paddb %xmm1, %xmm1
 ; X32-NEXT: movdqa %xmm1, %xmm0
 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -58,7 +58,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movdqa %xmm0, %xmm2
 ; X64-NEXT: psllw $5, %xmm1
-; X64-NEXT: movdqa %xmm0, %xmm3
+; X64-NEXT: movdqa %xmm2, %xmm3
 ; X64-NEXT: psllw $4, %xmm3
 ; X64-NEXT: pand {{.*}}(%rip), %xmm3
 ; X64-NEXT: movdqa %xmm1, %xmm0
@@ -70,7 +70,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; X64-NEXT: movdqa %xmm2, %xmm3
-; X64-NEXT: paddb %xmm2, %xmm3
+; X64-NEXT: paddb %xmm3, %xmm3
 ; X64-NEXT: paddb %xmm1, %xmm1
 ; X64-NEXT: movdqa %xmm1, %xmm0
 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2

@@ -992,7 +992,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psrad $31, %xmm1
 ; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: psubd %xmm0, %xmm3
+; SSE41-NEXT: psubd %xmm2, %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
 ; SSE41-NEXT: movaps %xmm3, %xmm0

@@ -176,13 +176,13 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_div7_16i8:
 ; SSE2: # BB#0:
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: pmullw %xmm3, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
@@ -482,13 +482,13 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_rem7_16i8:
 ; SSE2: # BB#0:
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: pmullw %xmm3, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
@@ -504,7 +504,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: paddb %xmm2, %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2

@@ -481,7 +481,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: psrlw $2, %xmm1
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2

@@ -361,7 +361,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddw %xmm1, %xmm4
+; SSE41-NEXT: paddw %xmm4, %xmm4
 ; SSE41-NEXT: movdqa %xmm3, %xmm6
 ; SSE41-NEXT: psllw $8, %xmm6
 ; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -386,7 +386,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE41-NEXT: psllw $4, %xmm2
 ; SSE41-NEXT: por %xmm0, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: paddw %xmm1, %xmm1
 ; SSE41-NEXT: movdqa %xmm3, %xmm4
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -631,10 +631,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; SSE41-NEXT: psubb %xmm3, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm1, %xmm5
 ; SSE41-NEXT: psllw $4, %xmm5
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm1, %xmm4
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: movdqa %xmm4, %xmm5
@@ -644,13 +644,13 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm4, %xmm5
+; SSE41-NEXT: paddb %xmm5, %xmm5
 ; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
 ; SSE41-NEXT: psllw $5, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm5
 ; SSE41-NEXT: psrlw $4, %xmm5
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
@@ -1191,7 +1191,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
 ; SSE41-LABEL: constant_rotate_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm3
 ; SSE41-NEXT: psllw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
@@ -1203,7 +1203,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm1, %xmm3

@@ -243,7 +243,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
 ; SSSE3-LABEL: sext_16i8_to_8i32:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: psrad $24, %xmm0
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
@@ -312,7 +312,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
 ; SSSE3-LABEL: sext_16i8_to_16i32:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: psrad $24, %xmm0
 ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
@@ -443,7 +443,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
 ; SSSE3-LABEL: sext_16i8_to_4i64:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSSE3-NEXT: movdqa %xmm0, %xmm2
 ; SSSE3-NEXT: psrad $31, %xmm2
@@ -499,7 +499,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
 ; SSE2-LABEL: sext_16i8_to_8i64:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: psrad $31, %xmm2
@@ -1112,7 +1112,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm4
 ; SSE2-NEXT: psrad $31, %xmm4
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@@ -1131,7 +1131,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
 ; SSSE3-NEXT: movdqa %xmm1, %xmm2
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
 ; SSSE3-NEXT: psrad $31, %xmm4
 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]

@@ -274,7 +274,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm1, %xmm3
+; SSE41-NEXT: paddw %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psraw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0

@@ -245,7 +245,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm1, %xmm3
+; SSE41-NEXT: paddw %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -407,7 +407,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm3
 ; SSE41-NEXT: psrlw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -679,7 +679,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: pshufb %xmm0, %xmm1
 ; SSE41-NEXT: psllw $5, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psrlw $4, %xmm4
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@@ -1101,7 +1101,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
 ; SSE41-LABEL: constant_shift_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm2
 ; SSE41-NEXT: psrlw $4, %xmm2
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]

@@ -202,7 +202,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE41-NEXT: psllw $4, %xmm1
 ; SSE41-NEXT: por %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm1, %xmm3
+; SSE41-NEXT: paddw %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psllw $8, %xmm4
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -361,7 +361,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm3
 ; SSE41-NEXT: psllw $4, %xmm3
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -373,7 +373,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: paddb %xmm1, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -627,7 +627,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: pshufb %xmm0, %xmm1
 ; SSE41-NEXT: psllw $5, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psllw $4, %xmm4
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@@ -639,7 +639,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
 ; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm2, %xmm1
+; SSE41-NEXT: paddb %xmm1, %xmm1
 ; SSE41-NEXT: paddb %xmm3, %xmm3
 ; SSE41-NEXT: movdqa %xmm3, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -957,7 +957,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
 ; SSE41-LABEL: constant_shift_v16i8:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm2
 ; SSE41-NEXT: psllw $4, %xmm2
 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
@@ -968,7 +968,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm1, %xmm2
+; SSE41-NEXT: paddb %xmm2, %xmm2
 ; SSE41-NEXT: paddb %xmm0, %xmm0
 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0

@@ -2792,7 +2792,7 @@ define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: PR22377:
 ; SSE: # BB#0: # %entry
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; SSE-NEXT: addps %xmm0, %xmm1
 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]

@@ -4964,7 +4964,7 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
 ; SSE-LABEL: mul_add_const_v4i64_v4i32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]

@@ -246,7 +246,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -261,7 +261,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -399,7 +399,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -700,7 +700,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -715,7 +715,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -1582,7 +1582,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
 ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; SSE41-NEXT: retq
 ;
@@ -1630,7 +1630,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
 ; SSE41: # BB#0: # %entry
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
 ; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE41-NEXT: retq
 ;

@@ -3344,12 +3344,12 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm3, %xmm8
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
 ; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -3487,12 +3487,12 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
 ; SSE2-NEXT: movdqa %xmm0, %xmm10
 ; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm0, %xmm3
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4225,12 +4225,12 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm3, %xmm8
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
 ; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -4368,12 +4368,12 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-NEXT: movdqa %xmm2, %xmm9
 ; SSE2-NEXT: movdqa %xmm0, %xmm10
 ; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE2-NEXT: movdqa %xmm12, %xmm3
 ; SSE2-NEXT: pxor %xmm0, %xmm3
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4890,7 +4890,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test122:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5164,7 +5164,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test124:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5467,7 +5467,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test126:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5795,7 +5795,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test128:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -6047,7 +6047,7 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -6190,7 +6190,7 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm0, %xmm9
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -6941,7 +6941,7 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
 ; SSE2-NEXT: movdqa %xmm14, %xmm2
 ; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -7084,7 +7084,7 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
 ; SSE2-NEXT: movdqa %xmm12, %xmm9
 ; SSE2-NEXT: pxor %xmm0, %xmm9
 ; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
 ; SSE2-NEXT: movdqa %xmm13, %xmm2
 ; SSE2-NEXT: pxor %xmm0, %xmm2
 ; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -7610,7 +7610,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test154:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -7882,7 +7882,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test156:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8183,7 +8183,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test158:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8509,7 +8509,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
 ; SSE2-LABEL: test160:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
 ; SSE2-NEXT: movdqa %xmm3, %xmm7
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -10289,7 +10289,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
 ; SSE4: # BB#0: # %entry
 ; SSE4-NEXT: movdqa %xmm0, %xmm2
 ; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE4-NEXT: pxor %xmm3, %xmm0
 ; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -10768,7 +10768,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
 ; SSE4: # BB#0: # %entry
 ; SSE4-NEXT: movdqa %xmm0, %xmm2
 ; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
 ; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
 ; SSE4-NEXT: pxor %xmm3, %xmm0
 ; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2

@@ -74,7 +74,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
 ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
 ; X86-SSE2-NEXT: movss %xmm0, (%eax)
 ; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm0, 4(%eax)

@@ -19,7 +19,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
 ; X86-SSE2-NEXT: movups %xmm0, (%eax)
 ; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
 ; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
@@ -100,7 +100,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
 ; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
 ; X86-SSE2-NEXT: movss %xmm0, (%eax)
 ; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: movss %xmm0, 4(%eax)

@@ -23,7 +23,7 @@ target triple = "x86_64-apple-macosx"
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Store %a in the alloca.
@@ -69,7 +69,7 @@ attributes #0 = { "no-frame-pointer-elim"="false" }
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.
@@ -115,7 +115,7 @@ attributes #1 = { "no-frame-pointer-elim"="true" }
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
 ; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.

@@ -17,7 +17,7 @@ target triple = "x86_64-apple-macosx"
 ; Compare the arguments and jump to exit.
 ; No prologue needed.
 ; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, %edi
+; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
 ; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Prologue code.
@@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx"
 ; Compare the arguments and jump to exit.
 ; After the prologue is set.
 ; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, %edi
+; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
 ; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
 ; Store %a in the alloca.

@@ -24,7 +24,7 @@

 ; CHECK: .debug_loc contents:
 ; CHECK-NEXT: 0x00000000:
-; CHECK-NEXT: 0x000000000000001f - 0x000000000000005a: DW_OP_reg3 RBX
+; CHECK-NEXT: 0x000000000000001f - 0x000000000000003c: DW_OP_reg3 RBX
 ; We should only have one entry
 ; CHECK-NOT: :