[MachineCopyPropagation] Extend pass to do COPY source forwarding
Summary: This change extends MachineCopyPropagation to do COPY source forwarding and adds an additional run of the pass to the default pass pipeline just after register allocation. This version of this patch uses the newly added MachineOperand::isRenamable bit to avoid forwarding registers in such a way as to violate constraints that aren't captured in the Machine IR (e.g. ABI or ISA constraints). This change is a continuation of the work started in D30751.

Reviewers: qcolombet, javed.absar, MatzeB, jonpa, tstellar

Subscribers: tpr, mgorny, mcrosier, nhaehnle, nemanjai, jyknight, hfinkel, arsenm, inouehrs, eraman, sdardis, guyblank, fedor.sergeev, aheejin, dschuff, jfb, myatsina, llvm-commits

Differential Revision: https://reviews.llvm.org/D41835

llvm-svn: 323991
parent a95bd9f724
commit 94503c7bc3
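The following sketch illustrates the forwarding transformation on AArch64 MIR. It is a hand-written example mirroring the test1 case in the machine-cp.mir test added by this patch, not an excerpt from the patch itself. Before the pass runs:

    renamable $x1 = COPY $x0
    $x0 = SUBXri renamable $x1, 1, 0

After forwarding, the use of $x1 is rewritten to read the COPY source directly, which leaves the COPY as a candidate for the pass's existing dead-copy elimination:

    renamable $x1 = COPY $x0
    $x0 = SUBXri $x0, 1, 0

The new machine-cp-fwd debug counter declared below can be used to bisect forwarding decisions; assuming the standard DebugCounter command-line syntax, an invocation along the lines of -debug-counter=machine-cp-fwd-skip=N,machine-cp-fwd-count=M restricts which COPYs are forwarded.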
@@ -9,6 +9,35 @@
//
// This is an extremely simple MachineInstr-level copy propagation pass.
//
// This pass forwards the source of COPYs to the users of their destinations
// when doing so is legal. For example:
//
//   %reg1 = COPY %reg0
//   ...
//   ... = OP %reg1
//
// If
//   - %reg0 has not been clobbered by the time of the use of %reg1
//   - the register class constraints are satisfied
//   - the COPY def is the only value that reaches OP
// then this pass replaces the above with:
//
//   %reg1 = COPY %reg0
//   ...
//   ... = OP %reg0
//
// This pass also removes some redundant COPYs. For example:
//
//    %R1 = COPY %R0
//    ... // No clobber of %R1
//    %R0 = COPY %R1 <<< Removed
//
// or
//
//    %R1 = COPY %R0
//    ... // No clobber of %R0
//    %R1 = COPY %R0 <<< Removed
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
@@ -23,11 +52,13 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
@@ -37,6 +68,9 @@ using namespace llvm;
#define DEBUG_TYPE "machine-cp"

STATISTIC(NumDeletes, "Number of dead copies deleted");
STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
DEBUG_COUNTER(FwdCounter, "machine-cp-fwd",
              "Controls which register COPYs are forwarded");

namespace {

@@ -73,6 +107,10 @@ using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
    void ReadRegister(unsigned Reg);
    void CopyPropagateBlock(MachineBasicBlock &MBB);
    bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
    void forwardUses(MachineInstr &MI);
    bool isForwardableRegClassCopy(const MachineInstr &Copy,
                                   const MachineInstr &UseI, unsigned UseIdx);
    bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);

    /// Candidates for deletion.
    SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;
@@ -208,6 +246,152 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
  return true;
}

/// Decide whether we should forward the source of \param Copy to its use in
/// \param UseI based on the physical register class constraints of the opcode
/// and avoiding introducing more cross-class COPYs.
bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
                                                       const MachineInstr &UseI,
                                                       unsigned UseIdx) {
  unsigned CopySrcReg = Copy.getOperand(1).getReg();

  // If the new register meets the opcode register constraints, then allow
  // forwarding.
  if (const TargetRegisterClass *URC =
          UseI.getRegClassConstraint(UseIdx, TII, TRI))
    return URC->contains(CopySrcReg);

  if (!UseI.isCopy())
    return false;

  /// COPYs don't have register class constraints, so if the user instruction
  /// is a COPY, we just try to avoid introducing additional cross-class
  /// COPYs. For example:
  ///
  ///   RegClassA = COPY RegClassB  // Copy parameter
  ///   ...
  ///   RegClassB = COPY RegClassA  // UseI parameter
  ///
  /// which after forwarding becomes
  ///
  ///   RegClassA = COPY RegClassB
  ///   ...
  ///   RegClassB = COPY RegClassB
  ///
  /// so we have reduced the number of cross-class COPYs and potentially
  /// introduced a nop COPY that can be removed.
  const TargetRegisterClass *UseDstRC =
      TRI->getMinimalPhysRegClass(UseI.getOperand(0).getReg());

  const TargetRegisterClass *SuperRC = UseDstRC;
  for (TargetRegisterClass::sc_iterator SuperRCI = UseDstRC->getSuperClasses();
       SuperRC; SuperRC = *SuperRCI++)
    if (SuperRC->contains(CopySrcReg))
      return true;

  return false;
}

/// Check that \p MI does not have implicit uses that overlap with its \p Use
/// operand (the register being replaced), since these can sometimes be
/// implicitly tied to other operands. For example, on AMDGPU:
///
/// V_MOVRELS_B32_e32 %VGPR2, %M0<imp-use>, %EXEC<imp-use>, %VGPR2_VGPR3_VGPR4_VGPR5<imp-use>
///
/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
/// way of knowing we need to update the latter when updating the former.
bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
                                                const MachineOperand &Use) {
  for (const MachineOperand &MIUse : MI.uses())
    if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
        MIUse.isUse() && TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
      return true;

  return false;
}

/// Look for available copies whose destination register is used by \p MI and
/// replace the use in \p MI with the copy's source register.
void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
  if (AvailCopyMap.empty())
    return;

  // Look for non-tied explicit vreg uses that have an active COPY
  // instruction that defines the physical register allocated to them.
  // Replace the vreg with the source of the active COPY.
  for (unsigned OpIdx = 0, OpEnd = MI.getNumOperands(); OpIdx < OpEnd;
       ++OpIdx) {
    MachineOperand &MOUse = MI.getOperand(OpIdx);
    // Don't forward into undef use operands since doing so can cause problems
    // with the machine verifier, since it doesn't treat undef reads as reads,
    // so we can end up with a live range that ends on an undef read, leading to
    // an error that the live range doesn't end on a read of the live range
    // register.
    if (!MOUse.isReg() || MOUse.isTied() || MOUse.isUndef() || MOUse.isDef() ||
        MOUse.isImplicit())
      continue;

    if (!MOUse.getReg())
      continue;

    // Check that the register is marked 'renamable' so we know it is safe to
    // rename it without violating any constraints that aren't expressed in the
    // IR (e.g. ABI or opcode requirements).
    if (!MOUse.isRenamable())
      continue;

    auto CI = AvailCopyMap.find(MOUse.getReg());
    if (CI == AvailCopyMap.end())
      continue;

    MachineInstr &Copy = *CI->second;
    unsigned CopyDstReg = Copy.getOperand(0).getReg();
    const MachineOperand &CopySrc = Copy.getOperand(1);
    unsigned CopySrcReg = CopySrc.getReg();

    // FIXME: Don't handle partial uses of wider COPYs yet.
    if (MOUse.getReg() != CopyDstReg) {
      DEBUG(dbgs() << "MCP: FIXME! Not forwarding COPY to sub-register use:\n "
                   << MI);
      continue;
    }

    // Don't forward COPYs of reserved regs unless they are constant.
    if (MRI->isReserved(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
      continue;

    if (!isForwardableRegClassCopy(Copy, MI, OpIdx))
      continue;

    if (hasImplicitOverlap(MI, MOUse))
      continue;

    if (!DebugCounter::shouldExecute(FwdCounter)) {
      DEBUG(dbgs() << "MCP: Skipping forwarding due to debug counter:\n "
                   << MI);
      continue;
    }

    DEBUG(dbgs() << "MCP: Replacing " << printReg(MOUse.getReg(), TRI)
                 << "\n with " << printReg(CopySrcReg, TRI) << "\n in "
                 << MI << " from " << Copy);

    MOUse.setReg(CopySrcReg);
    if (!CopySrc.isRenamable())
      MOUse.setIsRenamable(false);

    DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");

    // Clear kill markers that may have been invalidated.
    for (MachineInstr &KMI :
         make_range(Copy.getIterator(), std::next(MI.getIterator())))
      KMI.clearRegisterKills(CopySrcReg, TRI);

    ++NumCopyForwards;
    Changed = true;
  }
}

void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");

@@ -241,6 +425,11 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
        continue;

      forwardUses(*MI);

      // Src may have been changed by forwardUses()
      Src = MI->getOperand(1).getReg();

      // If Src is defined by a previous copy, the previous copy cannot be
      // eliminated.
      ReadRegister(Src);

@@ -292,6 +481,20 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
        continue;
      }

      // Clobber any earlyclobber regs first.
      for (const MachineOperand &MO : MI->operands())
        if (MO.isReg() && MO.isEarlyClobber()) {
          unsigned Reg = MO.getReg();
          // If we have a tied earlyclobber, that means it is also read by this
          // instruction, so we need to make sure we don't remove it as dead
          // later.
          if (MO.isTied())
            ReadRegister(Reg);
          ClobberRegister(Reg);
        }

      forwardUses(*MI);

      // Not a copy.
      SmallVector<unsigned, 2> Defs;
      const MachineOperand *RegMask = nullptr;

@@ -307,7 +510,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
             "MachineCopyPropagation should be run after register allocation!");

      if (MO.isDef()) {
      if (MO.isDef() && !MO.isEarlyClobber()) {
        Defs.push_back(Reg);
        continue;
      } else if (MO.readsReg())

@@ -364,6 +567,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  // since we don't want to trust live-in lists.
  if (MBB.succ_empty()) {
    for (MachineInstr *MaybeDead : MaybeDeadCopies) {
      DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
            MaybeDead->dump());
      assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
      MaybeDead->eraseFromParent();
      Changed = true;

@@ -1081,6 +1081,10 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // kill markers.
  addPass(&StackSlotColoringID);

  // Copy propagate to forward register uses and try to eliminate COPYs that
  // were not coalesced.
  addPass(&MachineCopyPropagationID);

  // Run post-ra machine LICM to hoist reloads / remats.
  //
  // FIXME: can this move into MachineLateOptimization?

@@ -9,7 +9,8 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: halfword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255
@@ -24,7 +25,8 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: word:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255
@@ -39,7 +41,8 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: doubleword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
  %shr81 = lshr i32 %xor72, 9
  %conv82 = zext i32 %shr81 to i64
  %idxprom83 = and i64 %conv82, 255

@@ -8,15 +8,9 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
; Without advanced copy optimization, we end up with cross register
; banks copies that cannot be coalesced.
; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
; With advanced copy optimization, we end up with just one copy
; to insert the computed high part into the V register.
; CHECK-OPT-NOT: fmov
; CHECK-NOT: fmov
; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
; CHECK-OPT-NOT: fmov
; CHECK-NOT: fmov
; CHECK: mov.d v0[1], [[COPY_REG2]]
; CHECK-NEXT: ret
;
@@ -24,11 +18,9 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
; GENERIC-OPT-NOT: fmov
; GENERIC-NOT: fmov
; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
; GENERIC-OPT-NOT: fmov
; GENERIC-NOT: fmov
; GENERIC: mov v0.d[1], [[COPY_REG2]]
; GENERIC-NEXT: ret
  %add = add <2 x i64> %a, %b

@@ -4,8 +4,10 @@
define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
entry:
; CHECK-LABEL: t:
; CHECK: mov x0, [[REG1:x[0-9]+]]
; CHECK: mov x1, [[REG2:x[0-9]+]]
; CHECK: mov [[REG2:x[0-9]+]], x3
; CHECK: mov [[REG1:x[0-9]+]], x2
; CHECK: mov x0, x2
; CHECK: mov x1, x3
; CHECK: bl _foo
; CHECK: mov x0, [[REG1]]
; CHECK: mov x1, [[REG2]]

@@ -45,8 +45,7 @@ define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {

; CHECK: [[FAILED]]:
; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: mov [[TMP:w[0-9]+]], wzr
; CHECK: eor w0, [[TMP]], #0x1
; CHECK: eor w0, wzr, #0x1
; CHECK: ret

  %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic

@@ -0,0 +1,104 @@
# RUN: llc -mtriple=aarch64-linux-gnu -run-pass machine-cp -o - %s | FileCheck %s
# Tests for MachineCopyPropagation copy forwarding.
---
# Simple forwarding.
# CHECK-LABEL: name: test1
# CHECK: $x0 = SUBXri $x0, 1, 0
name: test1
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    renamable $x1 = COPY $x0
    $x0 = SUBXri renamable $x1, 1, 0
...
---
# Don't forward if not renamable.
# CHECK-LABEL: name: test2
# CHECK: $x0 = SUBXri $x1, 1, 0
name: test2
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    $x1 = COPY $x0
    $x0 = SUBXri $x1, 1, 0
...
---
# Don't forward reserved non-constant reg values.
# CHECK-LABEL: name: test4
# CHECK: $x0 = SUBXri renamable $x1, 1, 0
name: test4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    $sp = SUBXri $sp, 16, 0
    renamable $x1 = COPY $sp
    $x0 = SUBXri renamable $x1, 1, 0
    $sp = ADDXri $sp, 16, 0
...
---
# Don't violate opcode constraints when forwarding.
# CHECK-LABEL: name: test5
# CHECK: $x0 = SUBXri renamable $x1, 1, 0
name: test5
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    renamable $x1 = COPY $xzr
    $x0 = SUBXri renamable $x1, 1, 0
...
---
# Test cross-class COPY forwarding.
# CHECK-LABEL: name: test6
# CHECK: $x2 = COPY $x0
name: test6
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    renamable $d1 = COPY $x0
    $x2 = COPY renamable $d1
    RET_ReallyLR implicit $x2
...
---
# Don't forward if there are overlapping implicit operands.
# CHECK-LABEL: name: test7
# CHECK: $w0 = SUBWri killed renamable $w1, 1, 0
name: test7
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    renamable $w1 = COPY $w0
    $w0 = SUBWri killed renamable $w1, 1, 0, implicit killed $x1
...
---
# Check that kill flags are cleared.
# CHECK-LABEL: name: test8
# CHECK: $x2 = ADDXri $x0, 1, 0
# CHECK: $x0 = SUBXri $x0, 1, 0
name: test8
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    renamable $x1 = COPY $x0
    $x2 = ADDXri killed $x0, 1, 0
    $x0 = SUBXri renamable $x1, 1, 0
...
---
# Don't forward if value is clobbered.
# CHECK-LABEL: name: test9
# CHECK: $x2 = SUBXri renamable $x1, 1, 0
name: test9
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    renamable $x1 = COPY $x0
    $x0 = ADDXri $x0, 1, 0
    $x2 = SUBXri renamable $x1, 1, 0
...

@@ -489,7 +489,7 @@ else:

; CHECK-COMMON-LABEL: test_phi:
; CHECK-COMMON: mov x[[PTR:[0-9]+]], x0
; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x[[PTR]]]
; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x0]
; CHECK-COMMON: [[LOOP:LBB[0-9_]+]]:
; CHECK-COMMON: mov.16b v[[R:[0-9]+]], v[[AB]]
; CHECK-COMMON: ldr h[[AB]], [x[[PTR]]]

@@ -17,6 +17,9 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
  %val = zext i1 %test to i32
; CHECK: cset {{[xw][0-9]+}}, ne

; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]

  store i32 %val, i32* @var

  call void @bar()

@@ -25,7 +28,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
; Currently, the comparison is emitted again. An MSR/MRS pair would also be
; acceptable, but assuming the call preserves NZCV is not.
  br i1 %test, label %iftrue, label %iffalse
; CHECK: cmp [[LHS]], [[RHS]]
; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
; CHECK: b.eq

iftrue:

@@ -1671,7 +1671,7 @@ entry:
; CHECK-LABEL: bug34674:
; CHECK: // %entry
; CHECK-NEXT: mov [[ZREG:x[0-9]+]], xzr
; CHECK-DAG: stp [[ZREG]], [[ZREG]], [x0]
; CHECK-DAG: stp xzr, xzr, [x0]
; CHECK-DAG: add x{{[0-9]+}}, [[ZREG]], #1
define i64 @bug34674(<2 x i64>* %p) {
entry:

@@ -11,7 +11,7 @@ entry:
; A53: mov [[DATA:w[0-9]+]], w1
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str [[DATA]], {{.*}}
; A53: str w1, {{.*}}

  %0 = bitcast %struct1* %fde to i8*
  tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false)

@@ -7,8 +7,8 @@ declare void @foo(i32)
define void @test(i32 %px) {
; CHECK_LABEL: test:
; CHECK_LABEL: %entry
; CHECK: subs
; CHECK-NEXT: csel
; CHECK: subs [[REG0:w[0-9]+]],
; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
entry:
  %sub = add nsw i32 %px, -1
  %cmp = icmp slt i32 %px, 1

@@ -547,16 +547,16 @@ define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz() #1 {
; GCN: s_mov_b32 s5, s32
; GCN: s_add_u32 s32, s32, 0x300

; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-57-9][0-9]*]], s14
; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-68-9][0-9]*]], s15
; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-79][0-9]*]], s16
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]

; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
; GCN-DAG: s_mov_b32 s6, s14
; GCN-DAG: s_mov_b32 s7, s15
; GCN-DAG: s_mov_b32 s8, s16
; GCN: s_swappc_b64

; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4

@@ -1,4 +1,4 @@
# RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s
# RUN: llc -march=amdgcn -start-after=greedy -disable-copyprop -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s
# Check that we first do all vector instructions and only then change exec
# CHECK-DAG: COPY $vgpr10_vgpr11
# CHECK-DAG: COPY $vgpr12_vgpr13

@@ -78,7 +78,7 @@ ENDIF: ; preds = %LOOP

; Uses a copy instead of an or
; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]]
define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()

@@ -2,10 +2,10 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}vgpr:
; GCN: v_mov_b32_e32 v1, v0
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN: s_waitcnt expcnt(0)
; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN-NOT: s_endpgm
define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
@@ -204,13 +204,13 @@ bb:
}

; GCN-LABEL: {{^}}both:
; GCN: v_mov_b32_e32 v1, v0
; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
; GCN-DAG: s_add_i32 s0, s3, 2
; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: s_mov_b32 s1, s2
; GCN: s_mov_b32 s2, s3
; GCN: s_waitcnt expcnt(0)
; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN-DAG: s_add_i32 s0, s3, 2
; GCN-DAG: s_mov_b32 s2, s3
; GCN-NOT: s_endpgm
define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:

@@ -287,7 +287,8 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {

  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
  %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-ARMV7: cmp [[OLDVAL]], r1
; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK-ARMV7: dmb ish
@@ -305,7 +306,8 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: bx lr

; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-T2: cmp [[OLDVAL]], r1
; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
; CHECK-T2: dmb ish

@@ -39,7 +39,7 @@ define i32 @sadd_overflow(i32 %a, i32 %b) #0 {
; ARM: mov pc, lr

; THUMBV6: mov r[[R2:[0-9]+]], r[[R0:[0-9]+]]
; THUMBV6: adds r[[R3:[0-9]+]], r[[R2]], r[[R1:[0-9]+]]
; THUMBV6: adds r[[R3:[0-9]+]], r[[R0]], r[[R1:[0-9]+]]
; THUMBV6: movs r[[R0]], #0
; THUMBV6: movs r[[R1]], #1
; THUMBV6: cmp r[[R3]], r[[R2]]

@@ -197,9 +197,9 @@ entry:

; ARMT2-LABEL: t8:
; ARMT2: mov r1, r0
; ARMT2: cmp r0, #5
; ARMT2: mov r0, #9
; ARMT2: mov r4, #0
; ARMT2: cmp r1, #5
; ARMT2: movweq r4, #1
; ARMT2: bl t7

@@ -213,8 +213,8 @@ entry:

; THUMB2-LABEL: t8:
; THUMB2: mov r1, r0
; THUMB2: movs r4, #0
; THUMB2: cmp r1, #5
; THUMB2: cmp r0, #5
; THUMB2: mov.w r4, #0
; THUMB2: it eq
; THUMB2: moveq r4, #1
  %cmp = icmp eq i32 %a, 5

@@ -182,7 +182,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
; CHECK-APPLE: beq
; CHECK-APPLE: mov r0, #16
; CHECK-APPLE: malloc
; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
; CHECK-APPLE: strb r{{.*}}, [r0, #8]
; CHECK-APPLE: ble
; CHECK-APPLE: mov r8, [[ID]]

@@ -800,7 +800,7 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: sw $5, 36($sp) # 4-byte Folded Spill
; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill
; MMR3-NEXT: lw $16, 76($sp)
; MMR3-NEXT: srlv $4, $8, $16
; MMR3-NEXT: srlv $4, $7, $16
; MMR3-NEXT: not16 $3, $16
; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: sll16 $2, $6, 1
@@ -890,7 +890,7 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: lw $3, 68($sp)
; MMR6-NEXT: li16 $2, 64
; MMR6-NEXT: subu16 $7, $2, $3
; MMR6-NEXT: sllv $8, $6, $7
; MMR6-NEXT: sllv $8, $5, $7
; MMR6-NEXT: andi16 $5, $7, 32
; MMR6-NEXT: selnez $9, $8, $5
; MMR6-NEXT: sllv $16, $4, $7

@@ -828,7 +828,7 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: move $17, $5
; MMR3-NEXT: sw $4, 8($sp) # 4-byte Folded Spill
; MMR3-NEXT: lw $16, 76($sp)
; MMR3-NEXT: srlv $7, $8, $16
; MMR3-NEXT: srlv $7, $7, $16
; MMR3-NEXT: not16 $3, $16
; MMR3-NEXT: sw $3, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: sll16 $2, $6, 1
@@ -915,14 +915,14 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: move $1, $7
; MMR6-NEXT: sw $5, 8($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $16, $4
; MMR6-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $4, 32($sp) # 4-byte Folded Spill
; MMR6-NEXT: lw $3, 76($sp)
; MMR6-NEXT: srlv $2, $1, $3
; MMR6-NEXT: srlv $2, $7, $3
; MMR6-NEXT: not16 $5, $3
; MMR6-NEXT: sw $5, 24($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $4, $6
; MMR6-NEXT: sw $4, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: sll16 $6, $4, 1
; MMR6-NEXT: sw $6, 28($sp) # 4-byte Folded Spill
; MMR6-NEXT: sll16 $6, $6, 1
; MMR6-NEXT: sllv $17, $6, $5
; MMR6-NEXT: or16 $17, $2
; MMR6-NEXT: addiu $7, $3, -64
@@ -956,7 +956,7 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: sw $7, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: not16 $6, $6
; MMR6-NEXT: move $7, $17
; MMR6-NEXT: srl16 $17, $7, 1
; MMR6-NEXT: srl16 $17, $17, 1
; MMR6-NEXT: srlv $6, $17, $6
; MMR6-NEXT: lw $17, 4($sp) # 4-byte Folded Reload
; MMR6-NEXT: or16 $6, $17

@@ -857,7 +857,7 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: sw $5, 32($sp) # 4-byte Folded Spill
; MMR3-NEXT: move $1, $4
; MMR3-NEXT: lw $16, 76($sp)
; MMR3-NEXT: sllv $2, $1, $16
; MMR3-NEXT: sllv $2, $4, $16
; MMR3-NEXT: not16 $4, $16
; MMR3-NEXT: sw $4, 24($sp) # 4-byte Folded Spill
; MMR3-NEXT: srl16 $3, $5, 1
@@ -946,7 +946,7 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: sw $6, 4($sp) # 4-byte Folded Spill
; MMR6-NEXT: move $1, $4
; MMR6-NEXT: lw $3, 60($sp)
; MMR6-NEXT: sllv $2, $1, $3
; MMR6-NEXT: sllv $2, $4, $3
; MMR6-NEXT: not16 $4, $3
; MMR6-NEXT: sw $4, 16($sp) # 4-byte Folded Spill
; MMR6-NEXT: sw $5, 20($sp) # 4-byte Folded Spill

@@ -163,7 +163,7 @@ entry:
; MMR3: subu16 $5, $[[T19]], $[[T20]]

; MMR6: move $[[T0:[0-9]+]], $7
; MMR6: sw $[[T0]], 8($sp)
; MMR6: sw $7, 8($sp)
; MMR6: move $[[T1:[0-9]+]], $5
; MMR6: sw $4, 12($sp)
; MMR6: lw $[[T2:[0-9]+]], 48($sp)

@@ -20,9 +20,9 @@ define noalias i8* @_ZN2CC3funEv(%class.CC* %this) {
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: .cfi_offset r30, -16
; CHECK-NEXT: ld 12, 0(3)
; CHECK-NEXT: std 30, 32(1)
; CHECK-NEXT: mr 30, 3
; CHECK-NEXT: ld 12, 0(30)
; CHECK-NEXT: std 2, 24(1)
; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl

@@ -14,7 +14,8 @@ define double @foo3(double %a) nounwind {
  ret double %r

; CHECK: @foo3
; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
; CHECK: xsmaddmdp
; CHECK: xsmaddadp
}

@@ -16,8 +16,8 @@ if.end: ; preds = %if.then, %entry
  ret i32 %e.0
; CHECK: @foo
; CHECK: mr [[NEWREG:[0-9]+]], 3
; CHECK: mr [[REG1:[0-9]+]], 4
; CHECK: mtvsrd [[NEWREG2:[0-9]+]], 4
; CHECK: mffprd [[REG1:[0-9]+]], [[NEWREG2]]
; CHECK: add {{[0-9]+}}, [[NEWREG]], [[REG1]]
; CHECK: mffprd [[REG2:[0-9]+]], [[NEWREG2]]
; CHECK: add {{[0-9]+}}, [[REG2]], [[NEWREG]]

@@ -20,8 +20,8 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture r
define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) {
; CHECK-LABEL: ZN6snappyDecompressor_:
; CHECK: # %bb.0: # %entry
; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha
; CHECK-DAG: addi 25, 3, _ZN6snappy8internalL8wordmaskE@toc@l
; CHECK: addis 23, 2, _ZN6snappy8internalL8wordmaskE@toc@ha
; CHECK-DAG: addi 25, 23, _ZN6snappy8internalL8wordmaskE@toc@l
; CHECK-DAG: addis 5, 2, _ZN6snappy8internalL10char_tableE@toc@ha
; CHECK-DAG: addi 24, 5, _ZN6snappy8internalL10char_tableE@toc@l
; CHECK: b .LBB0_2

@@ -3,7 +3,7 @@

define i64 @testOptimizeLiAddToAddi(i64 %a) {
; CHECK-LABEL: testOptimizeLiAddToAddi:
; CHECK: addi 3, 30, 2444
; CHECK: addi 3, 3, 2444
; CHECK: bl callv
; CHECK: addi 3, 30, 234
; CHECK: bl call

@@ -25,7 +25,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30

@@ -156,9 +156,9 @@ define double @floatarg(double %a0, ; %i0,%i1
; HARD-NEXT: std %o0, [%sp+96]
; HARD-NEXT: st %o1, [%sp+92]
; HARD-NEXT: mov %i0, %o2
; HARD-NEXT: mov %o0, %o3
; HARD-NEXT: mov %i1, %o3
; HARD-NEXT: mov %o1, %o4
; HARD-NEXT: mov %o0, %o5
; HARD-NEXT: mov %i1, %o5
; HARD-NEXT: call floatarg
; HARD: std %f0, [%i4]
; SOFT: st %i0, [%sp+104]

@@ -235,8 +235,9 @@ entry:

; CHECK-LABEL: test_load_add_i32
; CHECK: membar
; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[U]]
; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[V2]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:

@@ -46,12 +46,12 @@ define <4 x float> @f5(<4 x float> %val1, <4 x float> %val2) {
; CHECK-LABEL: f5:
; CHECK-DAG: vlr %v[[A1:[0-5]]], %v24
; CHECK-DAG: vlr %v[[A2:[0-5]]], %v26
; CHECK-DAG: vrepf %v[[B1:[0-5]]], %v[[A1]], 1
; CHECK-DAG: vrepf %v[[B2:[0-5]]], %v[[A2]], 1
; CHECK-DAG: vrepf %v[[C1:[0-5]]], %v[[A1]], 2
; CHECK-DAG: vrepf %v[[C2:[0-5]]], %v[[A2]], 2
; CHECK-DAG: vrepf %v[[D1:[0-5]]], %v[[A1]], 3
; CHECK-DAG: vrepf %v[[D2:[0-5]]], %v[[A2]], 3
; CHECK-DAG: vrepf %v[[B1:[0-5]]], %v24, 1
; CHECK-DAG: vrepf %v[[B2:[0-5]]], %v26, 1
; CHECK-DAG: vrepf %v[[C1:[0-5]]], %v24, 2
; CHECK-DAG: vrepf %v[[C2:[0-5]]], %v26, 2
; CHECK-DAG: vrepf %v[[D1:[0-5]]], %v24, 3
; CHECK-DAG: vrepf %v[[D2:[0-5]]], %v26, 3
; CHECK-DAG: sebr %f[[A1]], %f[[A2]]
; CHECK-DAG: sebr %f[[B1]], %f[[B2]]
; CHECK-DAG: sebr %f[[C1]], %f[[C2]]

@@ -37,13 +37,13 @@ while.body:
; CHECK: adds r3, r0, r1
; CHECK: push {r5}
; CHECK: pop {r1}
; CHECK: adcs r1, r1
; CHECK: adcs r1, r5
; CHECK: ldr r0, [sp, #12] @ 4-byte Reload
; CHECK: ldr r2, [sp, #8] @ 4-byte Reload
; CHECK: adds r2, r0, r2
; CHECK: push {r5}
; CHECK: pop {r4}
; CHECK: adcs r4, r4
; CHECK: adcs r4, r5
; CHECK: adds r0, r2, r5
; CHECK: push {r3}
; CHECK: pop {r0}

@@ -598,7 +598,7 @@ declare void @abort() #0
define i32 @b_to_bx(i32 %value) {
; CHECK-LABEL: b_to_bx:
; DISABLE: push {r7, lr}
; CHECK: cmp r1, #49
; CHECK: cmp r0, #49
; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
; ENABLE: push {r7, lr}

@ -7,7 +7,7 @@ define i32 @f(i32 %a, i32 %b) {
|
|||
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
||||
; CHECK-NEXT: movl %ecx, %edx
|
||||
; CHECK-NEXT: imull %edx, %edx
|
||||
; CHECK-NEXT: imull %ecx, %edx
|
||||
; CHECK-NEXT: imull %eax, %ecx
|
||||
; CHECK-NEXT: imull %eax, %eax
|
||||
; CHECK-NEXT: addl %edx, %eax
|
||||
|
|
|
@@ -106,7 +106,7 @@ entry:
; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
; CHECK-DAG: movl %[[r2]], 4(%esp)
; CHECK-DAG: movl %[[r1]], (%esp)
; CHECK-DAG: movl %edx, (%esp)
; CHECK: movl %esp, %[[reg:[^ ]*]]
; CHECK: pushl %[[reg]]
; CHECK: calll _addrof_i64

@@ -12,11 +12,11 @@ define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>*
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovaps (%rbx), %ymm0
; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%r15), %ymm1
; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; CHECK-NEXT: vmovaps (%r14), %ymm2
; CHECK-NEXT: vmovaps (%rdx), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload

@@ -9,10 +9,10 @@ define void @bar__512(<16 x i32>* %var) #0 {
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
; CHECK-NEXT: vmovups (%rbx), %zmm0
; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
; CHECK-NEXT: vmovaps %zmm1, (%rbx)
; CHECK-NEXT: vmovaps %zmm1, (%rdi)
; CHECK-NEXT: callq _Print__512
; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
; CHECK-NEXT: callq _Print__512

@@ -355,7 +355,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL_X32-NEXT: calll _test11
; KNL_X32-NEXT: movl %eax, %ebx
; KNL_X32-NEXT: movzbl %bl, %eax
; KNL_X32-NEXT: movzbl %al, %eax
; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %edi, (%esp)

@@ -650,7 +650,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; X32-NEXT: subl $24, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
; X32-NEXT: vmovdqa %xmm0, %xmm4
; X32-NEXT: vmovdqa %xmm4, %xmm1
; X32-NEXT: vmovdqa %xmm0, %xmm1
; X32-NEXT: calll _test_argRet128Vector
; X32-NEXT: vmovdqa32 %xmm4, %xmm0 {%k1}
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -668,7 +668,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; WIN64-NEXT: .seh_savexmm 8, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: vmovdqa %xmm0, %xmm8
; WIN64-NEXT: vmovdqa %xmm8, %xmm1
; WIN64-NEXT: vmovdqa %xmm0, %xmm1
; WIN64-NEXT: callq test_argRet128Vector
; WIN64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1}
; WIN64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -689,7 +689,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
; LINUXOSX64-NEXT: vmovdqa %xmm0, %xmm8
; LINUXOSX64-NEXT: vmovdqa %xmm8, %xmm1
; LINUXOSX64-NEXT: vmovdqa %xmm0, %xmm1
; LINUXOSX64-NEXT: callq test_argRet128Vector
; LINUXOSX64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1}
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -908,12 +908,12 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; X32-NEXT: subl $20, %esp
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %edi, %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %edx, %ebx
; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %eax, %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: subl %ecx, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, %ebp

@@ -202,7 +202,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: shrl $12, %ecx
; X32-NEXT: kmovd %ecx, %k3
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl $13, %ecx
; X32-NEXT: andb $1, %cl
; X32-NEXT: kmovd %ecx, %k4
@@ -340,7 +340,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $28, %k0, %k1
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $28, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -348,7 +348,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
; X32-NEXT: kshiftrq $35, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $29, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $29, %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: kmovd %eax, %k2
@@ -357,7 +357,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext
; X32-NEXT: kshiftrq $34, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $30, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $30, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -751,7 +751,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: shrl $12, %ecx
; X32-NEXT: kmovd %ecx, %k3
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl $13, %ecx
; X32-NEXT: andb $1, %cl
; X32-NEXT: kmovd %ecx, %k4
@@ -889,7 +889,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $28, %k0, %k1
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $28, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -897,7 +897,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-NEXT: kshiftrq $35, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $29, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $29, %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: kmovd %eax, %k2
@@ -906,7 +906,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) {
; X32-NEXT: kshiftrq $34, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $30, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $30, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -1781,7 +1781,7 @@ define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: shrl $12, %ecx
; X32-NEXT: kmovd %ecx, %k3
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl $13, %ecx
; X32-NEXT: andb $1, %cl
; X32-NEXT: kmovd %ecx, %k4
@@ -1919,7 +1919,7 @@ define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $28, %k0, %k1
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $28, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -1927,7 +1927,7 @@ define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %
; X32-NEXT: kshiftrq $35, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $29, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $29, %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: kmovd %eax, %k2
@@ -1936,7 +1936,7 @@ define i64 @test_mm512_mask_test_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64> %
; X32-NEXT: kshiftrq $34, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $30, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $30, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -2423,7 +2423,7 @@ define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: shrl $12, %ecx
; X32-NEXT: kmovd %ecx, %k3
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl $13, %ecx
; X32-NEXT: andb $1, %cl
; X32-NEXT: kmovd %ecx, %k4
@@ -2561,7 +2561,7 @@ define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $28, %k0, %k1
; X32-NEXT: movl %esi, %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $28, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1
@@ -2569,7 +2569,7 @@ define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: kshiftrq $35, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $29, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $29, %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: kmovd %eax, %k2
@@ -2578,7 +2578,7 @@ define i64 @test_mm512_mask_testn_epi8_mask(i64 %__U, <8 x i64> %__A, <8 x i64>
; X32-NEXT: kshiftrq $34, %k1, %k1
; X32-NEXT: kxorq %k1, %k0, %k0
; X32-NEXT: kshiftrq $30, %k0, %k1
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %esi, %eax
; X32-NEXT: shrl $30, %eax
; X32-NEXT: kmovd %eax, %k2
; X32-NEXT: kxorq %k2, %k1, %k1

@@ -1876,7 +1876,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: kshiftrq $5, %k7, %k0
; AVX512F-32-NEXT: kxorq %k4, %k0, %k4
; AVX512F-32-NEXT: kmovd %ecx, %k0
; AVX512F-32-NEXT: movl %ebp, %ecx
; AVX512F-32-NEXT: movl %ebx, %ecx
; AVX512F-32-NEXT: shrl $13, %ecx
; AVX512F-32-NEXT: andb $1, %cl
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4
@@ -2576,7 +2576,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: kshiftrq $5, %k7, %k0
; AVX512F-32-NEXT: kxorq %k4, %k0, %k4
; AVX512F-32-NEXT: kmovd %ecx, %k0
; AVX512F-32-NEXT: movl %ebp, %ecx
; AVX512F-32-NEXT: movl %ebx, %ecx
; AVX512F-32-NEXT: shrl $13, %ecx
; AVX512F-32-NEXT: andb $1, %cl
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4

@@ -38,7 +38,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
; SSE2-LABEL: test_negative_zero_1:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero

@@ -197,8 +197,8 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2
@@ -213,7 +213,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3
@@ -260,7 +260,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6

@@ -204,7 +204,7 @@ define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl0:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pslld $20, %xmm1
; SSE-NEXT: pslld $20, %xmm0

@@ -14,7 +14,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
; SSE: # %bb.0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: addss %xmm2, %xmm2
; SSE-NEXT: addss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm0
; SSE-NEXT: mulss %xmm1, %xmm1
@@ -58,9 +58,9 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
; SSE-LABEL: complex_square_f64:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm2
; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: mulsd %xmm1, %xmm2
; SSE-NEXT: mulsd %xmm0, %xmm0
; SSE-NEXT: mulsd %xmm1, %xmm1
@@ -161,9 +161,9 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
; SSE-LABEL: complex_mul_f64:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: mulsd %xmm0, %xmm4
; SSE-NEXT: mulsd %xmm1, %xmm0

@@ -312,7 +312,7 @@ define i64 @PR23590(i64 %x) nounwind {
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: shrq $12, %rdx
; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039

@@ -18,7 +18,7 @@ declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)

; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
@@ -47,7 +47,7 @@ define float @test_fmaxf_minsize(float %x, float %y) minsize {

; CHECK-LABEL: @test_fmax
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
@@ -74,7 +74,7 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x, x86_fp80 %y) {

; CHECK-LABEL: @test_intrinsic_fmaxf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
@@ -95,7 +95,7 @@ define float @test_intrinsic_fmaxf(float %x, float %y) {

; CHECK-LABEL: @test_intrinsic_fmax
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1

@@ -30,7 +30,7 @@ define float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X64-LABEL: fast_fmuladd_opts:
; X64: # %bb.0:
; X64-NEXT: movaps %xmm0, %xmm1
; X64-NEXT: addss %xmm1, %xmm1
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq

@@ -18,7 +18,7 @@ declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

; CHECK-LABEL: @test_fminf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
@@ -40,7 +40,7 @@ define float @test_fminf(float %x, float %y) {

; CHECK-LABEL: @test_fmin
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
@@ -67,7 +67,7 @@ define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {

; CHECK-LABEL: @test_intrinsic_fminf
; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
@@ -87,7 +87,7 @@ define float @test_intrinsic_fminf(float %x, float %y) {

; CHECK-LABEL: @test_intrinsic_fmin
; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1

@@ -227,7 +227,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
@@ -275,7 +275,7 @@ define fp128 @acosl(fp128 %x) #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)

@@ -32,8 +32,7 @@ define i64 @foo(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h)
 ; CHECK-NEXT: movzbl %ah, %eax
 ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %ebx
 ; CHECK-NEXT: movzbl %bh, %edi
-; CHECK-NEXT: movq %r10, %r8
-; CHECK-NEXT: addq %r8, %rsi
+; CHECK-NEXT: addq %r10, %rsi
 ; CHECK-NEXT: addq %r11, %rdx
 ; CHECK-NEXT: addq %rsi, %rdx
 ; CHECK-NEXT: addq %rbp, %rcx
@@ -68,8 +67,7 @@ define i64 @foo(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h)
 ; GNUX32-NEXT: movzbl %ah, %eax
 ; GNUX32-NEXT: movl {{[0-9]+}}(%esp), %ebx
 ; GNUX32-NEXT: movzbl %bh, %edi
-; GNUX32-NEXT: movq %r10, %r8
-; GNUX32-NEXT: addq %r8, %rsi
+; GNUX32-NEXT: addq %r10, %rsi
 ; GNUX32-NEXT: addq %r11, %rdx
 ; GNUX32-NEXT: addq %rsi, %rdx
 ; GNUX32-NEXT: addq %rbp, %rcx
@@ -896,16 +896,16 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: not_a_hsub_2:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
 ; SSE-NEXT: subss %xmm3, %xmm2
 ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
 ; SSE-NEXT: subss %xmm3, %xmm0
 ; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
 ; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
 ; SSE-NEXT: subss %xmm4, %xmm3
 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
 ; SSE-NEXT: subss %xmm4, %xmm1
@@ -953,10 +953,10 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: not_a_hsub_3:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
 ; SSE-NEXT: subsd %xmm2, %xmm1
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE-NEXT: subsd %xmm0, %xmm2
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movapd %xmm2, %xmm0
@@ -7,10 +7,10 @@ define float @pr26491(<4 x float> %a0) {
 ; SSE2-LABEL: pr26491:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[3,3]
 ; SSE2-NEXT: addps %xmm0, %xmm1
 ; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSE2-NEXT: addss %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
@@ -19,7 +19,7 @@ define float @pr26491(<4 x float> %a0) {
 ; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSSE3-NEXT: addps %xmm0, %xmm1
 ; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSSE3-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
 ; SSSE3-NEXT: addss %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
@@ -103,7 +103,7 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test5_undef:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
 ; SSE-NEXT: addsd %xmm0, %xmm1
 ; SSE-NEXT: movapd %xmm1, %xmm0
 ; SSE-NEXT: retq
@@ -168,7 +168,7 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT: addss %xmm0, %xmm1
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-NEXT: addss %xmm2, %xmm0
 ; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -386,7 +386,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-NEXT: pushq %rbx
 ; CHECK-LIBCALL-NEXT: subq $48, %rsp
 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
 ; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
 ; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
@@ -472,7 +472,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-NEXT: pushq %rbx
 ; CHECK-LIBCALL-NEXT: subq $16, %rsp
 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
 ; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
 ; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
@@ -657,7 +657,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
 ; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
 ; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
 ; CHECK-I686-NEXT: movaps %xmm0, %xmm1
-; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
 ; CHECK-I686-NEXT: movss %xmm1, (%esp)
 ; CHECK-I686-NEXT: calll __gnu_f2h_ieee
 ; CHECK-I686-NEXT: movw %ax, %si
@@ -40,7 +40,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE42-LABEL: test_reduce_v2i64:
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
 ; X86-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2
 ; X86-SSE42-NEXT: movd %xmm2, %eax
@@ -80,7 +80,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X64-SSE42-LABEL: test_reduce_v2i64:
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm0
 ; X64-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2
 ; X64-SSE42-NEXT: movq %xmm2, %rax
@@ -1061,7 +1061,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: subl $28, %esp
 ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
-; X86-SSE2-NEXT: movdqa %xmm5, (%esp) ## 16-byte Spill
+; X86-SSE2-NEXT: movdqa %xmm3, (%esp) ## 16-byte Spill
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -1079,7 +1079,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: por %xmm6, %xmm5
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm6
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6
-; X86-SSE2-NEXT: movdqa %xmm1, %xmm7
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm7
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm7
 ; X86-SSE2-NEXT: movdqa %xmm7, %xmm0
 ; X86-SSE2-NEXT: pcmpgtd %xmm6, %xmm0
@@ -1134,7 +1134,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE42-LABEL: test_reduce_v8i64:
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm4
-; X86-SSE42-NEXT: movdqa %xmm4, %xmm5
+; X86-SSE42-NEXT: movdqa %xmm0, %xmm5
 ; X86-SSE42-NEXT: pcmpgtq %xmm2, %xmm5
 ; X86-SSE42-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE42-NEXT: pcmpgtq %xmm3, %xmm0
@@ -1260,7 +1260,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X64-SSE42-LABEL: test_reduce_v8i64:
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm4
-; X64-SSE42-NEXT: movdqa %xmm4, %xmm5
+; X64-SSE42-NEXT: movdqa %xmm0, %xmm5
 ; X64-SSE42-NEXT: pcmpgtq %xmm2, %xmm5
 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE42-NEXT: pcmpgtq %xmm3, %xmm0
@@ -40,7 +40,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE42-LABEL: test_reduce_v2i64:
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT: movdqa %xmm2, %xmm0
 ; X86-SSE42-NEXT: pcmpgtq %xmm1, %xmm0
 ; X86-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2
@@ -81,7 +81,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X64-SSE42-LABEL: test_reduce_v2i64:
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT: movdqa %xmm2, %xmm0
 ; X64-SSE42-NEXT: pcmpgtq %xmm1, %xmm0
 ; X64-SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm2
@@ -1065,10 +1065,10 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: subl $28, %esp
 ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm6
-; X86-SSE2-NEXT: movdqa %xmm6, (%esp) ## 16-byte Spill
+; X86-SSE2-NEXT: movdqa %xmm2, (%esp) ## 16-byte Spill
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
-; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm5
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm5
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6
 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7
@@ -40,7 +40,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE42-LABEL: test_reduce_v2i64:
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT: pxor %xmm3, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm2, %xmm3
@@ -86,7 +86,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X64-SSE42-LABEL: test_reduce_v2i64:
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT: pxor %xmm3, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm2, %xmm3
@@ -1206,7 +1206,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: subl $28, %esp
 ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5
-; X86-SSE2-NEXT: movdqa %xmm5, (%esp) ## 16-byte Spill
+; X86-SSE2-NEXT: movdqa %xmm3, (%esp) ## 16-byte Spill
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -1224,7 +1224,7 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: por %xmm6, %xmm5
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm6
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6
-; X86-SSE2-NEXT: movdqa %xmm1, %xmm7
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm7
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm7
 ; X86-SSE2-NEXT: movdqa %xmm7, %xmm0
 ; X86-SSE2-NEXT: pcmpgtd %xmm6, %xmm0
@@ -40,7 +40,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X86-SSE42-LABEL: test_reduce_v2i64:
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X86-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648]
 ; X86-SSE42-NEXT: movdqa %xmm1, %xmm3
 ; X86-SSE42-NEXT: pxor %xmm0, %xmm3
@@ -87,7 +87,7 @@ define i64 @test_reduce_v2i64(<2 x i64> %a0) {
 ; X64-SSE42-LABEL: test_reduce_v2i64:
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm1
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; X64-SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
 ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm3
 ; X64-SSE42-NEXT: pxor %xmm0, %xmm3
@@ -466,7 +466,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X86-SSE42: ## %bb.0:
 ; X86-SSE42-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
-; X86-SSE42-NEXT: movdqa %xmm2, %xmm4
+; X86-SSE42-NEXT: movdqa %xmm0, %xmm4
 ; X86-SSE42-NEXT: pxor %xmm3, %xmm4
 ; X86-SSE42-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE42-NEXT: pxor %xmm3, %xmm0
@@ -565,7 +565,7 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
 ; X64-SSE42: ## %bb.0:
 ; X64-SSE42-NEXT: movdqa %xmm0, %xmm2
 ; X64-SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; X64-SSE42-NEXT: movdqa %xmm2, %xmm4
+; X64-SSE42-NEXT: movdqa %xmm0, %xmm4
 ; X64-SSE42-NEXT: pxor %xmm3, %xmm4
 ; X64-SSE42-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE42-NEXT: pxor %xmm3, %xmm0
@@ -1106,10 +1106,10 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
 ; X86-SSE2-NEXT: subl $28, %esp
 ; X86-SSE2-NEXT: .cfi_def_cfa_offset 32
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm6
-; X86-SSE2-NEXT: movdqa %xmm6, (%esp) ## 16-byte Spill
+; X86-SSE2-NEXT: movdqa %xmm2, (%esp) ## 16-byte Spill
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm5
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm5
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm6
 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7
@@ -161,6 +161,7 @@ define void @testPR4459(x86_fp80 %a) {
 ; CHECK-NEXT: fstpt (%esp)
 ; CHECK-NEXT: calll _ceil
+; CHECK-NEXT: fld %st(0)
 ; CHECK-NEXT: fxch %st(1)
 ; CHECK-NEXT: ## InlineAsm Start
 ; CHECK-NEXT: fistpl %st(0)
 ; CHECK-NEXT: ## InlineAsm End
@@ -24,7 +24,7 @@ define void @bar(i32 %X) {
 call void @foo()
 ; CHECK-LABEL: bar:
 ; CHECK: callq foo
-; CHECK-NEXT: movl %eax, %r15d
+; CHECK-NEXT: movl %edi, %r15d
 call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
 ret void
 }
@@ -27,7 +27,7 @@ define void @print_framealloc_from_fp(i8* %fp) {
 
 ; X64-LABEL: print_framealloc_from_fp:
 ; X64: movq %rcx, %[[parent_fp:[a-z]+]]
-; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
+; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
 ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
 ; X64: movq %[[str]], %rcx
 ; X64: callq printf
@@ -8,7 +8,7 @@ define i32 @t1(i32 %a, i32 %b) nounwind {
 ; CHECK: ## %bb.0: ## %entry
 ; CHECK-NEXT: movl %esi, %edx
 ; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: testl %edx, %edx
+; CHECK-NEXT: testl %esi, %esi
 ; CHECK-NEXT: je LBB0_1
 ; CHECK-NEXT: .p2align 4, 0x90
 ; CHECK-NEXT: LBB0_2: ## %while.body
@@ -59,7 +59,7 @@ define i32 @t3(i64 %a, i64 %b) nounwind {
 ; CHECK: ## %bb.0: ## %entry
 ; CHECK-NEXT: movq %rsi, %rdx
 ; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: testq %rdx, %rdx
+; CHECK-NEXT: testq %rsi, %rsi
 ; CHECK-NEXT: je LBB2_1
 ; CHECK-NEXT: .p2align 4, 0x90
 ; CHECK-NEXT: LBB2_2: ## %while.body
@@ -38,7 +38,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %edx, %eax
 ; X32-NEXT: adcl %edi, %eax
 ; X32-NEXT: movl %edi, %ecx
-; X32-NEXT: movl %ecx, -204(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, -204(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, -892(%ebp) # 4-byte Spill
 ; X32-NEXT: movl 12(%ebp), %eax
 ; X32-NEXT: movl 36(%eax), %eax
@@ -47,7 +47,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: mull %edx
 ; X32-NEXT: movl %edx, -236(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl %edi, -304(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill
 ; X32-NEXT: addl %ecx, %edi
 ; X32-NEXT: movl %edi, -80(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %eax
@@ -58,7 +58,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %ecx, -124(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -124(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, %edx
 ; X32-NEXT: movl -400(%ebp), %esi # 4-byte Reload
@@ -72,7 +72,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, -656(%ebp) # 4-byte Spill
 ; X32-NEXT: leal (%ebx,%edi), %eax
 ; X32-NEXT: movl %edx, %edi
-; X32-NEXT: leal (%ecx,%edi), %edx
+; X32-NEXT: leal (%ecx,%edx), %edx
 ; X32-NEXT: adcl %eax, %edx
 ; X32-NEXT: movl %edx, -700(%ebp) # 4-byte Spill
 ; X32-NEXT: seto %al
@@ -123,7 +123,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl %edi, %ebx
 ; X32-NEXT: movl %ebx, -424(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: movl %ebx, -256(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, -256(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
 ; X32-NEXT: addl %eax, -80(%ebp) # 4-byte Folded Spill
 ; X32-NEXT: movl -204(%ebp), %eax # 4-byte Reload
@@ -148,7 +148,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movzbl %bh, %eax
 ; X32-NEXT: adcl %edx, %eax
 ; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl %edi, -72(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
 ; X32-NEXT: movl 12(%ebp), %eax
 ; X32-NEXT: movl 8(%eax), %eax
 ; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
@@ -220,7 +220,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %eax, -364(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -396(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -396(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -324(%ebp), %edx # 4-byte Reload
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: addl %eax, %edi
@@ -252,7 +252,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %eax, %edi
 ; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -84(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -84(%ebp) # 4-byte Spill
 ; X32-NEXT: movl 20(%ecx), %eax
 ; X32-NEXT: movl %eax, -252(%ebp) # 4-byte Spill
 ; X32-NEXT: mull %ebx
@@ -303,7 +303,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl -52(%ebp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %edx, %eax
 ; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -56(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, -780(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -132(%ebp), %edx # 4-byte Reload
 ; X32-NEXT: movl %edx, %eax
@@ -393,10 +393,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl %ecx, -160(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -160(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, -268(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl -264(%ebp), %ebx # 4-byte Reload
 ; X32-NEXT: movl %ebx, %ecx
@@ -425,7 +425,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, -592(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl %esi, %eax
 ; X32-NEXT: movl -116(%ebp), %esi # 4-byte Reload
 ; X32-NEXT: addl %esi, %eax
 ; X32-NEXT: movl %ebx, %eax
@@ -533,7 +533,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %ebx, -336(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -336(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: movl 52(%esi), %eax
 ; X32-NEXT: movl %eax, -144(%ebp) # 4-byte Spill
@@ -559,7 +559,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl -336(%ebp), %ebx # 4-byte Reload
 ; X32-NEXT: addl %eax, %ebx
 ; X32-NEXT: movl %edi, %edx
-; X32-NEXT: movl %edx, -176(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, -176(%ebp) # 4-byte Spill
 ; X32-NEXT: adcl -360(%ebp), %edi # 4-byte Folded Reload
 ; X32-NEXT: addl %ecx, %ebx
 ; X32-NEXT: movl %ebx, -472(%ebp) # 4-byte Spill
@@ -590,12 +590,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -384(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -384(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -116(%ebp), %edi # 4-byte Reload
 ; X32-NEXT: movl %edi, %ecx
 ; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl %edx, -480(%ebp) # 4-byte Spill
-; X32-NEXT: addl %edx, %ecx
+; X32-NEXT: movl %eax, -480(%ebp) # 4-byte Spill
+; X32-NEXT: addl %eax, %ecx
 ; X32-NEXT: movl -84(%ebp), %ebx # 4-byte Reload
 ; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: adcl %esi, %eax
@@ -642,8 +642,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, %ecx
 ; X32-NEXT: addl %esi, %ecx
 ; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -496(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %ecx
+; X32-NEXT: movl %edx, -496(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
 ; X32-NEXT: adcl %edi, %ecx
 ; X32-NEXT: movl %ecx, -992(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, %ecx
@@ -761,7 +761,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %edx, %edx
 ; X32-NEXT: mull %edx
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, -484(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -484(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, -488(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: addl %esi, %eax
@@ -793,8 +793,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl -60(%ebp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, -928(%ebp) # 4-byte Spill
 ; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl 84(%eax), %eax
+; X32-NEXT: movl 84(%ecx), %eax
 ; X32-NEXT: movl %eax, -544(%ebp) # 4-byte Spill
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
@@ -871,7 +870,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: xorl %edx, %edx
 ; X32-NEXT: mull %edx
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, -556(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, -556(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, -560(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -524(%ebp), %eax # 4-byte Reload
 ; X32-NEXT: movl %eax, %ebx
@@ -882,7 +881,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %ebx, -732(%ebp) # 4-byte Spill
 ; X32-NEXT: adcl %edi, %esi
 ; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edx, -728(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, -728(%ebp) # 4-byte Spill
 ; X32-NEXT: addl -136(%ebp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, -712(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -668(%ebp), %ecx # 4-byte Reload
@@ -917,7 +916,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %eax, -564(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -568(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, -568(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -500(%ebp), %edx # 4-byte Reload
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: addl %eax, %edi
@@ -983,7 +982,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movzbl -88(%ebp), %eax # 1-byte Folded Reload
 ; X32-NEXT: adcl %edx, %eax
 ; X32-NEXT: movl %ecx, %edx
-; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: addl %ecx, %ebx
 ; X32-NEXT: adcl %esi, %eax
 ; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload
@@ -1038,7 +1037,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %ebx, %ecx
+; X32-NEXT: movl %eax, %ecx
 ; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
 ; X32-NEXT: addl %esi, %ecx
 ; X32-NEXT: adcl $0, %edx
@@ -1052,7 +1051,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movzbl -16(%ebp), %ebx # 1-byte Folded Reload
 ; X32-NEXT: adcl %edi, %ebx
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl %esi, %edx
+; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %ebx
 ; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
 ; X32-NEXT: addl -324(%ebp), %eax # 4-byte Folded Reload
@@ -1143,7 +1142,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movzbl %cl, %eax
 ; X32-NEXT: adcl %esi, %eax
 ; X32-NEXT: movl %edi, %esi
-; X32-NEXT: addl %esi, %edx
+; X32-NEXT: addl %edi, %edx
 ; X32-NEXT: adcl %ebx, %eax
 ; X32-NEXT: movl %eax, -112(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -136(%ebp), %edi # 4-byte Reload
@@ -1223,7 +1222,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movzbl %bl, %eax
 ; X32-NEXT: adcl %edx, %eax
 ; X32-NEXT: movl %ecx, %edx
-; X32-NEXT: addl %edx, %esi
+; X32-NEXT: addl %ecx, %esi
 ; X32-NEXT: adcl %edi, %eax
 ; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -100(%ebp), %edi # 4-byte Reload
@@ -1697,7 +1696,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %esi, %eax
 ; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, -48(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, -48(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %ebx
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
@@ -4479,7 +4478,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %esi, %eax
 ; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, -140(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, -140(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %ebx
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl %eax, -56(%ebp) # 4-byte Spill
@@ -5199,7 +5198,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: addl %edi, %edx
 ; X32-NEXT: movl 124(%ebx), %ebx
 ; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: imull %eax, %ebx
+; X32-NEXT: imull %ecx, %ebx
 ; X32-NEXT: addl %edx, %ebx
 ; X32-NEXT: movl -144(%ebp), %ecx # 4-byte Reload
 ; X32-NEXT: addl %ecx, -96(%ebp) # 4-byte Folded Spill
@@ -6073,8 +6072,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl 108(%eax), %edx
 ; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -112(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, -112(%ebp) # 4-byte Spill
+; X32-NEXT: mull %edx
 ; X32-NEXT: movl %edx, %esi
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl %eax, -128(%ebp) # 4-byte Spill
@@ -6113,7 +6112,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: mull %esi
+; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %edx, -144(%ebp) # 4-byte Spill
 ; X32-NEXT: movl %eax, -280(%ebp) # 4-byte Spill
 ; X32-NEXT: movl -60(%ebp), %ebx # 4-byte Reload
@@ -6754,7 +6753,6 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %rbp
 ; X64-NEXT: addq %rcx, %rbx
 ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r11
 ; X64-NEXT: adcq %rdi, %rbp
 ; X64-NEXT: setb %bl
 ; X64-NEXT: movzbl %bl, %ebx
@@ -6764,12 +6762,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %r12
-; X64-NEXT: movq %r11, %r8
+; X64-NEXT: movq %rcx, %r12
+; X64-NEXT: movq %rcx, %r8
 ; X64-NEXT: addq %rax, %r12
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: addq %rbp, %r12
 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -6798,7 +6796,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %rbx
 ; X64-NEXT: movq 16(%rsi), %rax
 ; X64-NEXT: movq %rsi, %r13
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@@ -6811,7 +6809,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rbx, %r11
 ; X64-NEXT: movq %r8, %rax
 ; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movq %r9, %rax
 ; X64-NEXT: adcq %rcx, %rax
@@ -6824,7 +6822,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdx, %rax
 ; X64-NEXT: adcq %rcx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq 32(%r13), %rax
@@ -6840,9 +6838,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: addq %r9, %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
 ; X64-NEXT: adcq %r15, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@@ -6860,7 +6858,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: addq %rsi, %r11
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %rcx, %r11
+; X64-NEXT: addq %rbx, %r11
 ; X64-NEXT: adcq %rsi, %rbp
 ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: setb %bl
@@ -6881,11 +6879,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rbx, %r10
 ; X64-NEXT: movq %rcx, %rdx
 ; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: addq %r9, %rdx
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %r11, %r8
-; X64-NEXT: adcq %r8, %r15
+; X64-NEXT: adcq %r11, %r15
 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: adcq %rax, %r14
 ; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -6981,13 +6979,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %r12
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
 ; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rsi, %rbx
@@ -7014,7 +7011,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r15
 ; X64-NEXT: adcq $0, %r12
 ; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %rcx
@@ -7031,7 +7028,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rbx, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, %r8
 ; X64-NEXT: addq %rbp, %r8
@@ -7062,7 +7059,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, %r13
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
@@ -7142,13 +7139,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %r10
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r11, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rdi, %rbx
@@ -7278,7 +7274,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %r14
 ; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %r8, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %r11
 ; X64-NEXT: movq %rdx, %rbx
@@ -7338,7 +7334,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r9
 ; X64-NEXT: adcq $0, %r10
 ; X64-NEXT: movq %rbp, %rsi
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rbp, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, %r14
@@ -7395,8 +7391,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r15
 ; X64-NEXT: movq %rbp, %rax
 ; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rdx, %r9
 ; X64-NEXT: movq %rax, %r8
 ; X64-NEXT: addq %rbx, %r8
@@ -7479,13 +7475,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rcx, %r14
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %r11, %rbx
@@ -7513,8 +7508,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: adcq $0, %r14
 ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, %r8
@@ -7527,7 +7521,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: addq %r8, %rcx
 ; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
 ; X64-NEXT: mulq %r13
 ; X64-NEXT: movq %rdx, %rbx
@@ -7561,13 +7555,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
 ; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %r10, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
 ; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %rdi
 ; X64-NEXT: addq %rcx, %rdi
@@ -7643,7 +7636,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %r9, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, %rdi
 ; X64-NEXT: addq %rsi, %rdi
@@ -7655,16 +7648,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: movq %rdx, %r14
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: addq %rbx, %r12
+; X64-NEXT: addq %rax, %r12
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
-; X64-NEXT: adcq %r14, %r15
+; X64-NEXT: adcq %rdx, %r15
 ; X64-NEXT: addq %rdi, %r12
 ; X64-NEXT: adcq %rcx, %r15
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
@@ -7728,7 +7721,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, %r9
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: addq %r9, %rbp
+; X64-NEXT: addq %rax, %rbp
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: addq %rsi, %rbp
@@ -7906,7 +7899,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq 88(%rsi), %rax
 ; X64-NEXT: movq %rsi, %r9
 ; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %r11
 ; X64-NEXT: movq %rdx, %rbp
@@ -7942,13 +7935,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %r8, %r10
 ; X64-NEXT: addq %rbx, %rsi
 ; X64-NEXT: adcq %rbp, %r10
-; X64-NEXT: movq %r9, %rdi
-; X64-NEXT: movq 64(%rdi), %r13
+; X64-NEXT: movq 64(%r9), %r13
 ; X64-NEXT: movq %r13, %rax
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq 72(%rdi), %r9
+; X64-NEXT: movq 72(%r9), %r9
 ; X64-NEXT: movq %r9, %rax
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %rbp
@@ -7976,8 +7968,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, %r15
 ; X64-NEXT: movq %r12, %rcx
-; X64-NEXT: addq %r15, %rcx
-; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: addq %rax, %rcx
+; X64-NEXT: adcq %rdx, %r8
 ; X64-NEXT: addq %rbp, %rcx
 ; X64-NEXT: adcq %rbx, %r8
 ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
@@ -8029,14 +8021,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: setb %r10b
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, %r9
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
 ; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r12
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %r8, %r12
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rcx, %rbx
@@ -8075,7 +8066,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: imulq %rcx, %rdi
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: movq %r12, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r12
 ; X64-NEXT: movq %rax, %r9
 ; X64-NEXT: addq %rdi, %rdx
 ; X64-NEXT: movq 104(%rbp), %r8
@@ -44,7 +44,7 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
 ; X32-NEXT: movl %edi, %eax
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %ecx, %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %ecx
 ; X32-NEXT: addl %ebx, %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -62,9 +62,9 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: mull %edx
 ; X32-NEXT: movl %edx, %ebp
-; X32-NEXT: movl %ebp, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: xorl %edx, %edx
 ; X32-NEXT: mull %edx
@@ -127,7 +127,7 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl 8(%ecx), %ebx
+; X32-NEXT: movl 8(%eax), %ebx
 ; X32-NEXT: movl %esi, %eax
 ; X32-NEXT: movl %esi, %edi
 ; X32-NEXT: mull %ebx
@@ -156,7 +156,7 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
 ; X32-NEXT: adcl %eax, %esi
 ; X32-NEXT: movl %ebx, %edi
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: mull %ecx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -31,7 +31,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
 ; X32-NEXT: adcl %ecx, %ebx
 ; X32-NEXT: movl %ecx, %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: setb %cl
 ; X32-NEXT: addl %eax, %ebx
 ; X32-NEXT: movzbl %cl, %ecx
@@ -55,7 +55,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %eax, %ebp
 ; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl 4(%ecx), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %ecx, %esi
@@ -92,14 +92,13 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: adcl %edi, %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl (%ecx), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: xorl %ebp, %ebp
 ; X32-NEXT: mull %ebp
 ; X32-NEXT: movl %edx, %ebx
 ; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: movl %eax, %edx
 ; X32-NEXT: addl %esi, %edx
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %ebx, %eax
@@ -113,7 +112,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %ecx, %edi
 ; X32-NEXT: movl %ecx, %ebp
-; X32-NEXT: movl %ebp, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: addl %eax, %edi
 ; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: adcl %edx, %eax
@@ -143,7 +142,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl %ebx, %ecx
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: setb %bl
 ; X32-NEXT: addl %eax, %ecx
 ; X32-NEXT: movzbl %bl, %ebx
@@ -278,7 +277,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: adcl %ebx, %ecx
 ; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl %edi, %ebp
-; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl %edi, %eax
 ; X32-NEXT: mull %esi
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: movl %eax, %ebx
@@ -433,7 +432,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: adcl %edi, %ecx
 ; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl %ebx, %edi
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: mull %esi
 ; X32-NEXT: movl %eax, %ebp
 ; X32-NEXT: addl %ecx, %ebp
@@ -899,7 +898,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: mull %esi
 ; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl %edx, %esi
 ; X32-NEXT: addl %ebx, %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -929,7 +928,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: mull %esi
+; X32-NEXT: mull %ebx
 ; X32-NEXT: movl %edx, %edi
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
@@ -1077,7 +1076,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: addl %esi, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT: movl %edi, %eax
-; X32-NEXT: imull %eax, %esi
+; X32-NEXT: imull %edi, %esi
 ; X32-NEXT: addl %edx, %esi
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -1177,7 +1176,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: movl 40(%ecx), %ebx
+; X32-NEXT: movl 40(%esi), %ebx
 ; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -1374,7 +1373,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X32-NEXT: addl %edi, %edx
 ; X32-NEXT: movl 60(%ebx), %ebx
 ; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: imull %eax, %ebx
+; X32-NEXT: imull %ecx, %ebx
 ; X32-NEXT: addl %edx, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: addl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -1546,7 +1545,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: movq 8(%rsi), %rbp
 ; X64-NEXT: movq %r15, %rax
 ; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %rdx
 ; X64-NEXT: movq %rdx, %r9
 ; X64-NEXT: movq %rax, %r8
 ; X64-NEXT: movq %r11, %rax
@@ -1569,15 +1568,15 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: movq %r11, %rax
 ; X64-NEXT: mulq %rbp
 ; X64-NEXT: movq %rbp, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %rbp
 ; X64-NEXT: addq %rcx, %rbp
 ; X64-NEXT: adcq %rbx, %rsi
 ; X64-NEXT: xorl %ecx, %ecx
 ; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, %r13
 ; X64-NEXT: movq %rax, %r10
@@ -1585,7 +1584,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, %r15
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: addq %r10, %r15
 ; X64-NEXT: adcq %r13, %rdx
 ; X64-NEXT: addq %rbp, %r15
@@ -1624,8 +1623,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: mulq %rdx
 ; X64-NEXT: movq %rdx, %r14
 ; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %r11, %r10
-; X64-NEXT: adcq %r14, %r13
+; X64-NEXT: addq %rax, %r10
+; X64-NEXT: adcq %rdx, %r13
 ; X64-NEXT: addq %rbp, %r10
 ; X64-NEXT: adcq %rsi, %r13
 ; X64-NEXT: addq %r8, %r10
@@ -1637,7 +1636,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: movq 16(%rsi), %r8
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %r12
@@ -1668,7 +1667,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rbp, %r11
+; X64-NEXT: addq %rax, %r11
 ; X64-NEXT: adcq %rdx, %r14
 ; X64-NEXT: addq %r9, %r11
 ; X64-NEXT: adcq %rbx, %r14
@@ -8,7 +8,7 @@ define i128 @foo(i128 %t, i128 %u) {
 ; X64-NEXT: movq %rdx, %r8
 ; X64-NEXT: imulq %rdi, %rcx
 ; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: mulq %rdx
 ; X64-NEXT: addq %rcx, %rdx
 ; X64-NEXT: imulq %r8, %rsi
 ; X64-NEXT: addq %rsi, %rdx
@@ -234,7 +234,7 @@ define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
 ; SSE-LABEL: _mul4xi32toi64b:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm1, %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; SSE-NEXT: pmuludq %xmm0, %xmm1
@@ -9,7 +9,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
 ; SSE2-LABEL: mul_v16i8c:
 ; SSE2: # %bb.0: # %entry
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
 ; SSE2-NEXT: pmullw %xmm2, %xmm1
@@ -143,10 +143,10 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
 ; SSE2-LABEL: mul_v16i8:
 ; SSE2: # %bb.0: # %entry
 ; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT: psraw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
 ; SSE2-NEXT: psraw $8, %xmm3
 ; SSE2-NEXT: pmullw %xmm2, %xmm3
 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@ -386,7 +386,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
|
|||
; SSE2-LABEL: mul_v32i8c:
|
||||
; SSE2: # %bb.0: # %entry
|
||||
; SSE2-NEXT: movdqa %xmm0, %xmm2
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm2
|
||||
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
|
||||
; SSE2-NEXT: pmullw %xmm3, %xmm2
|
||||
|
@ -398,7 +398,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
|
|||
; SSE2-NEXT: pand %xmm4, %xmm0
|
||||
; SSE2-NEXT: packuswb %xmm2, %xmm0
|
||||
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm2
|
||||
; SSE2-NEXT: pmullw %xmm3, %xmm2
|
||||
; SSE2-NEXT: pand %xmm4, %xmm2
|
||||
|
@ -567,10 +567,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
|
|||
; SSE2-LABEL: mul_v32i8:
|
||||
; SSE2: # %bb.0: # %entry
|
||||
; SSE2-NEXT: movdqa %xmm2, %xmm4
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm4
|
||||
; SSE2-NEXT: movdqa %xmm0, %xmm5
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm5
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm5
|
||||
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
|
||||
|
@ -583,10 +583,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
|
|||
; SSE2-NEXT: pand %xmm4, %xmm0
|
||||
; SSE2-NEXT: packuswb %xmm5, %xmm0
|
||||
; SSE2-NEXT: movdqa %xmm3, %xmm2
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm2
|
||||
; SSE2-NEXT: movdqa %xmm1, %xmm5
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm5
|
||||
; SSE2-NEXT: pmullw %xmm2, %xmm5
|
||||
; SSE2-NEXT: pand %xmm4, %xmm5
|
||||
|
@ -774,7 +774,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
|
|||
; SSE2-LABEL: mul_v64i8c:
|
||||
; SSE2: # %bb.0: # %entry
|
||||
; SSE2-NEXT: movdqa %xmm0, %xmm6
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm6
|
||||
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm6
|
||||
|
@ -786,7 +786,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
|
|||
; SSE2-NEXT: pand %xmm5, %xmm0
|
||||
; SSE2-NEXT: packuswb %xmm6, %xmm0
|
||||
; SSE2-NEXT: movdqa %xmm1, %xmm6
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm6
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm6
|
||||
; SSE2-NEXT: pand %xmm5, %xmm6
|
||||
|
@ -796,7 +796,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
|
|||
; SSE2-NEXT: pand %xmm5, %xmm1
|
||||
; SSE2-NEXT: packuswb %xmm6, %xmm1
|
||||
; SSE2-NEXT: movdqa %xmm2, %xmm6
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm6
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm6
|
||||
; SSE2-NEXT: pand %xmm5, %xmm6
|
||||
|
@ -806,7 +806,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
|
|||
; SSE2-NEXT: pand %xmm5, %xmm2
|
||||
; SSE2-NEXT: packuswb %xmm6, %xmm2
|
||||
; SSE2-NEXT: movdqa %xmm3, %xmm6
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm6
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm6
|
||||
; SSE2-NEXT: pand %xmm5, %xmm6
|
||||
|
@ -821,7 +821,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
|
|||
; SSE41: # %bb.0: # %entry
|
||||
; SSE41-NEXT: movdqa %xmm1, %xmm4
|
||||
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
||||
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
|
||||
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
|
||||
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
|
||||
; SSE41-NEXT: pmullw %xmm6, %xmm0
|
||||
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
|
||||
|
@ -939,10 +939,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
|
|||
; SSE2-LABEL: mul_v64i8:
|
||||
; SSE2: # %bb.0: # %entry
|
||||
; SSE2-NEXT: movdqa %xmm4, %xmm8
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm8
|
||||
; SSE2-NEXT: movdqa %xmm0, %xmm9
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm9
|
||||
; SSE2-NEXT: pmullw %xmm8, %xmm9
|
||||
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
|
||||
|
@ -955,10 +955,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
|
|||
; SSE2-NEXT: pand %xmm8, %xmm0
|
||||
; SSE2-NEXT: packuswb %xmm9, %xmm0
|
||||
; SSE2-NEXT: movdqa %xmm5, %xmm9
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm9
|
||||
; SSE2-NEXT: movdqa %xmm1, %xmm4
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm4
|
||||
; SSE2-NEXT: pmullw %xmm9, %xmm4
|
||||
; SSE2-NEXT: pand %xmm8, %xmm4
|
||||
|
@ -970,10 +970,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
|
|||
; SSE2-NEXT: pand %xmm8, %xmm1
|
||||
; SSE2-NEXT: packuswb %xmm4, %xmm1
|
||||
; SSE2-NEXT: movdqa %xmm6, %xmm4
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm4
|
||||
; SSE2-NEXT: movdqa %xmm2, %xmm5
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm5
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm5
|
||||
; SSE2-NEXT: pand %xmm8, %xmm5
|
||||
|
@ -985,10 +985,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
|
|||
; SSE2-NEXT: pand %xmm8, %xmm2
|
||||
; SSE2-NEXT: packuswb %xmm5, %xmm2
|
||||
; SSE2-NEXT: movdqa %xmm7, %xmm4
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm4
|
||||
; SSE2-NEXT: movdqa %xmm3, %xmm5
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
||||
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
|
||||
; SSE2-NEXT: psraw $8, %xmm5
|
||||
; SSE2-NEXT: pmullw %xmm4, %xmm5
|
||||
; SSE2-NEXT: pand %xmm8, %xmm5
|
||||
|
@ -1006,7 +1006,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
|
|||
; SSE41-NEXT: movdqa %xmm1, %xmm8
|
||||
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
||||
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
|
||||
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
|
||||
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
|
||||
; SSE41-NEXT: pmullw %xmm9, %xmm0
|
||||
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
|
||||
; SSE41-NEXT: pand %xmm9, %xmm0
|
||||
|
@ -1387,7 +1387,7 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
|
|||
; SSE2: # %bb.0:
|
||||
; SSE2-NEXT: movdqa %xmm1, %xmm4
|
||||
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
||||
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
|
||||
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
|
||||
; SSE2-NEXT: movdqa %xmm9, %xmm0
|
||||
; SSE2-NEXT: psrad $31, %xmm0
|
||||
; SSE2-NEXT: psrad $16, %xmm9
|
||||
|
|
|
@@ -5,7 +5,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
; CHECK-LABEL: pow_wrapper:
; CHECK: # %bb.0:
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0

@@ -25,7 +25,7 @@ define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq

@@ -49,13 +49,13 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload

@@ -23,7 +23,7 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: imull %ecx
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: shrl $31, %eax
; CHECK-NEXT: sarl $5, %ecx
; CHECK-NEXT: addl %eax, %ecx

@@ -31,7 +31,7 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: imull %edx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: shrl $31, %eax
; CHECK-NEXT: sarl $7, %edi
; CHECK-NEXT: addl %eax, %edi

@@ -683,7 +683,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE41-LABEL: test14:
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]

@@ -1247,7 +1247,7 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSE2-NEXT: movdqa %xmm1, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm0

@@ -1295,7 +1295,7 @@ define <32 x i16> @psubus_32i16_max(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSSE3-NEXT: movdqa %xmm1, %xmm9
; SSSE3-NEXT: movdqa %xmm0, %xmm8
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSSE3-NEXT: movdqa %xmm8, %xmm1
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pxor %xmm3, %xmm0

@@ -1950,7 +1950,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSE2-NEXT: movdqa %xmm9, %xmm11
; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm10
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]

@@ -2013,7 +2013,7 @@ define <16 x i16> @psubus_16i32_max(<16 x i16> %x, <16 x i32> %y) nounwind {
; SSSE3-NEXT: movdqa %xmm9, %xmm11
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
; SSSE3-NEXT: movdqa %xmm8, %xmm10
; SSSE3-NEXT: movdqa %xmm1, %xmm10
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
@@ -19,7 +19,7 @@ entry:
; X64-LABEL: icall_reg:
; X64-DAG: movq %rdi, %[[fp:[^ ]*]]
; X64-DAG: movl %esi, %[[x:[^ ]*]]
; X64: movl %[[x]], %edi
; X64: movl %esi, %edi
; X64: callq bar
; X64-DAG: movl %[[x]], %edi
; X64-DAG: movq %[[fp]], %r11

@@ -111,7 +111,7 @@ define void @vcall(%struct.Foo* %obj) #0 {

; X64-LABEL: vcall:
; X64: movq %rdi, %[[obj:[^ ]*]]
; X64: movq (%[[obj]]), %[[vptr:[^ ]*]]
; X64: movq (%rdi), %[[vptr:[^ ]*]]
; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]]
; X64: movq %[[fp]], %r11
; X64: callq __llvm_external_retpoline_r11

@@ -19,7 +19,7 @@ entry:
; X64-LABEL: icall_reg:
; X64-DAG: movq %rdi, %[[fp:[^ ]*]]
; X64-DAG: movl %esi, %[[x:[^ ]*]]
; X64: movl %[[x]], %edi
; X64: movl %esi, %edi
; X64: callq bar
; X64-DAG: movl %[[x]], %edi
; X64-DAG: movq %[[fp]], %r11

@@ -111,7 +111,7 @@ define void @vcall(%struct.Foo* %obj) #0 {

; X64-LABEL: vcall:
; X64: movq %rdi, %[[obj:[^ ]*]]
; X64: movq (%[[obj]]), %[[vptr:[^ ]*]]
; X64: movq (%rdi), %[[vptr:[^ ]*]]
; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]]
; X64: movq %[[fp]], %r11
; X64: callq __llvm_retpoline_r11
@@ -653,7 +653,7 @@ define i32 @sad_avx64i8() nounwind {
; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm13, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm13, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm1

@@ -40,6 +40,6 @@ declare void @_Z7CapturePi(i32*)

; LINUX-I386-PA: calll __safestack_pointer_address
; LINUX-I386-PA: movl %eax, %[[A:.*]]
; LINUX-I386-PA: movl (%[[A]]), %[[B:.*]]
; LINUX-I386-PA: movl (%eax), %[[B:.*]]
; LINUX-I386-PA: leal -16(%[[B]]), %[[C:.*]]
; LINUX-I386-PA: movl %[[C]], (%[[A]])

@@ -25,6 +25,6 @@ declare void @_Z7CapturePi(i32*)

; CALL: callq __safestack_pointer_address
; CALL: movq %rax, %[[A:.*]]
; CALL: movq (%[[A]]), %[[B:.*]]
; CALL: movq (%rax), %[[B:.*]]
; CALL: leaq -16(%[[B]]), %[[C:.*]]
; CALL: movq %[[C]], (%[[A]])

@@ -11,7 +11,7 @@ define void @vectorDiv (<2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)
; CHECK-NEXT: movq %rdx, %r8
; CHECK-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rcx
; CHECK-NEXT: pmovsxdq (%rdi,%rcx,8), %xmm0
; CHECK-NEXT: pmovsxdq (%rsi,%rcx,8), %xmm1

@@ -403,7 +403,7 @@ define void @test_int_div(<3 x i32>* %dest, <3 x i32>* %old, i32 %n) {
; CHECK-LABEL: test_int_div:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %edx, %r9d
; CHECK-NEXT: testl %r9d, %r9d
; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle .LBB12_3
; CHECK-NEXT: # %bb.1: # %bb.nph
; CHECK-NEXT: xorl %ecx, %ecx

@@ -22,8 +22,7 @@ define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: movl (%edx), %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax

@@ -61,7 +61,7 @@ false:

; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
; CHECK: movl %ecx, %eax
; CHECK: cmpl %edx, %eax
; CHECK: cmpl %edx, %ecx
; CHECK: jge LBB1_2
; CHECK: pushl %eax
; CHECK: movl $4092, %eax
@@ -614,7 +614,7 @@ define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
; SLOW32-NEXT: movdqa %xmm1, %xmm3
; SLOW32-NEXT: movdqa %xmm0, %xmm1
; SLOW32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
; SLOW32-NEXT: movdqa %xmm1, %xmm4
; SLOW32-NEXT: movdqa %xmm0, %xmm4
; SLOW32-NEXT: pmulhuw %xmm2, %xmm4
; SLOW32-NEXT: pmullw %xmm2, %xmm1
; SLOW32-NEXT: movdqa %xmm1, %xmm0

@@ -633,7 +633,7 @@ define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
; SLOW64-NEXT: movdqa %xmm1, %xmm3
; SLOW64-NEXT: movdqa %xmm0, %xmm1
; SLOW64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
; SLOW64-NEXT: movdqa %xmm1, %xmm4
; SLOW64-NEXT: movdqa %xmm0, %xmm4
; SLOW64-NEXT: pmulhuw %xmm2, %xmm4
; SLOW64-NEXT: pmullw %xmm2, %xmm1
; SLOW64-NEXT: movdqa %xmm1, %xmm0

@@ -201,7 +201,7 @@ define float @f32_estimate(float %x) #1 {
; SSE: # %bb.0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm2
; SSE-NEXT: addss {{.*}}(%rip), %xmm2
; SSE-NEXT: mulss {{.*}}(%rip), %xmm1

@@ -247,7 +247,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
; SSE: # %bb.0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: addps {{.*}}(%rip), %xmm2
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1

@@ -297,7 +297,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
; SSE-NEXT: addps %xmm0, %xmm2

@@ -305,7 +305,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: rsqrtps %xmm1, %xmm5
; SSE-NEXT: movaps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm3, %xmm3
; SSE-NEXT: mulps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm1, %xmm3
; SSE-NEXT: addps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm4, %xmm3

@@ -1084,8 +1084,7 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB62_1
; SSE2-NEXT: # %bb.2:
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: retq
; SSE2-NEXT: .LBB62_1:
; SSE2-NEXT: addss %xmm0, %xmm1

@@ -1097,8 +1096,7 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB62_1
; SSE41-NEXT: # %bb.2:
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB62_1:
; SSE41-NEXT: addss %xmm0, %xmm1

@@ -1139,8 +1137,7 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB63_1
; SSE2-NEXT: # %bb.2:
; SSE2-NEXT: movapd %xmm2, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: retq
; SSE2-NEXT: .LBB63_1:
; SSE2-NEXT: addsd %xmm0, %xmm1

@@ -1152,8 +1149,7 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB63_1
; SSE41-NEXT: # %bb.2:
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB63_1:
; SSE41-NEXT: addsd %xmm0, %xmm1

@@ -16,7 +16,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: test4:
; X32: # %bb.0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT: subss %xmm1, %xmm2

@@ -26,7 +26,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X64-LABEL: test4:
; X64: # %bb.0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT: subss %xmm1, %xmm2

@@ -406,9 +406,9 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm4
@@ -114,7 +114,7 @@ define void @test6(i32 %a) gc "statepoint-example" {
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset %rbx, -16
; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq _baz
; CHECK-NEXT: Ltmp6:
; CHECK-NEXT: callq _bar

@@ -61,9 +61,9 @@ define i32 @back_to_back_deopt(i32 %a, i32 %b, i32 %c) #1
gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but there need to be three stack slots created
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK-DAG: movl %edi, 12(%rsp)
; CHECK-DAG: movl %esi, 8(%rsp)
; CHECK-DAG: movl %edx, 4(%rsp)
; CHECK: callq
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
@@ -1016,12 +1016,12 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]

@@ -1124,12 +1124,12 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]

@@ -1314,11 +1314,11 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]

@@ -1556,7 +1556,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]

@@ -1568,11 +1568,11 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]

@@ -1683,7 +1683,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1694,7 +1694,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1861,7 +1861,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1872,7 +1872,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx

@@ -1591,7 +1591,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_4f32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1
; SSE-NEXT: # %bb.2:

@@ -1819,7 +1819,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1
; SSE-NEXT: # %bb.2:
@@ -454,7 +454,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1

@@ -35,7 +35,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1
; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: movdqa %xmm0, %xmm3
; X32-NEXT: psllw $4, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: movdqa %xmm1, %xmm0

@@ -47,7 +47,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: paddb %xmm3, %xmm3
; X32-NEXT: paddb %xmm2, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2

@@ -58,7 +58,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: movdqa %xmm0, %xmm3
; X64-NEXT: psllw $4, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: movdqa %xmm1, %xmm0

@@ -70,7 +70,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: paddb %xmm3, %xmm3
; X64-NEXT: paddb %xmm2, %xmm3
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2

@@ -955,7 +955,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: psubd %xmm2, %xmm3
; SSE41-NEXT: psubd %xmm0, %xmm3
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0
@@ -177,13 +177,13 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1

@@ -501,13 +501,13 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1

@@ -523,7 +523,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2

@@ -497,7 +497,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
@@ -178,7 +178,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: psllw $4, %xmm2
; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]

@@ -189,7 +189,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: paddb %xmm2, %xmm2
; X86-NEXT: paddb %xmm1, %xmm2
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0

@@ -198,7 +198,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $4, %xmm2
; X64-NEXT: pand {{.*}}(%rip), %xmm2
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]

@@ -209,7 +209,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: paddb %xmm2, %xmm2
; X64-NEXT: paddb %xmm1, %xmm2
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0

@@ -359,7 +359,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: paddw %xmm4, %xmm4
; SSE41-NEXT: paddw %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: psllw $8, %xmm6
; SSE41-NEXT: movdqa %xmm3, %xmm5

@@ -384,7 +384,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: por %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: paddw %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0

@@ -629,10 +629,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm3, %xmm2
; SSE41-NEXT: psllw $5, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psllw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5

@@ -642,13 +642,13 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm5, %xmm5
; SSE41-NEXT: paddb %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: psrlw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5

@@ -1202,7 +1202,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]

@@ -1214,7 +1214,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3