//====- X86CmovConversion.cpp - Convert Cmov to Branch --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. This pass is conservative. It transforms if and
/// only if it can guarantee a gain with high confidence.
///
/// Thus, the optimization applies under the following conditions:
///   1. Consider as candidates only CMOVs in innermost loops (assume that
///      most hotspots are represented by these loops).
///   2. Given a group of CMOV instructions that are using the same EFLAGS def
///      instruction:
///      a. Consider them as candidates only if all have the same condition
///         code or the opposite one, to prevent generating more than one
///         conditional jump per EFLAGS def instruction.
///      b. Consider them as candidates only if all are profitable to be
///         converted (assume that one bad conversion may cause a degradation).
///   3. Apply conversion only for loops that are found profitable and only for
///      CMOV candidates that were found profitable.
///      a. A loop is considered profitable only if conversion will reduce its
///         depth cost by some threshold.
///      b. CMOV is considered profitable if the cost of its condition is
///         higher than the average cost of its true-value and false-value by
///         25% of branch-misprediction-penalty. This ensures no degradation
///         even with 25% branch misprediction.
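///
///         As a rough, purely illustrative example of 3.b (the numbers are
///         assumptions, not values taken from the scheduling model): with a
///         branch-misprediction-penalty of 20 cycles and true/false value
///         depths of 2 and 4 cycles (average 3), the condition's depth must
///         exceed 3 + 0.25 * 20 = 8 cycles for the CMOV to be considered
///         profitable to convert.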
///
/// Note: This pass is assumed to run on SSA machine code.
//
//===----------------------------------------------------------------------===//
//
//  External interfaces:
//      FunctionPass *llvm::createX86CmovConverterPass();
//      bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF);
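//
//  For orientation only (a hedged sketch; the exact hook lives in the X86
//  target's pass-pipeline setup, not in this file), the pass is typically
//  scheduled with something like:
//      addPass(createX86CmovConverterPass());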
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "x86-cmov-conversion"

STATISTIC(NumOfSkippedCmovGroups, "Number of unsupported CMOV-groups");
STATISTIC(NumOfCmovGroupCandidate, "Number of CMOV-group candidates");
STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");

// This internal switch can be used to turn off the cmov/branch optimization.
static cl::opt<bool>
    EnableCmovConverter("x86-cmov-converter",
                        cl::desc("Enable the X86 cmov-to-branch optimization."),
                        cl::init(true), cl::Hidden);

static cl::opt<unsigned>
    GainCycleThreshold("x86-cmov-converter-threshold",
                       cl::desc("Minimum gain per loop (in cycles) threshold."),
                       cl::init(4), cl::Hidden);

static cl::opt<bool> ForceMemOperand(
    "x86-cmov-converter-force-mem-operand",
    cl::desc("Convert cmovs to branches whenever they have memory operands."),
    cl::init(true), cl::Hidden);
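
// Usage note (an illustrative sketch, not exercised by this file): these are
// hidden cl::opt flags, so they are normally toggled on the command line of a
// tool built on LLVM's command-line machinery, for example:
//   llc -x86-cmov-converter=false foo.ll
//   llc -x86-cmov-converter-threshold=8 \
//       -x86-cmov-converter-force-mem-operand=false foo.ll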

namespace {

/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
  X86CmovConverterPass() : MachineFunctionPass(ID) { }

  StringRef getPassName() const override { return "X86 cmov Conversion"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  TargetSchedModel TSchedModel;

  /// List of consecutive CMOV instructions.
  using CmovGroup = SmallVector<MachineInstr *, 2>;
  using CmovGroups = SmallVector<CmovGroup, 2>;

  /// Collect all CMOV-group-candidates in \p Blocks and update \p
  /// CmovInstGroups accordingly.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in \p Blocks.
  /// \returns true iff it found any CMOV-group-candidate.
  bool collectCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                             CmovGroups &CmovInstGroups,
                             bool IncludeLoads = false);

  /// Check if it is profitable to transform each CMOV-group-candidate into a
  /// branch. Remove all groups that are not profitable from \p CmovInstGroups.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in \p Blocks.
  /// \returns true iff any CMOV-group-candidate remains.
  bool checkForProfitableCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                                        CmovGroups &CmovInstGroups);

  /// Convert the given list of consecutive CMOV instructions into a branch.
  ///
  /// \param Group Consecutive CMOV instructions to be converted into a branch.
  void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};

} // end anonymous namespace

char X86CmovConverterPass::ID = 0;

void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
  AU.addRequired<MachineLoopInfo>();
}

bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (!EnableCmovConverter)
    return false;

  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << "**********\n");

  bool Changed = false;
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  MRI = &MF.getRegInfo();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  TSchedModel.init(&STI);

  // Before we handle the more subtle cases of register-register CMOVs inside
  // of potentially hot loops, we want to quickly remove all CMOVs with
  // a memory operand. The CMOV will risk a stall waiting for the load to
  // complete, a stall that speculative execution behind a branch is better
  // suited to handle on modern x86 chips.
  if (ForceMemOperand) {
    CmovGroups AllCmovGroups;
    SmallVector<MachineBasicBlock *, 4> Blocks;
    for (auto &MBB : MF)
      Blocks.push_back(&MBB);
    if (collectCmovCandidates(Blocks, AllCmovGroups, /*IncludeLoads*/ true)) {
      for (auto &Group : AllCmovGroups) {
        // Skip any group that doesn't contain at least one memory-operand
        // CMOV.
        if (!llvm::any_of(Group, [&](MachineInstr *I) { return I->mayLoad(); }))
          continue;

        // For CMOV groups which we can rewrite and which contain a memory
        // load, always rewrite them. On x86, a CMOV will dramatically amplify
        // any memory latency by blocking speculative execution.
        Changed = true;
        convertCmovInstsToBranches(Group);
      }
    }
  }

  //===--------------------------------------------------------------------===//
  // Register-operand Conversion Algorithm
  // ---------
  //   For each innermost loop
  //     collectCmovCandidates() {
  //       Find all CMOV-group-candidates.
  //     }
  //
  //     checkForProfitableCmovCandidates() {
  //       * Calculate both loop-depth and optimized-loop-depth.
  //       * Use these depths to check for loop transformation profitability.
  //       * Check for CMOV-group-candidate transformation profitability.
  //     }
  //
  //   For each profitable CMOV-group-candidate
  //     convertCmovInstsToBranches() {
  //       * Create FalseBB, SinkBB, Conditional branch to SinkBB.
  //       * Replace each CMOV instruction with a PHI instruction in SinkBB.
  //     }
  //
  // Note: For more details, see each function description.
  //===--------------------------------------------------------------------===//
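  // As a rough illustration only (schematic, not the exact MIR this pass
  // emits; names and operand order are simplified), a single register-register
  // CMOV of the form
  //
  //   MBB:     %d = CMOVcc %t, %f      ; select between %t and %f on EFLAGS
  //
  // ends up after conversion shaped like
  //
  //   MBB:     JCC %SinkBB             ; conditional branch over FalseBB
  //   FalseBB: (fallthrough from MBB)
  //   SinkBB:  %d = PHI [%v, %MBB], [%w, %FalseBB]
  //
  // where the PHI's incoming values are chosen according to the condition
  // code.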

  // Build up the loops in pre-order.
  SmallVector<MachineLoop *, 4> Loops(MLI.begin(), MLI.end());
  // Note that we need to check size on each iteration as we accumulate child
  // loops.
  for (int i = 0; i < (int)Loops.size(); ++i)
    for (MachineLoop *Child : Loops[i]->getSubLoops())
      Loops.push_back(Child);

  for (MachineLoop *CurrLoop : Loops) {
    // Optimize only innermost loops.
    if (!CurrLoop->getSubLoops().empty())
      continue;

    // List of consecutive CMOV instructions to be processed.
    CmovGroups CmovInstGroups;

    if (!collectCmovCandidates(CurrLoop->getBlocks(), CmovInstGroups))
      continue;

    if (!checkForProfitableCmovCandidates(CurrLoop->getBlocks(),
                                          CmovInstGroups))
      continue;

    Changed = true;
    for (auto &Group : CmovInstGroups)
      convertCmovInstsToBranches(Group);
  }

  return Changed;
}

bool X86CmovConverterPass::collectCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups,
    bool IncludeLoads) {
  //===--------------------------------------------------------------------===//
  // Collect all CMOV-group-candidates and add them into CmovInstGroups.
  //
  // CMOV-group:
  //   CMOV instructions, in the same MBB, that use the same EFLAGS def
  //   instruction.
  //
  // CMOV-group-candidate:
  //   CMOV-group where all the CMOV instructions are
  //     1. consecutive.
  //     2. have the same condition code or the opposite one.
  //     3. have only register operands (X86::CMOVrr).
  //===--------------------------------------------------------------------===//
  // List of possible improvements (TODOs):
  // --------------------------------------
  // TODO: Add support for X86::CMOVrm instructions.
  // TODO: Add support for X86::SETcc instructions.
  // TODO: Add support for CMOV-groups with non-consecutive CMOV instructions.
  //===--------------------------------------------------------------------===//
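  // For illustration (assembly shown schematically; registers are arbitrary),
  // the two CMOVs below form one CMOV-group-candidate: they are consecutive,
  // both use the EFLAGS def of the CMP, and their condition codes L and GE
  // are opposites, so the group needs only a single conditional jump once
  // converted:
  //
  //   cmp    %esi, %edi     ; the shared EFLAGS def
  //   cmovl  %edx, %eax     ; condition code L
  //   cmovge %ecx, %ebx     ; condition code GE (opposite of L)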

  // Currently processed CMOV-Group.
  CmovGroup Group;
  for (auto *MBB : Blocks) {
    Group.clear();
    // Condition code of the first CMOV instruction in the currently processed
    // range, and its opposite condition code.
    X86::CondCode FirstCC = X86::COND_INVALID, FirstOppCC = X86::COND_INVALID,
                  MemOpCC = X86::COND_INVALID;
    // Indicator of a non-CMOVrr instruction in the currently processed range.
    bool FoundNonCMOVInst = false;
    // Indicator of whether the currently processed CMOV-group should be
    // skipped.
    bool SkipGroup = false;

    for (auto &I : *MBB) {
      // Skip debug instructions.
      if (I.isDebugInstr())
        continue;
      X86::CondCode CC = X86::getCondFromCMov(I);
      // Check if we found an X86::CMOVrr instruction.
improvements were very significant, and in quite important routines
responsible for a great deal of our C++ CPU cycles. The gains pretty
clealy outweigh the regressions for us.
I also ran the test-suite and SPEC2006. Only 11 binaries changed at all
and none of them showed any regressions.
Amjad Aboud at Intel also ran this over their benchmarks and saw no
regressions.
Differential Revision: https://reviews.llvm.org/D36858
llvm-svn: 311226
2017-08-19 13:01:19 +08:00
|
|
|
      if (CC != X86::COND_INVALID && (IncludeLoads || !I.mayLoad())) {
        if (Group.empty()) {
          // We found the first CMOV in the range; reset the flags.
          FirstCC = CC;
          FirstOppCC = X86::GetOppositeBranchCondition(CC);
          // Clear out the prior group's memory operand CC.
          MemOpCC = X86::COND_INVALID;
          FoundNonCMOVInst = false;
          SkipGroup = false;
        }
        Group.push_back(&I);
        // Check if it is a non-consecutive CMOV instruction or it has a
        // different condition code than FirstCC or FirstOppCC.
        if (FoundNonCMOVInst || (CC != FirstCC && CC != FirstOppCC))
          // Set the SkipGroup indicator to skip the currently processed
          // CMOV-group.
          SkipGroup = true;
        if (I.mayLoad()) {
          if (MemOpCC == X86::COND_INVALID)
            // The first memory operand CMOV.
            MemOpCC = CC;
          else if (CC != MemOpCC)
            // Can't handle mixed conditions with memory operands.
            SkipGroup = true;
        }
        // Check if we were relying on zero-extending behavior of the CMOV.
        if (!SkipGroup &&
            llvm::any_of(
                MRI->use_nodbg_instructions(I.defs().begin()->getReg()),
                [&](MachineInstr &UseI) {
                  return UseI.getOpcode() == X86::SUBREG_TO_REG;
                }))
          // FIXME: We should model the cost of using an explicit MOV to handle
          // the zero-extension rather than just refusing to handle this.
          SkipGroup = true;
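        // Rationale (sketch): a SUBREG_TO_REG user relies on the implicit
        // zero-extension performed by the CMOV's def; after conversion the
        // value would be produced by a PHI instead, which provides no such
        // guarantee, so the group is conservatively skipped (see the FIXME
        // above).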
        continue;
      }

      // If Group is empty, keep looking for the first CMOV in the range.
      if (Group.empty())
        continue;
      // We found a non-X86::CMOVrr instruction.
      FoundNonCMOVInst = true;
      // Check if this instruction defines EFLAGS to determine the end of the
      // processed range, as there would be no more instructions using the
      // current EFLAGS def.
      if (I.definesRegister(X86::EFLAGS)) {
        // Check if the currently processed CMOV-group should not be skipped
        // and add it as a CMOV-group-candidate.
        if (!SkipGroup)
          CmovInstGroups.push_back(Group);
        else
          ++NumOfSkippedCmovGroups;
        Group.clear();
      }
    }
    // End of basic block is considered end of range; check if the currently
    // processed CMOV-group should not be skipped and add it as a
    // CMOV-group-candidate.
    if (Group.empty())
      continue;
    if (!SkipGroup)
      CmovInstGroups.push_back(Group);
    else
      ++NumOfSkippedCmovGroups;
  }
  NumOfCmovGroupCandidate += CmovInstGroups.size();
  return !CmovInstGroups.empty();
}
/// \returns Depth of CMOV instruction as if it was converted into branch.
/// \param TrueOpDepth depth cost of CMOV true-value operand.
/// \param FalseOpDepth depth cost of CMOV false-value operand.
static unsigned getDepthOfOptCmov(unsigned TrueOpDepth, unsigned FalseOpDepth) {
  //===--------------------------------------------------------------------===//
  // With no info about branch weight, we assume 50% for each value operand.
  // Thus, the depth of the optimized CMOV instruction is the rounded-up
  // average of its True-Operand-Value-Depth and False-Operand-Value-Depth.
  //===--------------------------------------------------------------------===//
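  // For example (hypothetical depths): TrueOpDepth = 3 and FalseOpDepth = 6
  // yield (3 + 6 + 1) / 2 = 5 cycles, i.e. the average of 4.5 rounded up.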
  return (TrueOpDepth + FalseOpDepth + 1) / 2;
}
bool X86CmovConverterPass::checkForProfitableCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups) {
  struct DepthInfo {
    /// Depth of original loop.
    unsigned Depth;
    /// Depth of optimized loop.
    unsigned OptDepth;
  };
  /// Number of loop iterations for which instruction depth is calculated.
  static const unsigned LoopIterations = 2;
  DenseMap<MachineInstr *, DepthInfo> DepthMap;
  DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
  enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
  /// For each register type, maps the register to its last def instruction.
  DenseMap<unsigned, MachineInstr *> RegDefMaps[RegTypeNum];
  /// Maps a register operand to its def instruction, which can be nullptr if
  /// it is unknown (e.g., the operand is defined outside the loop).
  DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;

  // Set depth of unknown instruction (i.e., nullptr) to zero.
  DepthMap[nullptr] = {0, 0};

  SmallPtrSet<MachineInstr *, 4> CmovInstructions;
  for (auto &Group : CmovInstGroups)
    CmovInstructions.insert(Group.begin(), Group.end());
  //===--------------------------------------------------------------------===//
  // Step 1: Calculate instruction depth and loop depth.
  // Optimized-Loop:
  //   loop with CMOV-group-candidates converted into branches.
  //
  // Instruction-Depth:
  //   instruction latency + max operand depth.
  //   * For a CMOV instruction in the optimized loop the depth is calculated
  //     as: CMOV latency + getDepthOfOptCmov(True-Op-Depth, False-Op-Depth)
  // TODO: Find a better way to estimate the latency of the branch instruction
  //       rather than using the CMOV latency.
  //
  // Loop-Depth:
  //   max instruction depth of all instructions in the loop.
  //   Note: the instruction with max depth represents the critical path in
  //   the loop.
  //
  // Loop-Depth[i]:
  //   Loop-Depth calculated for the first `i` iterations.
  //   Note: it is enough to calculate depth for up to two iterations.
  //
  // Depth-Diff[i]:
  //   Number of cycles saved in the first `i` iterations by optimizing the
  //   loop.
  //===--------------------------------------------------------------------===//
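  // Rough illustration (made-up latencies): a CMOV with latency 1, value
  // operands of depths 2 and 3, and a flags-producing (condition) def of
  // depth 4 gets an original depth of 1 + max(2, 3, 4) = 5, but an optimized
  // depth of 1 + getDepthOfOptCmov(2, 3) = 1 + 3 = 4, since the optimized
  // depth deliberately ignores the depth of the condition computation.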
  for (unsigned I = 0; I < LoopIterations; ++I) {
    DepthInfo &MaxDepth = LoopDepth[I];
    for (auto *MBB : Blocks) {
      // Clear physical registers Def map.
      RegDefMaps[PhyRegType].clear();
      for (MachineInstr &MI : *MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;
        unsigned MIDepth = 0;
        unsigned MIDepthOpt = 0;
        bool IsCMOV = CmovInstructions.count(&MI);
        for (auto &MO : MI.uses()) {
          // Check for "isUse()" since "uses()" also returns implicit
          // definitions.
          if (!MO.isReg() || !MO.isUse())
            continue;
          unsigned Reg = MO.getReg();
          auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
          if (MachineInstr *DefMI = RDM.lookup(Reg)) {
            OperandToDefMap[&MO] = DefMI;
            DepthInfo Info = DepthMap.lookup(DefMI);
            MIDepth = std::max(MIDepth, Info.Depth);
            if (!IsCMOV)
              MIDepthOpt = std::max(MIDepthOpt, Info.OptDepth);
          }
        }
        if (IsCMOV)
          MIDepthOpt = getDepthOfOptCmov(
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(1))].OptDepth,
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(2))].OptDepth);

        // Iterate over all operands to handle implicit definitions as well.
        for (auto &MO : MI.operands()) {
          if (!MO.isReg() || !MO.isDef())
            continue;
          unsigned Reg = MO.getReg();
          RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
        }

        unsigned Latency = TSchedModel.computeInstrLatency(&MI);
        DepthMap[&MI] = {MIDepth += Latency, MIDepthOpt += Latency};
        MaxDepth.Depth = std::max(MaxDepth.Depth, MIDepth);
        MaxDepth.OptDepth = std::max(MaxDepth.OptDepth, MIDepthOpt);
      }
    }
  }
  unsigned Diff[LoopIterations] = {LoopDepth[0].Depth - LoopDepth[0].OptDepth,
                                   LoopDepth[1].Depth - LoopDepth[1].OptDepth};
  //===--------------------------------------------------------------------===//
  // Step 2: Check if the loop is worth optimizing.
  // Worth-Optimize-Loop:
  //   case 1: Diff[1] == Diff[0]
  //           The critical path is iteration independent - there is no
  //           dependency of critical-path instructions on critical-path
  //           instructions of the previous iteration.
  //           Thus, it is enough to check the gain percentage of the 1st
  //           iteration. To be conservative, the optimized loop needs to save
  //           at least 12.5% of the original loop depth, per iteration.
  //
  //   case 2: Diff[1] > Diff[0]
  //           The critical path is iteration dependent - there is a dependency
  //           of critical-path instructions on critical-path instructions of
  //           the previous iteration.
  //           Thus, check the gain percentage of the 2nd iteration (similar to
  //           the previous case), but it is also required to check the
  //           gradient of the gain - the change in Depth-Diff compared to the
  //           change in Loop-Depth between the 1st and 2nd iterations.
  //           To be conservative, the gradient needs to be at least 50%.
  //
  //   In addition, in order not to optimize loops with a very small gain, the
  //   gain (in cycles) after the 2nd iteration should not be less than a given
  //   threshold. Thus, the check (Diff[1] >= GainCycleThreshold) must apply.
  //
  // If the loop is not worth optimizing, remove all CMOV-group-candidates.
  //===--------------------------------------------------------------------===//
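  // Sketch with made-up numbers: for an original loop depth of 40 cycles per
  // iteration, case 1 requires Diff[0] * 8 >= 40, i.e. at least 5 cycles
  // (12.5%) saved per iteration; independently, any gain below
  // GainCycleThreshold is rejected outright by the check just below.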
  if (Diff[1] < GainCycleThreshold)
    return false;

  bool WorthOptLoop = false;
  if (Diff[1] == Diff[0])
    WorthOptLoop = Diff[0] * 8 >= LoopDepth[0].Depth;
  else if (Diff[1] > Diff[0])
    WorthOptLoop =
        (Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth - LoopDepth[0].Depth) &&
        (Diff[1] * 8 >= LoopDepth[1].Depth);

  if (!WorthOptLoop)
    return false;
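  // Note: the multiplications above are the integer form of the percentage
  // checks documented in Step 2: "Diff * 8 >= Depth" means the gain is at
  // least 12.5% of the loop depth, and "(Diff[1] - Diff[0]) * 2 >= (depth
  // growth)" means the gain gradient is at least 50%.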
  ++NumOfLoopCandidate;
  //===--------------------------------------------------------------------===//
  // Step 3: Check for each CMOV-group-candidate whether it is worth optimizing.
  // Worth-Optimize-Group:
  //   Iff it is worth optimizing all CMOV instructions in the group.
  //
  // Worth-Optimize-CMOV:
  //   A predicted branch is faster than CMOV by the difference between the
  //   depth of the condition operand and the depth of the taken (predicted)
  //   value operand.
  //   To be conservative, the gain of such a CMOV transformation should cover
  //   at least 25% of the branch-misprediction penalty.
  //===--------------------------------------------------------------------===//
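  // For illustration (hypothetical scheduling-model numbers): with a
  // misprediction penalty of 20 cycles, a condition-operand depth of 10, and
  // value-operand depths of 2 and 3 (ValCost = 3), the gain is 10 - 3 = 7
  // cycles and 7 * 4 >= 20 holds, so such a CMOV passes the check below.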
  unsigned MispredictPenalty = TSchedModel.getMCSchedModel()->MispredictPenalty;
  CmovGroups TempGroups;
  std::swap(TempGroups, CmovInstGroups);
  for (auto &Group : TempGroups) {
    bool WorthOpGroup = true;
    for (auto *MI : Group) {
      // Avoid a CMOV instruction whose value is used as a pointer to load
      // from. This is another conservative check to avoid converting a CMOV
      // instruction used with a tree-search-like algorithm, where the branch
      // is unpredictable.
      auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
      if (UIs.begin() != UIs.end() && ++UIs.begin() == UIs.end()) {
        unsigned Op = UIs.begin()->getOpcode();
        if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
          WorthOpGroup = false;
          break;
        }
      }
      unsigned CondCost =
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(4))].Depth;
      unsigned ValCost = getDepthOfOptCmov(
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
      if (ValCost > CondCost || (CondCost - ValCost) * 4 < MispredictPenalty) {
        WorthOpGroup = false;
        break;
      }
    }

    if (WorthOpGroup)
      CmovInstGroups.push_back(Group);
  }

  return !CmovInstGroups.empty();
}
static bool checkEFLAGSLive(MachineInstr *MI) {
  if (MI->killsRegister(X86::EFLAGS))
    return false;

  // The EFLAGS operand of MI might be missing a kill marker.
  // Figure out whether the EFLAGS operand should be live after MI.
  MachineBasicBlock *BB = MI->getParent();
  MachineBasicBlock::iterator ItrMI = MI;

  // Scan forward through BB for a use/def of EFLAGS.
  for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
    if (I->readsRegister(X86::EFLAGS))
      return true;
    if (I->definesRegister(X86::EFLAGS))
      return false;
  }
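  // For example, if a later JCC in this block reads EFLAGS (reusing the same
  // comparison), the scan above reports EFLAGS as live, and the caller must
  // then add it as a live-in of the newly created blocks.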
  // We hit the end of the block; check whether EFLAGS is live into a
  // successor.
  for (auto I = BB->succ_begin(), E = BB->succ_end(); I != E; ++I) {
    if ((*I)->isLiveIn(X86::EFLAGS))
      return true;
  }

  return false;
}
/// Given \p First CMOV instruction and \p Last CMOV instruction representing a
/// group of CMOV instructions, which may contain debug instructions in
/// between, move all debug instructions to after the last CMOV instruction,
/// making the CMOV group consecutive.
static void packCmovGroup(MachineInstr *First, MachineInstr *Last) {
  assert(X86::getCondFromCMov(*Last) != X86::COND_INVALID &&
         "Last instruction in a CMOV group must be a CMOV instruction");
  SmallVector<MachineInstr *, 2> DBGInstructions;
  for (auto I = First->getIterator(), E = Last->getIterator(); I != E; I++) {
    if (I->isDebugInstr())
      DBGInstructions.push_back(&*I);
  }

  // Splice the debug instructions after the CMOV group.
  MachineBasicBlock *MBB = First->getParent();
  for (auto *MI : DBGInstructions)
    MBB->insertAfter(Last, MI->removeFromParent());
}
void X86CmovConverterPass::convertCmovInstsToBranches(
    SmallVectorImpl<MachineInstr *> &Group) const {
  assert(!Group.empty() && "No CMOV instructions to convert");
  ++NumOfOptimizedCmovGroups;

  // If the CMOV group is not packed, e.g., there are debug instructions
  // between the first CMOV and the last CMOV, then pack the group and make
  // the CMOV instructions consecutive by moving the debug instructions to
  // after the last CMOV.
  packCmovGroup(Group.front(), Group.back());
  // To convert a CMOVcc instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.

  // Before
  // -----
  // MBB:
  //   cond = cmp ...
  //   v1 = CMOVge t1, f1, cond
  //   v2 = CMOVlt t2, f2, cond
  //   v3 = CMOVge v1, f3, cond
  //
  // After
  // -----
  // MBB:
  //   cond = cmp ...
  //   jge %SinkMBB
  //
  // FalseMBB:
  //   jmp %SinkMBB
  //
  // SinkMBB:
  //   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
  //   %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
  //                                          ; true-value with false-value
  //   %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
  //                                          ; previous Phi instruction result
  MachineInstr &MI = *Group.front();
  MachineInstr *LastCMOV = Group.back();
  DebugLoc DL = MI.getDebugLoc();
  X86::CondCode CC = X86::CondCode(X86::getCondFromCMov(MI));
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  // Potentially swap the condition codes so that any memory operand to a CMOV
  // is in the *false* position instead of the *true* position. We can invert
  // any non-memory-operand CMOV instructions to cope with this, and we ensure
  // memory-operand CMOVs are only included with a single condition code.
  if (llvm::any_of(Group, [&](MachineInstr *I) {
        return I->mayLoad() && X86::getCondFromCMov(*I) == CC;
      }))
    std::swap(CC, OppCC);
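  // Illustration: after this potential swap, every memory-operand CMOV in the
  // group has its loaded value on the false side, so the explicit load
  // materialized further below lands in FalseMBB and is skipped whenever MBB
  // branches directly to SinkMBB.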
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineFunction *F = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);
  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  if (checkEFLAGSLive(LastCMOV)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the false and sink blocks as its successors.
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);
  // Create the conditional branch instruction.
  BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
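  // Note: no explicit branch to FalseMBB is needed; it was inserted
  // immediately after MBB in the function layout, so MBB simply falls through
  // to it when the condition above is not taken.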
  // Add the sink block to the false block's successors.
  FalseMBB->addSuccessor(SinkMBB);

  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// First we need to insert an explicit load on the false path for any memory
|
|
|
|
// operand. We also need to potentially do register rewriting here, but it is
|
|
|
|
// simpler as the memory operands are always on the false path so we can
|
|
|
|
// simply take that input, whatever it is.
|
|
|
|
DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
|
|
|
|
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
|
|
|
|
auto &MI = *MIIt++;
|
|
|
|
// Skip any CMOVs in this group which don't load from memory.
|
|
|
|
if (!MI.mayLoad()) {
|
|
|
|
// Remember the false-side register input.
|
2017-08-20 07:35:50 +08:00
|
|
|
unsigned FalseReg =
|
[X86] Merge the different CMOV instructions for each condition code into single instructions that store the condition code as an immediate.
Summary:
Reorder the condition code enum to match their encodings. Move it to the MC layer so it can be used by the scheduler models.
This avoids needing an isel pattern for each condition code. And it removes
translation switches for converting between CMOV instructions and condition
codes.
Now the printer, encoder and disassembler take care of converting the immediate.
We use InstAliases to handle the assembly matching. But we print using the
asm string in the instruction definition. The instruction itself is marked
IsCodeGenOnly=1 to hide it from the assembly parser.
This does complicate the scheduler models a little since we can't assign the
A and BE instructions to a separate class now.
I plan to make similar changes for SETcc and Jcc.
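A minimal C++ sketch of what this enables for a pass like the cmov converter
(the names are the ones used later in this file; the snippet is illustrative,
not part of the patch):
```
// Read the condition straight off the CMOV's immediate operand instead of
// switching over dozens of per-condition opcodes.
X86::CondCode CC = X86::getCondFromCMov(MI);
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
```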
Reviewers: RKSimon, spatel, lebedev.ri, andreadb, courbet
Reviewed By: RKSimon
Subscribers: gchatelet, hiraditya, kristina, lebedev.ri, jdoerfert, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D60041
llvm-svn: 357800
2019-04-06 03:27:41 +08:00
|
|
|
MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg();
|
2017-08-20 07:35:50 +08:00
|
|
|
// Walk back through any intermediate cmovs referenced.
|
2017-10-05 08:33:50 +08:00
|
|
|
while (true) {
|
2017-08-20 07:35:50 +08:00
|
|
|
auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
|
|
|
|
if (FRIt == FalseBBRegRewriteTable.end())
|
|
|
|
break;
|
|
|
|
FalseReg = FRIt->second;
|
|
|
|
}
|
|
|
|
FalseBBRegRewriteTable[MI.getOperand(0).getReg()] = FalseReg;
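// (Illustrative example, not from the original source: in a group such as
//    %2 = CMOVcc %a, %b        ; register-only
//    %3 = CMOVcc %2, (mem)     ; loads on the false path
//  the false-path value of %2 is whichever of %a/%b it selects when the
//  branch falls through to FalseMBB. Recording that register here lets the
//  unfolded load for %3 be remapped to it instead of to %2, whose definition
//  becomes a PHI in SinkMBB and so will not dominate FalseMBB.)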
|
2017-08-19 13:01:19 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The condition must be the *opposite* of the one we've decided to branch
|
|
|
|
// on as the branch will go *around* the load and the load should happen
|
|
|
|
// when the CMOV condition is false.
|
2019-04-06 03:27:41 +08:00
|
|
|
assert(X86::getCondFromCMov(MI) == OppCC &&
|
2017-08-19 13:01:19 +08:00
|
|
|
"Can only handle memory-operand cmov instructions with a condition "
|
|
|
|
"opposite to the selected branch direction.");
|
|
|
|
|
|
|
|
// The goal is to rewrite the cmov from:
|
|
|
|
//
|
|
|
|
// MBB:
|
|
|
|
// %A = CMOVcc %B (tied), (mem)
|
|
|
|
//
|
|
|
|
// to
|
|
|
|
//
|
|
|
|
// MBB:
|
|
|
|
// %A = CMOVcc %B (tied), %C
|
|
|
|
// FalseMBB:
|
|
|
|
// %C = MOV (mem)
|
|
|
|
//
|
|
|
|
// Which will allow the next loop to rewrite the CMOV in terms of a PHI:
|
|
|
|
//
|
|
|
|
// MBB:
|
|
|
|
// JMP!cc SinkMBB
|
|
|
|
// FalseMBB:
|
|
|
|
// %C = MOV (mem)
|
|
|
|
// SinkMBB:
|
|
|
|
// %A = PHI [ %C, FalseMBB ], [ %B, MBB]
|
|
|
|
|
|
|
|
// Get a fresh register to use as the destination of the MOV.
|
|
|
|
const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg());
|
|
|
|
unsigned TmpReg = MRI->createVirtualRegister(RC);
|
|
|
|
|
|
|
|
SmallVector<MachineInstr *, 4> NewMIs;
|
|
|
|
bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg,
|
|
|
|
/*UnfoldLoad*/ true,
|
|
|
|
/*UnfoldStore*/ false, NewMIs);
|
|
|
|
(void)Unfolded;
|
|
|
|
assert(Unfolded && "Should never fail to unfold a loading cmov!");
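// With UnfoldLoad set and UnfoldStore clear, the unfolded sequence ends with
// the register-register CMOV (preceded by the load that defines TmpReg), so
// pop_back_val() below recovers the rewritten CMOV and everything left in
// NewMIs is the load to be sunk.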
|
|
|
|
|
|
|
|
// Move the new CMOV to just before the old one and reset any impacted
|
|
|
|
// iterator.
|
|
|
|
auto *NewCMOV = NewMIs.pop_back_val();
|
2019-04-06 03:27:41 +08:00
|
|
|
assert(X86::getCondFromCMov(*NewCMOV) == OppCC &&
|
2017-08-19 13:01:19 +08:00
|
|
|
"Last new instruction isn't the expected CMOV!");
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
|
2017-08-19 13:01:19 +08:00
|
|
|
MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
|
|
|
|
if (&*MIItBegin == &MI)
|
|
|
|
MIItBegin = MachineBasicBlock::iterator(NewCMOV);
|
|
|
|
|
|
|
|
// Sink whatever instructions were needed to produce the unfolded operand
|
|
|
|
// into the false block.
|
|
|
|
for (auto *NewMI : NewMIs) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
|
2017-08-19 13:01:19 +08:00
|
|
|
FalseMBB->insert(FalseInsertionPoint, NewMI);
|
|
|
|
// Re-map any operands that are from other cmovs to the inputs for this block.
|
|
|
|
for (auto &MOp : NewMI->uses()) {
|
|
|
|
if (!MOp.isReg())
|
|
|
|
continue;
|
|
|
|
auto It = FalseBBRegRewriteTable.find(MOp.getReg());
|
|
|
|
if (It == FalseBBRegRewriteTable.end())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
MOp.setReg(It->second);
|
|
|
|
// This might have been a kill when it referenced the cmov result, but
|
|
|
|
// it won't necessarily be once rewritten.
|
|
|
|
// FIXME: We could potentially improve this by tracking whether the
|
|
|
|
// operand to the cmov was also a kill, and then skipping the PHI node
|
|
|
|
// construction below.
|
|
|
|
MOp.setIsKill(false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
MBB->erase(MachineBasicBlock::iterator(MI),
|
|
|
|
std::next(MachineBasicBlock::iterator(MI)));
|
|
|
|
|
|
|
|
// Record the rewritten CMOV's false-path value (the freshly loaded TmpReg)
// so later instructions in this group can be remapped to it.
|
|
|
|
FalseBBRegRewriteTable[NewCMOV->getOperand(0).getReg()] = TmpReg;
|
|
|
|
}
|
|
|
|
|
2017-07-17 01:39:56 +08:00
|
|
|
// As we are creating the PHIs, we have to be careful if there is more than
|
|
|
|
// one. Later CMOVs may reference the results of earlier CMOVs, but later
|
|
|
|
// PHIs have to reference the individual true/false inputs from earlier PHIs.
|
|
|
|
// That also means that PHI construction must work forward from earlier to
|
|
|
|
// later, and that the code must maintain a mapping from each earlier PHI's
|
|
|
|
// destination register to the registers that went into that PHI.
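// (Illustrative example, not from the original source: for a chained pair
//    %2 = CMOVcc %a, %b        ; %a is the tied, condition-false input
//    %3 = CMOVcc %2, %c
//  the PHIs built in SinkMBB must be
//    %2 = PHI [ %a, FalseMBB ], [ %b, MBB ]
//    %3 = PHI [ %a, FalseMBB ], [ %c, MBB ]
//  i.e. the second PHI takes %2's per-edge inputs, not %2 itself, and that
//  per-edge information is exactly what RegRewriteTable carries forward.)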
|
|
|
|
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
|
|
|
|
|
|
|
|
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
|
|
|
|
unsigned DestReg = MIIt->getOperand(0).getReg();
|
|
|
|
unsigned Op1Reg = MIIt->getOperand(1).getReg();
|
|
|
|
unsigned Op2Reg = MIIt->getOperand(2).getReg();
|
|
|
|
|
|
|
|
// If this CMOV we are processing is the opposite condition from the jump we
|
|
|
|
// generated, then we have to swap the operands for the PHI that is going to
|
|
|
|
// be generated.
|
2019-04-06 03:27:41 +08:00
|
|
|
if (X86::getCondFromCMov(*MIIt) == OppCC)
|
2017-07-17 01:39:56 +08:00
|
|
|
std::swap(Op1Reg, Op2Reg);
|
|
|
|
|
|
|
|
auto Op1Itr = RegRewriteTable.find(Op1Reg);
|
|
|
|
if (Op1Itr != RegRewriteTable.end())
|
|
|
|
Op1Reg = Op1Itr->second.first;
|
|
|
|
|
|
|
|
auto Op2Itr = RegRewriteTable.find(Op2Reg);
|
|
|
|
if (Op2Itr != RegRewriteTable.end())
|
|
|
|
Op2Reg = Op2Itr->second.second;
|
|
|
|
|
|
|
|
// SinkMBB:
|
|
|
|
// %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
|
|
|
|
// ...
|
|
|
|
MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
|
|
|
|
.addReg(Op1Reg)
|
|
|
|
.addMBB(FalseMBB)
|
|
|
|
.addReg(Op2Reg)
|
|
|
|
.addMBB(MBB);
|
|
|
|
(void)MIB;
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());
|
2017-07-17 01:39:56 +08:00
|
|
|
|
|
|
|
// Add this PHI to the rewrite table.
|
|
|
|
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now remove the CMOV(s).
|
|
|
|
MBB->erase(MIItBegin, MIItEnd);
|
|
|
|
}
|
|
|
|
|
2017-10-03 05:46:37 +08:00
|
|
|
INITIALIZE_PASS_BEGIN(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
|
|
|
|
false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
|
|
|
|
INITIALIZE_PASS_END(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
|
|
|
|
false, false)
|
|
|
|
|
2017-07-17 01:39:56 +08:00
|
|
|
FunctionPass *llvm::createX86CmovConverterPass() {
|
|
|
|
return new X86CmovConverterPass();
|
|
|
|
}
|
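// Usage sketch (an assumption about the surrounding pipeline, not part of
// this file): the X86 target is expected to create this pass from its pass
// configuration, roughly as in X86TargetMachine.cpp:
//
//   bool X86PassConfig::addILPOpts() {
//     ...
//     addPass(createX86CmovConverterPass());
//     return true;
//   }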