From 32d840d29179383a28d59d68fccd74f52f316faf Mon Sep 17 00:00:00 2001 From: Sam Elliott Date: Thu, 14 Nov 2019 18:42:33 +0000 Subject: [PATCH] [RISCV] Use addi rather than add x0 Summary: The RISC-V backend used to generate `add <Reg>, x0, <Reg>` in a few instances. It seems most places no longer generate this sequence. This is semantically equivalent to `addi <Reg>, <Reg>, 0`, but the latter has the advantage of being noted to be the canonical instruction to be used for moves (which microarchitectures can and should recognise as such). The changed testcases use instruction aliases - `mv <Reg>, <Reg>` is an alias for `addi <Reg>, <Reg>, 0`. Reviewers: luismarques Reviewed By: luismarques Subscribers: hiraditya, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, pzheng, sameer.abuasal, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D70124 --- .../Target/RISCV/RISCVExpandPseudoInsts.cpp | 6 +-- llvm/test/CodeGen/RISCV/atomic-rmw.ll | 40 +++++++++---------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp index da5cd16e750c..84bce0f48562 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -319,9 +319,9 @@ static void doMaskedAtomicBinOpExpansion( default: llvm_unreachable("Unexpected AtomicRMW BinOp"); case AtomicRMWInst::Xchg: - BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg) - .addReg(RISCV::X0) - .addReg(IncrReg); + BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg) - .addReg(IncrReg) - .addImm(0); break; case AtomicRMWInst::Add: BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg) diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll index dc396efd530d..c94dc658792a 100644 --- 
a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -30,7 +30,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a4, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 @@ -61,7 +61,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a4, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 @@ -96,7 +96,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a4, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 @@ -127,7 +127,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a4, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 @@ -162,7 +162,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a4, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 @@ -193,7 +193,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB2_1: # =>This Inner 
Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a4, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 @@ -228,7 +228,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a4, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 @@ -259,7 +259,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a4, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 @@ -294,7 +294,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a4, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 @@ -325,7 +325,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a4, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 @@ -5031,7 +5031,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a3, a5 ; RV32IA-NEXT: and a5, a5, a4 ; RV32IA-NEXT: xor a5, a3, a5 @@ -5063,7 +5063,7 
@@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a3, a5 ; RV64IA-NEXT: and a5, a5, a4 ; RV64IA-NEXT: xor a5, a3, a5 @@ -5099,7 +5099,7 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a3, a5 ; RV32IA-NEXT: and a5, a5, a4 ; RV32IA-NEXT: xor a5, a3, a5 @@ -5131,7 +5131,7 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a3, a5 ; RV64IA-NEXT: and a5, a5, a4 ; RV64IA-NEXT: xor a5, a3, a5 @@ -5167,7 +5167,7 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a3, a5 ; RV32IA-NEXT: and a5, a5, a4 ; RV32IA-NEXT: xor a5, a3, a5 @@ -5199,7 +5199,7 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a3, a5 ; RV64IA-NEXT: and a5, a5, a4 ; RV64IA-NEXT: xor a5, a3, a5 @@ -5235,7 +5235,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aq a3, (a2) -; 
RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a3, a5 ; RV32IA-NEXT: and a5, a5, a4 ; RV32IA-NEXT: xor a5, a3, a5 @@ -5267,7 +5267,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a3, a5 ; RV64IA-NEXT: and a5, a5, a4 ; RV64IA-NEXT: xor a5, a3, a5 @@ -5303,7 +5303,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 ; RV32IA-NEXT: lr.w.aqrl a3, (a2) -; RV32IA-NEXT: add a5, zero, a1 +; RV32IA-NEXT: mv a5, a1 ; RV32IA-NEXT: xor a5, a3, a5 ; RV32IA-NEXT: and a5, a5, a4 ; RV32IA-NEXT: xor a5, a3, a5 @@ -5335,7 +5335,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 ; RV64IA-NEXT: lr.w.aqrl a3, (a2) -; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: mv a5, a1 ; RV64IA-NEXT: xor a5, a3, a5 ; RV64IA-NEXT: and a5, a5, a4 ; RV64IA-NEXT: xor a5, a3, a5