From 781e3d7ad82c30656e204ecec4635079807e41bd Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Wed, 29 Jun 2022 16:23:41 -0700
Subject: [PATCH] [RISCV] Pre-commit tests for D128869. NFC

---
 llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll | 94 +++++++++++++++++++++
 1 file changed, 94 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
index f52f4eef4f53..af4e4b233470 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll
@@ -82,3 +82,97 @@ define i64 @test6(i32 signext %a, i32 signext %b) nounwind {
   %4 = ashr i64 %3, 16
   ret i64 %4
 }
+
+; The ashr+add+shl is canonical IR from InstCombine for
+; (sext (add (trunc X to i32), 1) to i64).
+; That can be implemented as addiw; make sure we recover it.
+define i64 @test7(i32* %0, i64 %1) {
+; RV64I-LABEL: test7:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a1, 32
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    srai a0, a0, 32
+; RV64I-NEXT:    ret
+  %3 = shl i64 %1, 32
+  %4 = add i64 %3, 4294967296
+  %5 = ashr exact i64 %4, 32
+  ret i64 %5
+}
+
+; The ashr+add+shl is canonical IR from InstCombine for
+; (sext (sub 1, (trunc X to i32)) to i64).
+; That can be implemented as (li 1)+subw; make sure we recover it.
+define i64 @test8(i32* %0, i64 %1) {
+; RV64I-LABEL: test8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a1, 32
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    srai a0, a0, 32
+; RV64I-NEXT:    ret
+  %3 = mul i64 %1, -4294967296
+  %4 = add i64 %3, 4294967296
+  %5 = ashr exact i64 %4, 32
+  ret i64 %5
+}
+
+; The gep is here to introduce a shl by 2 after the ashr that will get folded
+; and make this harder to recover.
+define signext i32 @test9(i32* %0, i64 %1) {
+; RV64I-LABEL: test9:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    lui a2, 4097
+; RV64I-NEXT:    slli a2, a2, 20
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    srai a1, a1, 30
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lw a0, 0(a0)
+; RV64I-NEXT:    ret
+  %3 = shl i64 %1, 32
+  %4 = add i64 %3, 17596481011712 ; 4097 << 32
+  %5 = ashr exact i64 %4, 32
+  %6 = getelementptr inbounds i32, i32* %0, i64 %5
+  %7 = load i32, i32* %6, align 4
+  ret i32 %7
+}
+
+; The gep is here to introduce a shl by 2 after the ashr that will get folded
+; and make this harder to recover.
+define signext i32 @test10(i32* %0, i64 %1) {
+; RV64I-LABEL: test10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    lui a2, 30141
+; RV64I-NEXT:    addiw a2, a2, -747
+; RV64I-NEXT:    slli a2, a2, 32
+; RV64I-NEXT:    sub a1, a2, a1
+; RV64I-NEXT:    srai a1, a1, 30
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lw a0, 0(a0)
+; RV64I-NEXT:    ret
+  %3 = mul i64 %1, -4294967296
+  %4 = add i64 %3, 530242871224172544 ; 123456789 << 32
+  %5 = ashr exact i64 %4, 32
+  %6 = getelementptr inbounds i32, i32* %0, i64 %5
+  %7 = load i32, i32* %6, align 4
+  ret i32 %7
+}
+
+define i64 @test11(i32* %0, i64 %1) {
+; RV64I-LABEL: test11:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a1, 32
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slli a1, a1, 63
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    srai a0, a0, 32
+; RV64I-NEXT:    ret
+  %3 = mul i64 %1, -4294967296
+  %4 = add i64 %3, 9223372036854775808 ; 0x8000'0000'0000'0000
+  %5 = ashr exact i64 %4, 32
+  ret i64 %5
+}
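
For context on what these pre-commit tests are meant to catch, here is a rough
sketch of the lowerings the comments above ask D128869 to recover. This is an
assumption about the post-D128869 output, not part of the patch; it reuses the
register assignments from the existing checks and the addiw/subw choices named
in the test comments:

  # test7, hypothetical output for (sext (add (trunc X to i32), 1) to i64)
  addiw a0, a1, 1
  ret

  # test8, hypothetical output for (sext (sub 1, (trunc X to i32)) to i64)
  li a0, 1
  subw a0, a0, a1
  ret

The arithmetic behind the canonical IR: (X << 32) + (1 << 32) equals
((trunc(X) + 1) << 32) modulo 2^64, and the arithmetic shift right by 32 then
sign-extends that 32-bit sum, which is exactly sext(trunc(X) + 1) to i64.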