forked from OSchip/llvm-project
[CodeGen] add rotate builtins
This exposes the LLVM funnel shift intrinsics as more familiar bit rotation functions in clang (when both halves of a funnel shift are the same value, it's a rotate).

We're free to name these as we want because we're not copying gcc, but if there's some other prior art (e.g., the Microsoft ops that are modified in this patch) that we want to replicate, we can change the names.

The funnel shift intrinsics were added in D49242, with improved codegen in rL337966 and rL339359, and basic IR optimization added in rL338218 and rL340022, so these are expected to produce asm output that's equal to or better than the multi-instruction alternatives using primitive C/IR ops.

In the motivating loop example from PR37387 (https://bugs.llvm.org/show_bug.cgi?id=37387#c7), we get the expected 'rolq' x86 instructions if we substitute the rotate builtin into the source.

Differential Revision: https://reviews.llvm.org/D50924

llvm-svn: 340135
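As a rough illustration of the substitution described above (a hypothetical loop, not the code from the bug report), here is a reduction loop written with the new builtin; the rotate is then expected to lower to a single 'rolq' on x86-64:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: rotate-and-xor reduction using the new builtin. */
    uint64_t hash_mix(const uint64_t *v, size_t n) {
      uint64_t h = 0;
      for (size_t i = 0; i < n; ++i)
        h = __builtin_rotateleft64(h, 5) ^ v[i];
      return h;
    }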
This commit is contained in:
parent 803912ea57
commit 9116f0438c
@@ -1739,6 +1739,70 @@ The '``__builtin_bitreverse``' family of builtins is used to reverse
the bitpattern of an integer value; for example ``0b10110110`` becomes
``0b01101101``.

``__builtin_rotateleft``
------------------------

* ``__builtin_rotateleft8``
* ``__builtin_rotateleft16``
* ``__builtin_rotateleft32``
* ``__builtin_rotateleft64``

**Syntax**:

.. code-block:: c++

  __builtin_rotateleft32(x, y)

**Examples**:

.. code-block:: c++

  uint8_t rot_x = __builtin_rotateleft8(x, y);
  uint16_t rot_x = __builtin_rotateleft16(x, y);
  uint32_t rot_x = __builtin_rotateleft32(x, y);
  uint64_t rot_x = __builtin_rotateleft64(x, y);

**Description**:

The '``__builtin_rotateleft``' family of builtins is used to rotate
the bits in the first argument by the amount in the second argument.
For example, ``0b10000110`` rotated left by 11 becomes ``0b00110100``.
The shift value is treated as an unsigned amount modulo the size of
the arguments. Both arguments and the result have the bitwidth specified
by the name of the builtin.
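A small usage sketch of the modulo behavior described above (not part of the patch; the helper name is illustrative):

    #include <assert.h>
    #include <stdint.h>

    void rotate_modulo_demo(void) {
      /* Rotating an 8-bit value by 11 is the same as rotating it by 11 % 8 == 3. */
      uint8_t a = __builtin_rotateleft8(0x86, 11); /* 0b10000110 -> 0b00110100 */
      uint8_t b = __builtin_rotateleft8(0x86, 3);
      assert(a == b && a == 0x34);
    }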
``__builtin_rotateright``
-------------------------

* ``__builtin_rotateright8``
* ``__builtin_rotateright16``
* ``__builtin_rotateright32``
* ``__builtin_rotateright64``

**Syntax**:

.. code-block:: c++

  __builtin_rotateright32(x, y)

**Examples**:

.. code-block:: c++

  uint8_t rot_x = __builtin_rotateright8(x, y);
  uint16_t rot_x = __builtin_rotateright16(x, y);
  uint32_t rot_x = __builtin_rotateright32(x, y);
  uint64_t rot_x = __builtin_rotateright64(x, y);

**Description**:

The '``__builtin_rotateright``' family of builtins is used to rotate
the bits in the first argument by the amount in the second argument.
For example, ``0b10000110`` rotated right by 3 becomes ``0b11010000``.
The shift value is treated as an unsigned amount modulo the size of
the arguments. Both arguments and the result have the bitwidth specified
by the name of the builtin.
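For reference (not part of the patch), the same right rotate written with primitive shifts, i.e. the masked shift/or pattern that the codegen changes further down stop emitting for the Microsoft intrinsics:

    #include <stdint.h>

    /* Equivalent right rotate using primitive shifts; the shift amounts are
       masked to the bit width, so the expression is defined for any y. */
    uint32_t rotr32_manual(uint32_t x, uint32_t y) {
      return (x >> (y & 31)) | (x << (-y & 31));
    }

    /* Same operation with the builtin documented above. */
    uint32_t rotr32_builtin(uint32_t x, uint32_t y) {
      return __builtin_rotateright32(x, y);
    }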
``__builtin_unreachable``
-------------------------
@@ -428,6 +428,15 @@ BUILTIN(__builtin_bitreverse16, "UsUs", "nc")
BUILTIN(__builtin_bitreverse32, "UiUi", "nc")
BUILTIN(__builtin_bitreverse64, "ULLiULLi", "nc")

BUILTIN(__builtin_rotateleft8, "UcUcUc", "nc")
BUILTIN(__builtin_rotateleft16, "UsUsUs", "nc")
BUILTIN(__builtin_rotateleft32, "UiUiUi", "nc")
BUILTIN(__builtin_rotateleft64, "ULLiULLiULLi", "nc")
BUILTIN(__builtin_rotateright8, "UcUcUc", "nc")
BUILTIN(__builtin_rotateright16, "UsUsUs", "nc")
BUILTIN(__builtin_rotateright32, "UiUiUi", "nc")
BUILTIN(__builtin_rotateright64, "ULLiULLiULLi", "nc")
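As a reading aid (not part of the patch): in Builtins.def the second string is the prototype (return type followed by parameter types) and the third string is the attribute set, so the new entries decode roughly as:

    // "UcUcUc"       -> unsigned char  f(unsigned char, unsigned char)
    // "UsUsUs"       -> unsigned short f(unsigned short, unsigned short)
    // "UiUiUi"       -> unsigned int   f(unsigned int, unsigned int)
    // "ULLiULLiULLi" -> unsigned long long f(unsigned long long, unsigned long long)
    // "nc"           -> nothrow, const (no side effects)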

// Random GCC builtins
BUILTIN(__builtin_constant_p, "i.", "nctu")
BUILTIN(__builtin_classify_type, "i.", "nctu")
@@ -1647,46 +1647,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                "cast");
    return RValue::get(Result);
  }
  case Builtin::BI_rotr8:
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64: {
    Value *Val = EmitScalarExpr(E->getArg(0));
    Value *Shift = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = Val->getType();
    Shift = Builder.CreateIntCast(Shift, ArgType, false);
    unsigned ArgWidth = ArgType->getIntegerBitWidth();
    Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);

    Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask);
    Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
    Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
    Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
    Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
    return RValue::get(Result);
  }
  case Builtin::BI_rotl8:
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64: {
    Value *Val = EmitScalarExpr(E->getArg(0));
    Value *Shift = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = Val->getType();
    Shift = Builder.CreateIntCast(Shift, ArgType, false);
    unsigned ArgWidth = ArgType->getIntegerBitWidth();
    Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);

    Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask);
    Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
    Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
    Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
    Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_unpredictable: {
    // Always return the argument of __builtin_unpredictable. LLVM does not
    // handle this builtin. Metadata for this builtin should be added directly
@@ -1741,6 +1701,43 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
  case Builtin::BI__builtin_bitreverse64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
  }
  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64: {
    llvm::Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
    // The builtin's shift arg may have a different type than the source arg and
    // result, but the LLVM intrinsic uses the same type for all values.
    llvm::Type *Ty = Src->getType();
    ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
    return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
  }
  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64: {
    llvm::Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
    // The builtin's shift arg may have a different type than the source arg and
    // result, but the LLVM intrinsic uses the same type for all values.
    llvm::Type *Ty = Src->getType();
    ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::fshr, Ty);
    return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
  }

  case Builtin::BI__builtin_object_size: {
    unsigned Type =
        E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
@@ -0,0 +1,66 @@
// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s

unsigned char rotl8(unsigned char x, unsigned char y) {
// CHECK-LABEL: rotl8
// CHECK: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
// CHECK-NEXT: ret i8 [[F]]

  return __builtin_rotateleft8(x, y);
}

short rotl16(short x, short y) {
// CHECK-LABEL: rotl16
// CHECK: [[F:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
// CHECK-NEXT: ret i16 [[F]]

  return __builtin_rotateleft16(x, y);
}

int rotl32(int x, unsigned int y) {
// CHECK-LABEL: rotl32
// CHECK: [[F:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK-NEXT: ret i32 [[F]]

  return __builtin_rotateleft32(x, y);
}

unsigned long long rotl64(unsigned long long x, long long y) {
// CHECK-LABEL: rotl64
// CHECK: [[F:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
// CHECK-NEXT: ret i64 [[F]]

  return __builtin_rotateleft64(x, y);
}

char rotr8(char x, char y) {
// CHECK-LABEL: rotr8
// CHECK: [[F:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
// CHECK-NEXT: ret i8 [[F]]

  return __builtin_rotateright8(x, y);
}

unsigned short rotr16(unsigned short x, unsigned short y) {
// CHECK-LABEL: rotr16
// CHECK: [[F:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
// CHECK-NEXT: ret i16 [[F]]

  return __builtin_rotateright16(x, y);
}

unsigned int rotr32(unsigned int x, int y) {
// CHECK-LABEL: rotr32
// CHECK: [[F:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK-NEXT: ret i32 [[F]]

  return __builtin_rotateright32(x, y);
}

long long rotr64(long long x, unsigned long long y) {
// CHECK-LABEL: rotr64
// CHECK: [[F:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
// CHECK-NEXT: ret i64 [[F]]

  return __builtin_rotateright64(x, y);
}
@@ -30,66 +30,36 @@ unsigned char test_rotl8(unsigned char value, unsigned char shift) {
  return _rotl8(value, shift);
}
// CHECK: i8 @test_rotl8
// CHECK: [[LSHIFT:%[0-9]+]] = and i8 [[SHIFT:%[0-9]+]], 7
// CHECK: [[HIGH:%[0-9]+]] = shl i8 [[VALUE:%[0-9]+]], [[LSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i8 0, [[SHIFT]]
// CHECK: [[RSHIFT:%[0-9]+]] = and i8 [[NEGATE]], 7
// CHECK: [[LOW:%[0-9]+]] = lshr i8 [[VALUE]], [[RSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i8 [[HIGH]], [[LOW]]
// CHECK: ret i8 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
// CHECK: ret i8 [[R]]

unsigned short test_rotl16(unsigned short value, unsigned char shift) {
  return _rotl16(value, shift);
}
// CHECK: i16 @test_rotl16
// CHECK: [[LSHIFT:%[0-9]+]] = and i16 [[SHIFT:%[0-9]+]], 15
// CHECK: [[HIGH:%[0-9]+]] = shl i16 [[VALUE:%[0-9]+]], [[LSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i16 0, [[SHIFT]]
// CHECK: [[RSHIFT:%[0-9]+]] = and i16 [[NEGATE]], 15
// CHECK: [[LOW:%[0-9]+]] = lshr i16 [[VALUE]], [[RSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i16 [[HIGH]], [[LOW]]
// CHECK: ret i16 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
// CHECK: ret i16 [[R]]

unsigned int test_rotl(unsigned int value, int shift) {
  return _rotl(value, shift);
}
// CHECK: i32 @test_rotl
// CHECK: [[LSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
// CHECK: [[HIGH:%[0-9]+]] = shl i32 [[VALUE:%[0-9]+]], [[LSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
// CHECK: [[RSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
// CHECK: [[LOW:%[0-9]+]] = lshr i32 [[VALUE]], [[RSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
// CHECK: ret i32 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK: ret i32 [[R]]

unsigned LONG test_lrotl(unsigned LONG value, int shift) {
  return _lrotl(value, shift);
}
// CHECK-32BIT-LONG: i32 @test_lrotl
// CHECK-32BIT-LONG: [[LSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
// CHECK-32BIT-LONG: [[HIGH:%[0-9]+]] = shl i32 [[VALUE:%[0-9]+]], [[LSHIFT]]
// CHECK-32BIT-LONG: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
// CHECK-32BIT-LONG: [[RSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
// CHECK-32BIT-LONG: [[LOW:%[0-9]+]] = lshr i32 [[VALUE]], [[RSHIFT]]
// CHECK-32BIT-LONG: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
// CHECK-32BIT-LONG: ret i32 [[RESULT]]
// CHECK-32BIT-LONG }
// CHECK-32BIT-LONG: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK-32BIT-LONG: ret i32 [[R]]

unsigned __int64 test_rotl64(unsigned __int64 value, int shift) {
  return _rotl64(value, shift);
}
// CHECK: i64 @test_rotl64
// CHECK: [[LSHIFT:%[0-9]+]] = and i64 [[SHIFT:%[0-9]+]], 63
// CHECK: [[HIGH:%[0-9]+]] = shl i64 [[VALUE:%[0-9]+]], [[LSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i64 0, [[SHIFT]]
// CHECK: [[RSHIFT:%[0-9]+]] = and i64 [[NEGATE]], 63
// CHECK: [[LOW:%[0-9]+]] = lshr i64 [[VALUE]], [[RSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i64 [[HIGH]], [[LOW]]
// CHECK: ret i64 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
// CHECK: ret i64 [[R]]

// rotate right
@@ -97,61 +67,34 @@ unsigned char test_rotr8(unsigned char value, unsigned char shift) {
  return _rotr8(value, shift);
}
// CHECK: i8 @test_rotr8
// CHECK: [[RSHIFT:%[0-9]+]] = and i8 [[SHIFT:%[0-9]+]], 7
// CHECK: [[LOW:%[0-9]+]] = lshr i8 [[VALUE:%[0-9]+]], [[RSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i8 0, [[SHIFT]]
// CHECK: [[LSHIFT:%[0-9]+]] = and i8 [[NEGATE]], 7
// CHECK: [[HIGH:%[0-9]+]] = shl i8 [[VALUE]], [[LSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i8 [[HIGH]], [[LOW]]
// CHECK }
// CHECK: [[R:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
// CHECK: ret i8 [[R]]

unsigned short test_rotr16(unsigned short value, unsigned char shift) {
  return _rotr16(value, shift);
}
// CHECK: i16 @test_rotr16
// CHECK: [[RSHIFT:%[0-9]+]] = and i16 [[SHIFT:%[0-9]+]], 15
// CHECK: [[LOW:%[0-9]+]] = lshr i16 [[VALUE:%[0-9]+]], [[RSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i16 0, [[SHIFT]]
// CHECK: [[LSHIFT:%[0-9]+]] = and i16 [[NEGATE]], 15
// CHECK: [[HIGH:%[0-9]+]] = shl i16 [[VALUE]], [[LSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i16 [[HIGH]], [[LOW]]
// CHECK }
// CHECK: [[R:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
// CHECK: ret i16 [[R]]

unsigned int test_rotr(unsigned int value, int shift) {
  return _rotr(value, shift);
}
// CHECK: i32 @test_rotr
// CHECK: [[RSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
// CHECK: [[LOW:%[0-9]+]] = lshr i32 [[VALUE:%[0-9]+]], [[RSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
// CHECK: [[LSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
// CHECK: [[HIGH:%[0-9]+]] = shl i32 [[VALUE]], [[LSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
// CHECK: ret i32 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK: ret i32 [[R]]

unsigned LONG test_lrotr(unsigned LONG value, int shift) {
  return _lrotr(value, shift);
}
// CHECK-32BIT-LONG: i32 @test_lrotr
// CHECK-32BIT-LONG: [[RSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
// CHECK-32BIT-LONG: [[LOW:%[0-9]+]] = lshr i32 [[VALUE:%[0-9]+]], [[RSHIFT]]
// CHECK-32BIT-LONG: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
// CHECK-32BIT-LONG: [[LSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
// CHECK-32BIT-LONG: [[HIGH:%[0-9]+]] = shl i32 [[VALUE]], [[LSHIFT]]
// CHECK-32BIT-LONG: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
// CHECK-32BIT-LONG: ret i32 [[RESULT]]
// CHECK-32BIT-LONG }
// CHECK-32BIT-LONG: [[R:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
// CHECK-32BIT-LONG: ret i32 [[R]]

unsigned __int64 test_rotr64(unsigned __int64 value, int shift) {
  return _rotr64(value, shift);
}
// CHECK: i64 @test_rotr64
// CHECK: [[RSHIFT:%[0-9]+]] = and i64 [[SHIFT:%[0-9]+]], 63
// CHECK: [[LOW:%[0-9]+]] = lshr i64 [[VALUE:%[0-9]+]], [[RSHIFT]]
// CHECK: [[NEGATE:%[0-9]+]] = sub i64 0, [[SHIFT]]
// CHECK: [[LSHIFT:%[0-9]+]] = and i64 [[NEGATE]], 63
// CHECK: [[HIGH:%[0-9]+]] = shl i64 [[VALUE]], [[LSHIFT]]
// CHECK: [[RESULT:%[0-9]+]] = or i64 [[HIGH]], [[LOW]]
// CHECK: ret i64 [[RESULT]]
// CHECK }
// CHECK: [[R:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
// CHECK: ret i64 [[R]]