# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=FP32
# RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=FP64
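# This file checks legalization of G_SITOFP and G_UITOFP on MIPS32 through the
# GlobalISel legalizer, covering s64, s32, s16 and s8 sources converted to f32
# and f64, under both the FP32 and the FP64 (+fp64,+mips32r2) configurations
# selected by the two RUN lines above.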
--- |

  define void @i64tof32() {entry: ret void}
  define void @i32tof32() {entry: ret void}
  define void @i16tof32() {entry: ret void}
  define void @i8tof32() {entry: ret void}
  define void @i64tof64() {entry: ret void}
  define void @i32tof64() {entry: ret void}
  define void @i16tof64() {entry: ret void}
  define void @i8tof64() {entry: ret void}
  define void @u64tof32() {entry: ret void}
  define void @u32tof32() {entry: ret void}
  define void @u16tof32() {entry: ret void}
  define void @u8tof32() {entry: ret void}
  define void @u64tof64() {entry: ret void}
  define void @u32tof64() {entry: ret void}
  define void @u16tof64() {entry: ret void}
  define void @u8tof64() {entry: ret void}

...
---
name: i64tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0, $a1

    ; FP32-LABEL: name: i64tof32
    ; FP32: liveins: $a0, $a1
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $a0 = COPY [[COPY]](s32)
    ; FP32: $a1 = COPY [[COPY1]](s32)
    ; FP32: JAL &__floatdisf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
    ; FP32: [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
    ; FP32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $f0 = COPY [[COPY2]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: i64tof32
    ; FP64: liveins: $a0, $a1
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP64: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $a0 = COPY [[COPY]](s32)
    ; FP64: $a1 = COPY [[COPY1]](s32)
    ; FP64: JAL &__floatdisf, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
    ; FP64: [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
    ; FP64: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $f0 = COPY [[COPY2]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %2:_(s32) = COPY $a1
    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
    %3:_(s32) = G_SITOFP %0(s64)
    $f0 = COPY %3(s32)
    RetRA implicit $f0

...
---
name: i32tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i32tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
    ; FP32: $f0 = COPY [[SITOFP]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: i32tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
    ; FP64: $f0 = COPY [[SITOFP]](s32)
    ; FP64: RetRA implicit $f0
    %0:_(s32) = COPY $a0
    %1:_(s32) = G_SITOFP %0(s32)
    $f0 = COPY %1(s32)
    RetRA implicit $f0

...
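# For s16 and s8 sources, the CHECK lines below expect the value to be
# sign-extended inside a 32-bit register with a G_SHL/G_ASHR pair (shift
# amounts 16 and 24 respectively) before G_SITOFP operates on the widened
# value.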
---
name: i16tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i16tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; FP32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP32: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
    ; FP32: $f0 = COPY [[SITOFP]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: i16tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; FP64: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP64: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP64: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
    ; FP64: $f0 = COPY [[SITOFP]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %0:_(s16) = G_TRUNC %1(s32)
    %2:_(s32) = G_SITOFP %0(s16)
    $f0 = COPY %2(s32)
    RetRA implicit $f0

...
---
name: i8tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i8tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; FP32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP32: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
    ; FP32: $f0 = COPY [[SITOFP]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: i8tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; FP64: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP64: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP64: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
    ; FP64: $f0 = COPY [[SITOFP]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %0:_(s8) = G_TRUNC %1(s32)
    %2:_(s32) = G_SITOFP %0(s8)
    $f0 = COPY %2(s32)
    RetRA implicit $f0

...
---
name: i64tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0, $a1

    ; FP32-LABEL: name: i64tof64
    ; FP32: liveins: $a0, $a1
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $a0 = COPY [[COPY]](s32)
    ; FP32: $a1 = COPY [[COPY1]](s32)
    ; FP32: JAL &__floatdidf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $d0
    ; FP32: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
    ; FP32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $d0 = COPY [[COPY2]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: i64tof64
    ; FP64: liveins: $a0, $a1
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP64: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $a0 = COPY [[COPY]](s32)
    ; FP64: $a1 = COPY [[COPY1]](s32)
    ; FP64: JAL &__floatdidf, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $d0_64
    ; FP64: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0_64
    ; FP64: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $d0 = COPY [[COPY2]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %2:_(s32) = COPY $a1
    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
    %3:_(s64) = G_SITOFP %0(s64)
    $d0 = COPY %3(s64)
    RetRA implicit $d0

...
---
name: i32tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i32tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32)
    ; FP32: $d0 = COPY [[SITOFP]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: i32tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32)
    ; FP64: $d0 = COPY [[SITOFP]](s64)
    ; FP64: RetRA implicit $d0
    %0:_(s32) = COPY $a0
    %1:_(s64) = G_SITOFP %0(s32)
    $d0 = COPY %1(s64)
    RetRA implicit $d0

...
---
name: i16tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i16tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; FP32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP32: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
    ; FP32: $d0 = COPY [[SITOFP]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: i16tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; FP64: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP64: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP64: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
    ; FP64: $d0 = COPY [[SITOFP]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %0:_(s16) = G_TRUNC %1(s32)
    %2:_(s64) = G_SITOFP %0(s16)
    $d0 = COPY %2(s64)
    RetRA implicit $d0

...
---
name: i8tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: i8tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; FP32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP32: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
    ; FP32: $d0 = COPY [[SITOFP]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: i8tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; FP64: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
    ; FP64: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
    ; FP64: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
    ; FP64: $d0 = COPY [[SITOFP]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %0:_(s8) = G_TRUNC %1(s32)
    %2:_(s64) = G_SITOFP %0(s8)
    $d0 = COPY %2(s64)
    RetRA implicit $d0

...
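# Unsigned s64 sources are legalized to libcalls (__floatundisf and
# __floatundidf below), mirroring the __floatdisf/__floatdidf calls expected
# for the signed s64 cases above.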
---
name: u64tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0, $a1

    ; FP32-LABEL: name: u64tof32
    ; FP32: liveins: $a0, $a1
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $a0 = COPY [[COPY]](s32)
    ; FP32: $a1 = COPY [[COPY1]](s32)
    ; FP32: JAL &__floatundisf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
    ; FP32: [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
    ; FP32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $f0 = COPY [[COPY2]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: u64tof32
    ; FP64: liveins: $a0, $a1
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP64: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $a0 = COPY [[COPY]](s32)
    ; FP64: $a1 = COPY [[COPY1]](s32)
    ; FP64: JAL &__floatundisf, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
    ; FP64: [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
    ; FP64: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $f0 = COPY [[COPY2]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %2:_(s32) = COPY $a1
    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
    %3:_(s32) = G_UITOFP %0(s64)
    $f0 = COPY %3(s32)
    RetRA implicit $f0

...
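# For u32, u16 and u8 sources the CHECK lines expect the double-precision
# bias trick: the zero-extended value is merged with 0x43300000 (1127219200)
# into an s64, the constant 0x4330000000000000 (2^52 as a double) is
# subtracted with G_FSUB, and the result is narrowed with G_FPTRUNC when the
# destination is f32.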
---
name: u32tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u32tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
    ; FP32: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C1]]
    ; FP32: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP32: $f0 = COPY [[FPTRUNC]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: u32tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
    ; FP64: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C1]]
    ; FP64: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP64: $f0 = COPY [[FPTRUNC]](s32)
    ; FP64: RetRA implicit $f0
    %0:_(s32) = COPY $a0
    %1:_(s32) = G_UITOFP %0(s32)
    $f0 = COPY %1(s32)
    RetRA implicit $f0

...
---
name: u16tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u16tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP32: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP32: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP32: $f0 = COPY [[FPTRUNC]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: u16tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP64: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP64: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP64: $f0 = COPY [[FPTRUNC]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %0:_(s16) = G_TRUNC %1(s32)
    %2:_(s32) = G_UITOFP %0(s16)
    $f0 = COPY %2(s32)
    RetRA implicit $f0

...
---
name: u8tof32
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u8tof32
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP32: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP32: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP32: $f0 = COPY [[FPTRUNC]](s32)
    ; FP32: RetRA implicit $f0
    ; FP64-LABEL: name: u8tof32
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP64: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP64: [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[FSUB]](s64)
    ; FP64: $f0 = COPY [[FPTRUNC]](s32)
    ; FP64: RetRA implicit $f0
    %1:_(s32) = COPY $a0
    %0:_(s8) = G_TRUNC %1(s32)
    %2:_(s32) = G_UITOFP %0(s8)
    $f0 = COPY %2(s32)
    RetRA implicit $f0

...
---
name: u64tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0, $a1

    ; FP32-LABEL: name: u64tof64
    ; FP32: liveins: $a0, $a1
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $a0 = COPY [[COPY]](s32)
    ; FP32: $a1 = COPY [[COPY1]](s32)
    ; FP32: JAL &__floatundidf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $d0
    ; FP32: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
    ; FP32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP32: $d0 = COPY [[COPY2]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: u64tof64
    ; FP64: liveins: $a0, $a1
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; FP64: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $a0 = COPY [[COPY]](s32)
    ; FP64: $a1 = COPY [[COPY1]](s32)
    ; FP64: JAL &__floatundidf, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $d0_64
    ; FP64: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0_64
    ; FP64: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; FP64: $d0 = COPY [[COPY2]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %2:_(s32) = COPY $a1
    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
    %3:_(s64) = G_UITOFP %0(s64)
    $d0 = COPY %3(s64)
    RetRA implicit $d0

...
|
2019-08-30 13:51:12 +08:00
|
|
|
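# u32 -> f64 avoids a libcall: the 32-bit value becomes the low word of a
# double whose high word is 1127219200 (0x43300000), i.e. the bit pattern of
# 2^52 + value, and subtracting 0x1.0p52 (0x4330000000000000) then yields the
# exact converted value.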
---
name: u32tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u32tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
    ; FP32: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C1]]
    ; FP32: $d0 = COPY [[FSUB]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: u32tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
    ; FP64: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C1]]
    ; FP64: $d0 = COPY [[FSUB]](s64)
    ; FP64: RetRA implicit $d0
    %0:_(s32) = COPY $a0
    %1:_(s64) = G_UITOFP %0(s32)
    $d0 = COPY %1(s64)
    RetRA implicit $d0

...
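# Narrower unsigned sources reuse the same expansion; the extra G_AND with
# 65535 below zero-extends the s16 value to 32 bits before it is merged into
# the double.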
---
name: u16tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u16tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP32: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP32: $d0 = COPY [[FSUB]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: u16tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP64: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP64: $d0 = COPY [[FSUB]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %0:_(s16) = G_TRUNC %1(s32)
    %2:_(s64) = G_UITOFP %0(s16)
    $d0 = COPY %2(s64)
    RetRA implicit $d0

...
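# Same expansion as u16tof64, with the mask narrowed to 255 for the s8 source.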
---
name: u8tof64
alignment: 4
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $a0

    ; FP32-LABEL: name: u8tof64
    ; FP32: liveins: $a0
    ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP32: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP32: $d0 = COPY [[FSUB]](s64)
    ; FP32: RetRA implicit $d0
    ; FP64-LABEL: name: u8tof64
    ; FP64: liveins: $a0
    ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
    ; FP64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1127219200
    ; FP64: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[C1]](s32)
    ; FP64: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x4330000000000000
    ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[MV]], [[C2]]
    ; FP64: $d0 = COPY [[FSUB]](s64)
    ; FP64: RetRA implicit $d0
    %1:_(s32) = COPY $a0
    %0:_(s8) = G_TRUNC %1(s32)
    %2:_(s64) = G_UITOFP %0(s8)
    $d0 = COPY %2(s64)
    RetRA implicit $d0

...