# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL

--- |

  define float @test_fptrunc(double %in) {
    %res = fptrunc double %in to float
    ret float %res
  }

...
---
name:            test_fptrunc
# Alignment is in bytes (pow2), not log2 — see https://reviews.llvm.org/D67433.
alignment:       16
legalized:       true
regBankSelected: true
tracksRegLiveness: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
  - { id: 3, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $xmm0

    ; ALL-LABEL: name: test_fptrunc
    ; ALL: liveins: $xmm0
    ; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; ALL: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; ALL: [[CVTSD2SSrr:%[0-9]+]]:fr32 = CVTSD2SSrr [[COPY1]]
    ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSD2SSrr]]
    ; ALL: $xmm0 = COPY [[COPY2]]
    ; ALL: RET 0, implicit $xmm0
    %1:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %1(s128)
    %2:vecr(s32) = G_FPTRUNC %0(s64)
    %3:vecr(s128) = G_ANYEXT %2(s32)
    $xmm0 = COPY %3(s128)
    RET 0, implicit $xmm0

...