//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
|
2017-06-15 18:53:31 +08:00
|
|
|
#include "ARMCallLowering.h"
|
2017-02-17 19:25:17 +08:00
|
|
|
#include "ARMSubtarget.h"
|
2017-04-24 17:12:19 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
|
2017-06-15 18:53:31 +08:00
|
|
|
#include "llvm/CodeGen/LowLevelType.h"
|
2017-04-24 17:12:19 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetOpcodes.h"
|
2018-03-30 01:21:10 +08:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2016-11-11 16:27:37 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
2018-01-30 01:37:29 +08:00
|
|
|
using namespace LegalizeActions;
|
2016-11-11 16:27:37 +08:00
|
|
|
|

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as legalizing only the types that are exactly a
/// power of 2 times the size of the legal types would require specifying all
/// those sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {{1, Unsupported},
                                             {8, WidenScalar},
                                             {9, Unsupported},
                                             {16, WidenScalar},
                                             {17, Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}
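
// A rough illustration of the strategy above (hypothetical input, not taken
// from a test): given the single explicitly specified action {{32, Lower}},
// as set up for G_SREM below, widen_8_16 would return
//   {{1, Unsupported}, {8, WidenScalar}, {9, Unsupported},
//    {16, WidenScalar}, {17, Unsupported}, {32, Lower}, {33, Unsupported}},
// i.e. exactly s8 and s16 get widened (towards s32), s32 keeps its requested
// action, and every other scalar size is rejected as Unsupported.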
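
// AEABI targets can (roughly speaking) rely on the RTABI runtime helpers,
// e.g. the __aeabi_idivmod family and the __aeabi_*cmp* comparisons, which is
// why some operations below are custom-legalized for them instead of using
// the generic libcalls, and why they get their own FCmp libcall mapping.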
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s32}, {s1, s8, s16});

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32);

  bool HasHWDivide = (!ST.isThumb() && ST.hasDivideInARMMode()) ||
                     (ST.isThumb() && ST.hasDivideInThumbMode());
  if (HasHWDivide)
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);
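
  // 32-bit remainder: with a hardware divide we can simply lower it (the
  // generic lowering typically expands it to a divide, multiply and
  // subtract); on AEABI targets we custom-legalize it to a single divmod
  // runtime call (see legalizeCustom); otherwise we fall back to a plain
  // libcall. Smaller scalars are first widened via the widen_8_16 strategy
  // above.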
  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (HasHWDivide)
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
  getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemSize({
              {s1, p0, 8},
              {s8, p0, 8},
              {s16, p0, 16},
              {s32, p0, 32},
              {p0, p0, 32}});

  getActionDefinitionsBuilder(G_GEP).legalFor({{p0, s32}});

  if (ST.isThumb()) {
    // FIXME: merge with the code for non-Thumb.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
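
  // The CLZ instruction is only available from ARMv5T on. When we have it,
  // G_CTLZ is legal and the zero-undef variant can simply be lowered to it;
  // without it, the zero-undef variant becomes a libcall and G_CTLZ is
  // lowered in terms of that instead.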
  if (ST.hasV5TOps()) {
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  }

  getActionDefinitionsBuilder(G_SELECT).legalForCartesianProduct({s32, p0},
                                                                 {s1});

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI).legalFor({s32, p0}).minScalar(0, s32);

  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder.legalFor({{s64, p0}});
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});

    LoadStoreBuilder.maxScalar(0, s32);

    for (auto Ty : {s32, s64})
      setAction({G_FNEG, Ty}, Lower);

    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});

    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();

    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});
  }

  if (!ST.useSoftFloat() && ST.hasVFP4())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  computeTables();
  verify(*ST.getInstrInfo());
}

void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}
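
// Note on the mapping above: the AEABI comparison helpers already return a
// 0/1 result, so most entries use BAD_ICMP_PREDICATE and legalizeCustom only
// needs to truncate the returned value; the remaining entries check the
// helper's result against zero with the recorded integer predicate, and the
// two-entry predicates (ONE, UEQ) combine the results of two helper calls.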

void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}
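
// Note on the mapping above: the GNU (libgcc-style) comparison routines
// return an ordering value rather than a boolean, so every entry records the
// integer predicate that legalizeCustom must apply against zero (e.g. for
// FCMP_OGT the OGT_F32 result must be signed-greater-than zero). FCMP_ONE and
// FCMP_UEQ again need two calls whose results are OR'ed together.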

ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIRBuilder,
                                      GISelChangeObserver &Observer) const {
  using namespace TargetOpcode;

  MIRBuilder.setInstr(MI);
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    unsigned OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. We need to create a virtual register for it.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    auto RetVal = MRI.createGenericVirtualRegister(
        getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));

    auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;

    // The remainder is the second result of divmod. Split the return value
    // into a new, unused register for the quotient and the destination of the
    // original instruction for the remainder.
    MIRBuilder.buildUnmerge(
        {MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
        RetVal);
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    SmallVector<unsigned, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
        MIRBuilder.buildConstant(Zero, 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0).getReg(),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}