2018-11-21 22:37:49 +08:00
|
|
|
// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
|
|
|
|
|
2019-08-09 23:30:13 +08:00
|
|
|
// CHECK: @i32_global = internal global i32 42
|
2019-09-04 00:10:24 +08:00
|
|
|
llvm.mlir.global @i32_global(42: i32) : !llvm.i32
|
2019-08-09 23:30:13 +08:00
|
|
|
|
2019-09-04 18:45:38 +08:00
|
|
|
// CHECK: @i32_const = internal constant i53 52
|
|
|
|
llvm.mlir.global constant @i32_const(52: i53) : !llvm.i53
|
|
|
|
|
|
|
|
// CHECK: @int_global_array = internal global [3 x i32] [i32 62, i32 62, i32 62]
|
|
|
|
llvm.mlir.global @int_global_array(dense<62> : vector<3xi32>) : !llvm<"[3 x i32]">
|
2019-08-09 23:30:13 +08:00
|
|
|
|
2019-09-19 19:50:17 +08:00
|
|
|
// CHECK: @i32_global_addr_space = internal addrspace(7) global i32 62
|
|
|
|
llvm.mlir.global @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : !llvm.i32
|
|
|
|
|
2019-08-09 23:30:13 +08:00
|
|
|
// CHECK: @float_global = internal global float 0.000000e+00
|
2019-09-04 00:10:24 +08:00
|
|
|
llvm.mlir.global @float_global(0.0: f32) : !llvm.float
|
2019-08-09 23:30:13 +08:00
|
|
|
|
2019-09-04 18:45:38 +08:00
|
|
|
// CHECK: @float_global_array = internal global [1 x float] [float -5.000000e+00]
|
|
|
|
llvm.mlir.global @float_global_array(dense<[-5.0]> : vector<1xf32>) : !llvm<"[1 x float]">
|
|
|
|
|
2019-08-09 23:59:45 +08:00
|
|
|
// CHECK: @string_const = internal constant [6 x i8] c"foobar"
|
2019-09-04 00:10:24 +08:00
|
|
|
llvm.mlir.global constant @string_const("foobar") : !llvm<"[6 x i8]">
|
2019-08-09 23:59:45 +08:00
|
|
|
|
2019-09-21 16:19:43 +08:00
|
|
|
// CHECK: @int_global_undef = internal global i64 undef
|
|
|
|
llvm.mlir.global @int_global_undef() : !llvm.i64
|
|
|
|
|
2019-11-06 07:10:28 +08:00
|
|
|
// CHECK: @int_gep = internal constant i32* getelementptr (i32, i32* @i32_global, i32 2)
|
|
|
|
llvm.mlir.global constant @int_gep() : !llvm<"i32*"> {
|
|
|
|
%addr = llvm.mlir.addressof @i32_global : !llvm<"i32*">
|
|
|
|
%_c0 = llvm.mlir.constant(2: i32) :!llvm.i32
|
|
|
|
%gepinit = llvm.getelementptr %addr[%_c0] : (!llvm<"i32*">, !llvm.i32) -> !llvm<"i32*">
|
|
|
|
llvm.return %gepinit : !llvm<"i32*">
|
|
|
|
}
|
|
|
|
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
//
|
|
|
|
// Declarations of the allocation functions to be linked against.
|
|
|
|
//
|
|
|
|
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK: declare i8* @malloc(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @malloc(!llvm.i64) -> !llvm<"i8*">
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK: declare void @free(i8*)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
|
|
|
|
|
|
|
|
//
|
2018-12-29 13:24:30 +08:00
|
|
|
// Basic functionality: function and block conversion, function calls,
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// phi nodes, scalar type conversion, arithmetic operations.
|
|
|
|
//
|
|
|
|
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-LABEL: define void @empty() {
|
|
|
|
// CHECK-NEXT: ret void
|
|
|
|
// CHECK-NEXT: }
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @empty() {
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
2019-08-12 21:10:29 +08:00
|
|
|
// CHECK-LABEL: @global_refs
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @global_refs() {
|
2019-08-12 21:10:29 +08:00
|
|
|
// Check load from globals.
|
|
|
|
// CHECK: load i32, i32* @i32_global
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.addressof @i32_global : !llvm<"i32*">
|
2019-08-12 21:10:29 +08:00
|
|
|
%1 = llvm.load %0 : !llvm<"i32*">
|
|
|
|
|
|
|
|
// Check the contracted form of load from array constants.
|
|
|
|
// CHECK: load i8, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @string_const, i64 0, i64 0)
|
2019-09-04 00:10:24 +08:00
|
|
|
%2 = llvm.mlir.addressof @string_const : !llvm<"[6 x i8]*">
|
|
|
|
%c0 = llvm.mlir.constant(0 : index) : !llvm.i64
|
2019-08-12 21:10:29 +08:00
|
|
|
%3 = llvm.getelementptr %2[%c0, %c0] : (!llvm<"[6 x i8]*">, !llvm.i64, !llvm.i64) -> !llvm<"i8*">
|
|
|
|
%4 = llvm.load %3 : !llvm<"i8*">
|
|
|
|
|
|
|
|
llvm.return
|
|
|
|
}
|
|
|
|
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-LABEL: declare void @body(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @body(!llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
|
|
|
|
// CHECK-LABEL: define void @simple_loop() {
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @simple_loop() {
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK: br label %[[SIMPLE_bb1:[0-9]+]]
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// Constants are inlined in LLVM rather than a separate instruction.
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[SIMPLE_bb1]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[SIMPLE_bb2:[0-9]+]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(1 : index) : !llvm.i64
|
|
|
|
%1 = llvm.mlir.constant(42 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%0 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[SIMPLE_bb2]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %[[SIMPLE_bb3:[0-9]+]] ], [ 1, %[[SIMPLE_bb1]] ]
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 42
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %[[SIMPLE_bb3]], label %[[SIMPLE_bb4:[0-9]+]]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb3
|
|
|
|
%3 = llvm.icmp "slt" %2, %1 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %3, ^bb3, ^bb4
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[SIMPLE_bb3]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @body(i64 %{{[0-9]+}})
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[SIMPLE_bb2]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb3: // pred: ^bb2
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @body(%2) : (!llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%4 = llvm.mlir.constant(1 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%5 = llvm.add %2, %4 : !llvm.i64
|
|
|
|
llvm.br ^bb2(%5 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[SIMPLE_bb4]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: ret void
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb4: // pred: ^bb2
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: define void @simple_caller() {
|
|
|
|
// CHECK-NEXT: call void @simple_loop()
|
|
|
|
// CHECK-NEXT: ret void
|
|
|
|
// CHECK-NEXT: }
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @simple_caller() {
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.call @simple_loop() : () -> ()
|
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
2019-01-03 02:20:00 +08:00
|
|
|
//func @simple_indirect_caller() {
|
2018-12-30 03:32:37 +08:00
|
|
|
//^bb0:
|
2018-11-21 22:37:49 +08:00
|
|
|
// %f = constant @simple_loop : () -> ()
|
|
|
|
// call_indirect %f() : () -> ()
|
|
|
|
// return
|
|
|
|
//}
|
|
|
|
|
|
|
|
// CHECK-LABEL: define void @ml_caller() {
|
|
|
|
// CHECK-NEXT: call void @simple_loop()
|
|
|
|
// CHECK-NEXT: call void @more_imperfectly_nested_loops()
|
|
|
|
// CHECK-NEXT: ret void
|
|
|
|
// CHECK-NEXT: }
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @ml_caller() {
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.call @simple_loop() : () -> ()
|
|
|
|
llvm.call @more_imperfectly_nested_loops() : () -> ()
|
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: declare i64 @body_args(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @body_args(!llvm.i64) -> !llvm.i64
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-LABEL: declare i32 @other(i64, i32)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @other(!llvm.i64, !llvm.i32) -> !llvm.i32
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// CHECK-LABEL: define i32 @func_args(i32 {{%.*}}, i32 {{%.*}}) {
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[ARGS_bb1:[0-9]+]]
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @func_args(%arg0: !llvm.i32, %arg1: !llvm.i32) -> !llvm.i32 {
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(0 : i32) : !llvm.i32
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[ARGS_bb1]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[ARGS_bb2:[0-9]+]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%1 = llvm.mlir.constant(0 : index) : !llvm.i64
|
|
|
|
%2 = llvm.mlir.constant(42 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%1 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[ARGS_bb2]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: %5 = phi i64 [ %12, %[[ARGS_bb3:[0-9]+]] ], [ 0, %[[ARGS_bb1]] ]
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %6 = icmp slt i64 %5, 42
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br i1 %6, label %[[ARGS_bb3]], label %[[ARGS_bb4:[0-9]+]]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb2(%3: !llvm.i64): // 2 preds: ^bb1, ^bb3
|
|
|
|
%4 = llvm.icmp "slt" %3, %2 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %4, ^bb3, ^bb4
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[ARGS_bb3]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %8 = call i64 @body_args(i64 %5)
|
|
|
|
// CHECK-NEXT: %9 = call i32 @other(i64 %8, i32 %0)
|
|
|
|
// CHECK-NEXT: %10 = call i32 @other(i64 %8, i32 %9)
|
|
|
|
// CHECK-NEXT: %11 = call i32 @other(i64 %8, i32 %1)
|
|
|
|
// CHECK-NEXT: %12 = add i64 %5, 1
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[ARGS_bb2]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb3: // pred: ^bb2
|
2019-04-06 14:56:49 +08:00
|
|
|
%5 = llvm.call @body_args(%3) : (!llvm.i64) -> !llvm.i64
|
|
|
|
%6 = llvm.call @other(%5, %arg0) : (!llvm.i64, !llvm.i32) -> !llvm.i32
|
|
|
|
%7 = llvm.call @other(%5, %6) : (!llvm.i64, !llvm.i32) -> !llvm.i32
|
|
|
|
%8 = llvm.call @other(%5, %arg1) : (!llvm.i64, !llvm.i32) -> !llvm.i32
|
2019-09-04 00:10:24 +08:00
|
|
|
%9 = llvm.mlir.constant(1 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%10 = llvm.add %3, %9 : !llvm.i64
|
|
|
|
llvm.br ^bb2(%10 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[ARGS_bb4]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %14 = call i32 @other(i64 0, i32 0)
|
|
|
|
// CHECK-NEXT: ret i32 %14
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb4: // pred: ^bb2
|
2019-09-04 00:10:24 +08:00
|
|
|
%11 = llvm.mlir.constant(0 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%12 = llvm.call @other(%11, %0) : (!llvm.i64, !llvm.i32) -> !llvm.i32
|
|
|
|
llvm.return %12 : !llvm.i32
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK: declare void @pre(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @pre(!llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// CHECK: declare void @body2(i64, i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @body2(!llvm.i64, !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// CHECK: declare void @post(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @post(!llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: define void @imperfectly_nested_loops() {
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb1:[0-9]+]]
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @imperfectly_nested_loops() {
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb1]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb2:[0-9]+]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(0 : index) : !llvm.i64
|
|
|
|
%1 = llvm.mlir.constant(42 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%0 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb2]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: %3 = phi i64 [ %13, %[[IMPER_bb7:[0-9]+]] ], [ 0, %[[IMPER_bb1]] ]
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %4 = icmp slt i64 %3, 42
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br i1 %4, label %[[IMPER_bb3:[0-9]+]], label %[[IMPER_bb8:[0-9]+]]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb7
|
|
|
|
%3 = llvm.icmp "slt" %2, %1 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %3, ^bb3, ^bb8
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb3]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @pre(i64 %3)
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb4:[0-9]+]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb3: // pred: ^bb2
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @pre(%2) : (!llvm.i64) -> ()
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb4
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb4]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb5:[0-9]+]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb4: // pred: ^bb3
|
2019-09-04 00:10:24 +08:00
|
|
|
%4 = llvm.mlir.constant(7 : index) : !llvm.i64
|
|
|
|
%5 = llvm.mlir.constant(56 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb5(%4 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb5]]:
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: %8 = phi i64 [ %11, %[[IMPER_bb6:[0-9]+]] ], [ 7, %[[IMPER_bb4]] ]
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %9 = icmp slt i64 %8, 56
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br i1 %9, label %[[IMPER_bb6]], label %[[IMPER_bb7]]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6
|
|
|
|
%7 = llvm.icmp "slt" %6, %5 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %7, ^bb6, ^bb7
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb6]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @body2(i64 %3, i64 %8)
|
|
|
|
// CHECK-NEXT: %11 = add i64 %8, 2
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb5]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb6: // pred: ^bb5
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%8 = llvm.mlir.constant(2 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%9 = llvm.add %6, %8 : !llvm.i64
|
|
|
|
llvm.br ^bb5(%9 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb7]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @post(i64 %3)
|
|
|
|
// CHECK-NEXT: %13 = add i64 %3, 1
|
2018-12-30 03:32:37 +08:00
|
|
|
// CHECK-NEXT: br label %[[IMPER_bb2]]
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb7: // pred: ^bb5
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @post(%2) : (!llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%10 = llvm.mlir.constant(1 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%11 = llvm.add %2, %10 : !llvm.i64
|
|
|
|
llvm.br ^bb2(%11 : !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: [[IMPER_bb8]]:
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: ret void
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb8: // pred: ^bb2
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK: declare void @mid(i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @mid(!llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// CHECK: declare void @body3(i64, i64)
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @body3(!llvm.i64, !llvm.i64)
|
2018-11-21 22:37:49 +08:00
|
|
|
|
|
|
|
// A complete function transformation check.
|
|
|
|
// CHECK-LABEL: define void @more_imperfectly_nested_loops() {
|
|
|
|
// CHECK-NEXT: br label %1
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 1: ; preds = %0
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: br label %2
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 2: ; preds = %19, %1
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %3 = phi i64 [ %20, %19 ], [ 0, %1 ]
|
|
|
|
// CHECK-NEXT: %4 = icmp slt i64 %3, 42
|
|
|
|
// CHECK-NEXT: br i1 %4, label %5, label %21
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 5: ; preds = %2
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @pre(i64 %3)
|
|
|
|
// CHECK-NEXT: br label %6
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 6: ; preds = %5
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: br label %7
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 7: ; preds = %10, %6
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %8 = phi i64 [ %11, %10 ], [ 7, %6 ]
|
|
|
|
// CHECK-NEXT: %9 = icmp slt i64 %8, 56
|
|
|
|
// CHECK-NEXT: br i1 %9, label %10, label %12
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 10: ; preds = %7
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @body2(i64 %3, i64 %8)
|
|
|
|
// CHECK-NEXT: %11 = add i64 %8, 2
|
|
|
|
// CHECK-NEXT: br label %7
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 12: ; preds = %7
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @mid(i64 %3)
|
|
|
|
// CHECK-NEXT: br label %13
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 13: ; preds = %12
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: br label %14
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 14: ; preds = %17, %13
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: %15 = phi i64 [ %18, %17 ], [ 18, %13 ]
|
|
|
|
// CHECK-NEXT: %16 = icmp slt i64 %15, 37
|
|
|
|
// CHECK-NEXT: br i1 %16, label %17, label %19
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 17: ; preds = %14
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @body3(i64 %3, i64 %15)
|
|
|
|
// CHECK-NEXT: %18 = add i64 %15, 3
|
|
|
|
// CHECK-NEXT: br label %14
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 19: ; preds = %14
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: call void @post(i64 %3)
|
|
|
|
// CHECK-NEXT: %20 = add i64 %3, 1
|
|
|
|
// CHECK-NEXT: br label %2
|
2019-03-23 08:54:26 +08:00
|
|
|
// CHECK: 21: ; preds = %2
|
2018-11-21 22:37:49 +08:00
|
|
|
// CHECK-NEXT: ret void
|
|
|
|
// CHECK-NEXT: }
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @more_imperfectly_nested_loops() {
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(0 : index) : !llvm.i64
|
|
|
|
%1 = llvm.mlir.constant(42 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%0 : !llvm.i64)
|
|
|
|
^bb2(%2: !llvm.i64): // 2 preds: ^bb1, ^bb11
|
|
|
|
%3 = llvm.icmp "slt" %2, %1 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %3, ^bb3, ^bb12
|
|
|
|
^bb3: // pred: ^bb2
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @pre(%2) : (!llvm.i64) -> ()
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb4
|
|
|
|
^bb4: // pred: ^bb3
|
2019-09-04 00:10:24 +08:00
|
|
|
%4 = llvm.mlir.constant(7 : index) : !llvm.i64
|
|
|
|
%5 = llvm.mlir.constant(56 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb5(%4 : !llvm.i64)
|
|
|
|
^bb5(%6: !llvm.i64): // 2 preds: ^bb4, ^bb6
|
|
|
|
%7 = llvm.icmp "slt" %6, %5 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %7, ^bb6, ^bb7
|
|
|
|
^bb6: // pred: ^bb5
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @body2(%2, %6) : (!llvm.i64, !llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%8 = llvm.mlir.constant(2 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%9 = llvm.add %6, %8 : !llvm.i64
|
|
|
|
llvm.br ^bb5(%9 : !llvm.i64)
|
2019-04-03 06:33:54 +08:00
|
|
|
^bb7: // pred: ^bb5
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @mid(%2) : (!llvm.i64) -> ()
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb8
|
|
|
|
^bb8: // pred: ^bb7
|
2019-09-04 00:10:24 +08:00
|
|
|
%10 = llvm.mlir.constant(18 : index) : !llvm.i64
|
|
|
|
%11 = llvm.mlir.constant(37 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb9(%10 : !llvm.i64)
|
|
|
|
^bb9(%12: !llvm.i64): // 2 preds: ^bb8, ^bb10
|
|
|
|
%13 = llvm.icmp "slt" %12, %11 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %13, ^bb10, ^bb11
|
|
|
|
^bb10: // pred: ^bb9
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @body3(%2, %12) : (!llvm.i64, !llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%14 = llvm.mlir.constant(3 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%15 = llvm.add %12, %14 : !llvm.i64
|
|
|
|
llvm.br ^bb9(%15 : !llvm.i64)
|
2019-04-03 06:33:54 +08:00
|
|
|
^bb11: // pred: ^bb9
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.call @post(%2) : (!llvm.i64) -> ()
|
2019-09-04 00:10:24 +08:00
|
|
|
%16 = llvm.mlir.constant(1 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%17 = llvm.add %2, %16 : !llvm.i64
|
|
|
|
llvm.br ^bb2(%17 : !llvm.i64)
|
2019-04-03 06:33:54 +08:00
|
|
|
^bb12: // pred: ^bb2
|
|
|
|
llvm.return
|
2018-11-21 22:37:49 +08:00
|
|
|
}
|
|
|
|
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
//
|
|
|
|
// MemRef type conversion, allocation and communication with functions.
|
|
|
|
//
|
|
|
|
|
|
|
|
// CHECK-LABEL: define void @memref_alloc()
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @memref_alloc() {
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(10 : index) : !llvm.i64
|
|
|
|
%1 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%2 = llvm.mul %0, %1 : !llvm.i64
|
2019-09-04 00:10:24 +08:00
|
|
|
%3 = llvm.mlir.undef : !llvm<"{ float* }">
|
|
|
|
%4 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%5 = llvm.mul %2, %4 : !llvm.i64
|
|
|
|
%6 = llvm.call @malloc(%5) : (!llvm.i64) -> !llvm<"i8*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%7 = llvm.bitcast %6 : !llvm<"i8*"> to !llvm<"float*">
|
|
|
|
%8 = llvm.insertvalue %7, %3[0] : !llvm<"{ float* }">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: ret void
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CHECK-LABEL: declare i64 @get_index()
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @get_index() -> !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: define void @store_load_static()
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @store_load_static() {
|
2018-12-30 03:32:37 +08:00
|
|
|
^bb0:
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(10 : index) : !llvm.i64
|
|
|
|
%1 = llvm.mlir.undef : !llvm<"{ float* }">
|
|
|
|
%2 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%3 = llvm.mul %0, %2 : !llvm.i64
|
|
|
|
%4 = llvm.call @malloc(%3) : (!llvm.i64) -> !llvm<"i8*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%5 = llvm.bitcast %4 : !llvm<"i8*"> to !llvm<"float*">
|
|
|
|
%6 = llvm.insertvalue %5, %1[0] : !llvm<"{ float* }">
|
2019-09-04 00:10:24 +08:00
|
|
|
%7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%8 = llvm.mlir.constant(0 : index) : !llvm.i64
|
|
|
|
%9 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%8 : !llvm.i64)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb2(%10: !llvm.i64): // 2 preds: ^bb1, ^bb3
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10
|
2019-04-06 14:56:49 +08:00
|
|
|
%11 = llvm.icmp "slt" %10, %9 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %11, ^bb3, ^bb4
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb3: // pred: ^bb2
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%12 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%13 = llvm.extractvalue %6[0] : !llvm<"{ float* }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%14 = llvm.getelementptr %13[%10] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %7, %14 : !llvm<"float*">
|
2019-09-04 00:10:24 +08:00
|
|
|
%15 = llvm.mlir.constant(1 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
2019-04-06 14:56:49 +08:00
|
|
|
%16 = llvm.add %10, %15 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br label %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%16 : !llvm.i64)
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb4: // pred: ^bb2
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb5
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb5: // pred: ^bb4
|
2019-09-04 00:10:24 +08:00
|
|
|
%17 = llvm.mlir.constant(0 : index) : !llvm.i64
|
|
|
|
%18 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb6(%17 : !llvm.i64)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb6(%19: !llvm.i64): // 2 preds: ^bb5, ^bb7
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, 10
|
2019-04-06 14:56:49 +08:00
|
|
|
%20 = llvm.icmp "slt" %19, %18 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %20, ^bb7, ^bb8
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb7: // pred: ^bb6
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%21 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%22 = llvm.extractvalue %6[0] : !llvm<"{ float* }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%23 = llvm.getelementptr %22[%19] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%24 = llvm.load %23 : !llvm<"float*">
|
2019-09-04 00:10:24 +08:00
|
|
|
%25 = llvm.mlir.constant(1 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
2019-04-06 14:56:49 +08:00
|
|
|
%26 = llvm.add %19, %25 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br label %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb6(%26 : !llvm.i64)
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb8: // pred: ^bb6
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: ret void
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
}
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}})
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @store_load_dynamic(%arg0: !llvm.i64) {
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.undef : !llvm<"{ float*, i64 }">
|
|
|
|
%1 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%2 = llvm.mul %arg0, %1 : !llvm.i64
|
|
|
|
%3 = llvm.call @malloc(%2) : (!llvm.i64) -> !llvm<"i8*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%4 = llvm.bitcast %3 : !llvm<"i8*"> to !llvm<"float*">
|
|
|
|
%5 = llvm.insertvalue %4, %0[0] : !llvm<"{ float*, i64 }">
|
|
|
|
%6 = llvm.insertvalue %arg0, %5[1] : !llvm<"{ float*, i64 }">
|
2019-09-04 00:10:24 +08:00
|
|
|
%7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br label %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb1
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb1: // pred: ^bb0
|
2019-09-04 00:10:24 +08:00
|
|
|
%8 = llvm.mlir.constant(0 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%8 : !llvm.i64)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb2(%9: !llvm.i64): // 2 preds: ^bb1, ^bb3
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
%10 = llvm.icmp "slt" %9, %arg0 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %10, ^bb3, ^bb4
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb3: // pred: ^bb2
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
%11 = llvm.extractvalue %6[1] : !llvm<"{ float*, i64 }">
|
|
|
|
%12 = llvm.extractvalue %6[0] : !llvm<"{ float*, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%13 = llvm.getelementptr %12[%9] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %7, %13 : !llvm<"float*">
|
2019-09-04 00:10:24 +08:00
|
|
|
%14 = llvm.mlir.constant(1 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
2019-04-06 14:56:49 +08:00
|
|
|
%15 = llvm.add %9, %14 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br label %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb2(%15 : !llvm.i64)
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb4: // pred: ^bb3
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.br ^bb5
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb5: // pred: ^bb4
|
2019-09-04 00:10:24 +08:00
|
|
|
%16 = llvm.mlir.constant(0 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb6(%16 : !llvm.i64)
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = phi i64 [ %{{[0-9]+}}, %{{[0-9]+}} ], [ 0, %{{[0-9]+}} ]
|
2019-04-06 14:56:49 +08:00
|
|
|
^bb6(%17: !llvm.i64): // 2 preds: ^bb5, ^bb7
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = icmp slt i64 %{{[0-9]+}}, %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
%18 = llvm.icmp "slt" %17, %arg0 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.cond_br %18, ^bb7, ^bb8
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb7: // pred: ^bb6
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
%19 = llvm.extractvalue %6[1] : !llvm<"{ float*, i64 }">
|
|
|
|
%20 = llvm.extractvalue %6[0] : !llvm<"{ float*, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%21 = llvm.getelementptr %20[%17] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%22 = llvm.load %21 : !llvm<"float*">
|
2019-09-04 00:10:24 +08:00
|
|
|
%23 = llvm.mlir.constant(1 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
2019-04-06 14:56:49 +08:00
|
|
|
%24 = llvm.add %17, %23 : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: br label %{{[0-9]+}}
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.br ^bb6(%24 : !llvm.i64)
|
2019-02-14 07:30:24 +08:00
|
|
|
^bb8: // pred: ^bb6
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK: ret void
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data laid out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however, it is not always compatible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
}
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// CHECK-LABEL: define void @store_load_mixed(i64 {{%.*}})
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @store_load_mixed(%arg0: !llvm.i64) {
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(10 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 2, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 10
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } undef, float* %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 10, 2
|
2019-09-04 00:10:24 +08:00
|
|
|
%1 = llvm.mlir.constant(2 : index) : !llvm.i64
|
|
|
|
%2 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%3 = llvm.mul %1, %arg0 : !llvm.i64
|
|
|
|
%4 = llvm.mul %3, %2 : !llvm.i64
|
|
|
|
%5 = llvm.mul %4, %0 : !llvm.i64
|
2019-09-04 00:10:24 +08:00
|
|
|
%6 = llvm.mlir.undef : !llvm<"{ float*, i64, i64 }">
|
|
|
|
%7 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%8 = llvm.mul %5, %7 : !llvm.i64
|
|
|
|
%9 = llvm.call @malloc(%8) : (!llvm.i64) -> !llvm<"i8*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%10 = llvm.bitcast %9 : !llvm<"i8*"> to !llvm<"float*">
|
|
|
|
%11 = llvm.insertvalue %10, %6[0] : !llvm<"{ float*, i64, i64 }">
|
|
|
|
%12 = llvm.insertvalue %arg0, %11[1] : !llvm<"{ float*, i64, i64 }">
|
|
|
|
%13 = llvm.insertvalue %0, %12[2] : !llvm<"{ float*, i64, i64 }">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
|
2019-09-04 00:10:24 +08:00
|
|
|
%14 = llvm.mlir.constant(1 : index) : !llvm.i64
|
|
|
|
%15 = llvm.mlir.constant(2 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%16 = llvm.call @get_index() : () -> !llvm.i64
|
|
|
|
%17 = llvm.call @get_index() : () -> !llvm.i64
|
2019-09-04 00:10:24 +08:00
|
|
|
%18 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
|
|
|
|
%19 = llvm.mlir.constant(2 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 1, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
%20 = llvm.extractvalue %13[1] : !llvm<"{ float*, i64, i64 }">
|
2019-09-04 00:10:24 +08:00
|
|
|
%21 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%22 = llvm.extractvalue %13[2] : !llvm<"{ float*, i64, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%23 = llvm.mul %14, %20 : !llvm.i64
|
|
|
|
%24 = llvm.add %23, %15 : !llvm.i64
|
|
|
|
%25 = llvm.mul %24, %21 : !llvm.i64
|
|
|
|
%26 = llvm.add %25, %16 : !llvm.i64
|
|
|
|
%27 = llvm.mul %26, %22 : !llvm.i64
|
|
|
|
%28 = llvm.add %27, %17 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%29 = llvm.extractvalue %13[0] : !llvm<"{ float*, i64, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%30 = llvm.getelementptr %29[%28] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %18, %30 : !llvm<"float*">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%31 = llvm.mlir.constant(2 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%32 = llvm.extractvalue %13[1] : !llvm<"{ float*, i64, i64 }">
|
2019-09-04 00:10:24 +08:00
|
|
|
%33 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%34 = llvm.extractvalue %13[2] : !llvm<"{ float*, i64, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%35 = llvm.mul %17, %32 : !llvm.i64
|
|
|
|
%36 = llvm.add %35, %16 : !llvm.i64
|
|
|
|
%37 = llvm.mul %36, %33 : !llvm.i64
|
|
|
|
%38 = llvm.add %37, %15 : !llvm.i64
|
|
|
|
%39 = llvm.mul %38, %34 : !llvm.i64
|
|
|
|
%40 = llvm.add %39, %14 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%41 = llvm.extractvalue %13[0] : !llvm<"{ float*, i64, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%42 = llvm.getelementptr %41[%40] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%43 = llvm.load %42 : !llvm<"float*">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: ret void
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
}
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}}) {
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @memref_args_rets(%arg0: !llvm<"{ float* }">, %arg1: !llvm<"{ float*, i64 }">, %arg2: !llvm<"{ float*, i64 }">) -> !llvm<"{ float*, i64 }"> {
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(7 : index) : !llvm.i64
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
|
2019-04-06 14:56:49 +08:00
|
|
|
%1 = llvm.call @get_index() : () -> !llvm.i64
|
2019-09-04 00:10:24 +08:00
|
|
|
%2 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
|
|
|
|
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%3 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%4 = llvm.extractvalue %arg0[0] : !llvm<"{ float* }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%5 = llvm.getelementptr %4[%0] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %2, %5 : !llvm<"float*">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
|
|
|
|
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
%6 = llvm.extractvalue %arg1[1] : !llvm<"{ float*, i64 }">
|
|
|
|
%7 = llvm.extractvalue %arg1[0] : !llvm<"{ float*, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%8 = llvm.getelementptr %7[%0] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %2, %8 : !llvm<"float*">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%9 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%10 = llvm.extractvalue %arg2[1] : !llvm<"{ float*, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%11 = llvm.mul %0, %10 : !llvm.i64
|
|
|
|
%12 = llvm.add %11, %1 : !llvm.i64
|
2019-04-03 06:33:54 +08:00
|
|
|
%13 = llvm.extractvalue %arg2[0] : !llvm<"{ float*, i64 }">
|
2019-04-06 14:56:49 +08:00
|
|
|
%14 = llvm.getelementptr %13[%12] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.store %2, %14 : !llvm<"float*">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
|
2019-02-14 07:30:24 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
|
2019-09-04 00:10:24 +08:00
|
|
|
%15 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%16 = llvm.mul %15, %1 : !llvm.i64
|
2019-09-04 00:10:24 +08:00
|
|
|
%17 = llvm.mlir.undef : !llvm<"{ float*, i64 }">
|
|
|
|
%18 = llvm.mlir.constant(4 : index) : !llvm.i64
|
2019-04-06 14:56:49 +08:00
|
|
|
%19 = llvm.mul %16, %18 : !llvm.i64
|
|
|
|
%20 = llvm.call @malloc(%19) : (!llvm.i64) -> !llvm<"i8*">
|
2019-04-03 06:33:54 +08:00
|
|
|
%21 = llvm.bitcast %20 : !llvm<"i8*"> to !llvm<"float*">
|
|
|
|
%22 = llvm.insertvalue %21, %17[0] : !llvm<"{ float*, i64 }">
|
|
|
|
%23 = llvm.insertvalue %1, %22[1] : !llvm<"{ float*, i64 }">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
// CHECK-NEXT: ret { float*, i64 } %{{[0-9]+}}
|
2019-04-03 06:33:54 +08:00
|
|
|
llvm.return %23 : !llvm<"{ float*, i64 }">
|
LLVM IR lowering: support simple MemRef types
Introduce initial support for MemRef types, including type conversion,
allocation and deallocation, read and write element-wise access, passing
MemRefs to and returning from functions. Affine map compositions and
non-default memory spaces are NOT YET supported.
Lowered code needs to handle potentially dynamic sizes of the MemRef. To do
so, it replaces a MemRef-typed value with a special MemRef descriptor that
carries the data and the dynamic sizes together. A MemRef type is converted to
LLVM's first-class structure type with the first element being the pointer to
the data buffer with data layed out linearly, followed by as many integer-typed
elements as MemRef has dynamic sizes. The type of these elements is that of
MLIR index lowered to LLVM. For example, `memref<?x42x?xf32>` is converted to
`{ f32*, i64, i64 }` provided `index` is lowered to `i64`. While it is
possible to convert MemRefs with fully static sizes to simple pointers to their
elemental types, we opted for consistency and convert them to the
single-element structure. This makes the conversion code simpler and the
calling convention of the generated LLVM IR functions consistent.
Loads from and stores to a MemRef element are lowered to a sequence of LLVM
instructions that, first, computes the linearized index of the element in the
data buffer using the access indices and combining the static sizes with the
dynamic sizes stored in the descriptor, and then loads from or stores to the
buffer element indexed by the linearized subscript. While some of the index
computations may be redundant (i.e., consecutive load and store to the same
location in the same scope could reuse the linearized index), we emit them for
every operation. A subsequent optimization pass may eliminate them if
necessary.
MemRef allocation and deallocation is performed using external functions
`__mlir_alloc(index) -> i8*` and `__mlir_free(i8*)` that must be implemented by
the caller. These functions behave similarly to `malloc` and `free`, but can
be extended to support different memory spaces in future. Allocation and
deallocation instructions take care of casting the pointers. Prior to calling
the allocation function, the emitted code creates an SSA Value for the
descriptor and uses it to store the dynamic sizes of the MemRef passed to the
allocation operation. It further emits instructions that compute the dynamic
amount of memory to allocate in bytes. Finally, the allocation stores the
result of calling the `__mlir_alloc` in the MemRef descriptor. Deallocation
extracts the pointer to the allocated memory from the descriptor and calls
`__mlir_free` on it. The descriptor itself is not modified and, being
stack-allocated, ceases to exist when it goes out of scope.
MLIR functions that access MemRef values as arguments or return them are
converted to LLVM IR functions that accept MemRef descriptors as LLVM IR
structure types by value. This significantly simplifies the calling convention
at the LLVM IR level and avoids handling descriptors in the dynamic memory,
however is not always comaptible with LLVM IR functions emitted from C code
with similar signatures. A separate LLVM pass may be introduced in the future
to provide C-compatible calling conventions for LLVM IR functions generated
from MLIR.
PiperOrigin-RevId: 223134883
2018-11-28 18:32:10 +08:00
|
|
|
}
|
|
|
|
|
2018-11-28 23:07:56 +08:00
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @memref_dim(%arg0: !llvm<"{ float*, i64, i64 }">) -> !llvm.i64 {
|
2018-11-28 23:07:56 +08:00
|
|
|
// Expecting this to create an LLVM constant.
|
2019-09-04 00:10:24 +08:00
|
|
|
%0 = llvm.mlir.constant(42 : index) : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
// CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
|
2019-04-03 06:33:54 +08:00
|
|
|
%1 = llvm.extractvalue %arg0[1] : !llvm<"{ float*, i64, i64 }">
|
2018-11-28 23:07:56 +08:00
|
|
|
// Expecting this to create an LLVM constant.
|
2019-09-04 00:10:24 +08:00
|
|
|
%2 = llvm.mlir.constant(10 : index) : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
// CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2
|
2019-04-03 06:33:54 +08:00
|
|
|
%3 = llvm.extractvalue %arg0[2] : !llvm<"{ float*, i64, i64 }">
|
2018-11-28 23:07:56 +08:00
|
|
|
// Checking that the constant for d0 has been created.
|
|
|
|
// CHECK-NEXT: %4 = add i64 42, %2
|
2019-04-06 14:56:49 +08:00
|
|
|
%4 = llvm.add %0, %1 : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
// Checking that the constant for d2 has been created.
|
|
|
|
// CHECK-NEXT: %5 = add i64 10, %3
|
2019-04-06 14:56:49 +08:00
|
|
|
%5 = llvm.add %2, %3 : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
// CHECK-NEXT: %6 = add i64 %4, %5
|
2019-04-06 14:56:49 +08:00
|
|
|
%6 = llvm.add %4, %5 : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
// CHECK-NEXT: ret i64 %6
|
2019-04-06 14:56:49 +08:00
|
|
|
llvm.return %6 : !llvm.i64
|
2018-11-28 23:07:56 +08:00
|
|
|
}
|
2018-12-04 22:16:26 +08:00
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @get_i64() -> !llvm.i64
|
|
|
|
llvm.func @get_f32() -> !llvm.float
|
|
|
|
llvm.func @get_memref() -> !llvm<"{ float*, i64, i64 }">
|
2018-12-04 22:16:26 +08:00
|
|
|
|
|
|
|
// CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn() {
|
2019-10-10 16:33:33 +08:00
|
|
|
llvm.func @multireturn() -> !llvm<"{ i64, float, { float*, i64, i64 } }"> {
|
2019-04-06 14:56:49 +08:00
|
|
|
%0 = llvm.call @get_i64() : () -> !llvm.i64
|
|
|
|
%1 = llvm.call @get_f32() : () -> !llvm.float
|
2019-04-03 06:33:54 +08:00
|
|
|
%2 = llvm.call @get_memref() : () -> !llvm<"{ float*, i64, i64 }">
|
2018-12-04 22:16:26 +08:00
|
|
|
// CHECK: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
|
|
|
|
// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, { float*, i64, i64 } %{{[0-9]+}}, 2
|
|
|
|
// CHECK-NEXT: ret { i64, float, { float*, i64, i64 } } %{{[0-9]+}}
|
2019-09-04 00:10:24 +08:00
|
|
|
%3 = llvm.mlir.undef : !llvm<"{ i64, float, { float*, i64, i64 } }">
|
2019-04-03 06:33:54 +08:00
|
|
|
%4 = llvm.insertvalue %0, %3[0] : !llvm<"{ i64, float, { float*, i64, i64 } }">
|
|
|
|
%5 = llvm.insertvalue %1, %4[1] : !llvm<"{ i64, float, { float*, i64, i64 } }">
|
|
|
|
%6 = llvm.insertvalue %2, %5[2] : !llvm<"{ i64, float, { float*, i64, i64 } }">
|
|
|
|
llvm.return %6 : !llvm<"{ i64, float, { float*, i64, i64 } }">
|
2018-12-04 22:16:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Call a multi-result function and consume each result: the struct return is
// unpacked with extractvalue, and the memref-like descriptor is indexed.
// CHECK-LABEL: define void @multireturn_caller() {
llvm.func @multireturn_caller() {
// CHECK-NEXT: %1 = call { i64, float, { float*, i64, i64 } } @multireturn()
// CHECK-NEXT: [[ret0:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 0
// CHECK-NEXT: [[ret1:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 1
// CHECK-NEXT: [[ret2:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 2
  %0 = llvm.call @multireturn() : () -> !llvm<"{ i64, float, { float*, i64, i64 } }">
  %1 = llvm.extractvalue %0[0] : !llvm<"{ i64, float, { float*, i64, i64 } }">
  %2 = llvm.extractvalue %0[1] : !llvm<"{ i64, float, { float*, i64, i64 } }">
  %3 = llvm.extractvalue %0[2] : !llvm<"{ i64, float, { float*, i64, i64 } }">
  %4 = llvm.mlir.constant(42) : !llvm.i64
// CHECK: add i64 [[ret0]], 42
  %5 = llvm.add %1, %4 : !llvm.i64
  %6 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
// CHECK: fadd float [[ret1]], 4.200000e+01
  %7 = llvm.fadd %2, %6 : !llvm.float
  %8 = llvm.mlir.constant(0 : index) : !llvm.i64
  %9 = llvm.mlir.constant(42 : index) : !llvm.i64
// CHECK: extractvalue { float*, i64, i64 } [[ret2]], 0
  %10 = llvm.extractvalue %3[1] : !llvm<"{ float*, i64, i64 }">
  %11 = llvm.mlir.constant(10 : index) : !llvm.i64
  %12 = llvm.extractvalue %3[2] : !llvm<"{ float*, i64, i64 }">
  %13 = llvm.mul %8, %10 : !llvm.i64
  %14 = llvm.add %13, %8 : !llvm.i64
  %15 = llvm.mul %14, %11 : !llvm.i64
  %16 = llvm.add %15, %8 : !llvm.i64
  %17 = llvm.mul %16, %12 : !llvm.i64
  %18 = llvm.add %17, %8 : !llvm.i64
  %19 = llvm.extractvalue %3[0] : !llvm<"{ float*, i64, i64 }">
  %20 = llvm.getelementptr %19[%18] : (!llvm<"float*">, !llvm.i64) -> !llvm<"float*">
  %21 = llvm.load %20 : !llvm<"float*">
  llvm.return
}
|
2018-12-12 22:11:33 +08:00
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// Arithmetic and bitwise operations on vector operands translate to the
// corresponding vector-typed LLVM IR instructions.
// CHECK-LABEL: define <4 x float> @vector_ops(<4 x float> {{%.*}}, <4 x i1> {{%.*}}, <4 x i64> {{%.*}}) {
llvm.func @vector_ops(%arg0: !llvm<"<4 x float>">, %arg1: !llvm<"<4 x i1>">, %arg2: !llvm<"<4 x i64>">) -> !llvm<"<4 x float>"> {
  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm<"<4 x float>">
// CHECK-NEXT: %4 = fadd <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
  %1 = llvm.fadd %arg0, %0 : !llvm<"<4 x float>">
// CHECK-NEXT: %5 = select <4 x i1> %1, <4 x float> %4, <4 x float> %0
  %2 = llvm.select %arg1, %1, %arg0 : !llvm<"<4 x i1>">, !llvm<"<4 x float>">
// CHECK-NEXT: %6 = sdiv <4 x i64> %2, %2
  %3 = llvm.sdiv %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %7 = udiv <4 x i64> %2, %2
  %4 = llvm.udiv %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %8 = srem <4 x i64> %2, %2
  %5 = llvm.srem %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %9 = urem <4 x i64> %2, %2
  %6 = llvm.urem %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %10 = fdiv <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
  %7 = llvm.fdiv %arg0, %0 : !llvm<"<4 x float>">
// CHECK-NEXT: %11 = frem <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
  %8 = llvm.frem %arg0, %0 : !llvm<"<4 x float>">
// CHECK-NEXT: %12 = and <4 x i64> %2, %2
  %9 = llvm.and %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %13 = or <4 x i64> %2, %2
  %10 = llvm.or %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %14 = xor <4 x i64> %2, %2
  %11 = llvm.xor %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: ret <4 x float> %4
  llvm.return %1 : !llvm<"<4 x float>">
}
|
2018-12-27 04:09:06 +08:00
|
|
|
|
|
|
|
// Scalar integer/float arithmetic, comparison, select, shifts, bitwise ops,
// struct insertvalue and fneg all translate to their LLVM IR counterparts.
// CHECK-LABEL: @ops
llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.i32, %arg3: !llvm.i32) -> !llvm<"{ float, i32 }"> {
// CHECK-NEXT: fsub float %0, %1
  %0 = llvm.fsub %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %6 = sub i32 %2, %3
  %1 = llvm.sub %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %7 = icmp slt i32 %2, %6
  %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32
// CHECK-NEXT: %8 = select i1 %7, i32 %2, i32 %6
  %3 = llvm.select %2, %arg2, %1 : !llvm.i1, !llvm.i32
// CHECK-NEXT: %9 = sdiv i32 %2, %3
  %4 = llvm.sdiv %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %10 = udiv i32 %2, %3
  %5 = llvm.udiv %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %11 = srem i32 %2, %3
  %6 = llvm.srem %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %12 = urem i32 %2, %3
  %7 = llvm.urem %arg2, %arg3 : !llvm.i32
  %8 = llvm.mlir.undef : !llvm<"{ float, i32 }">
  %9 = llvm.insertvalue %0, %8[0] : !llvm<"{ float, i32 }">
  %10 = llvm.insertvalue %3, %9[1] : !llvm<"{ float, i32 }">
// CHECK: %15 = fdiv float %0, %1
  %11 = llvm.fdiv %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %16 = frem float %0, %1
  %12 = llvm.frem %arg0, %arg1 : !llvm.float
// CHECK-NEXT: %17 = and i32 %2, %3
  %13 = llvm.and %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %18 = or i32 %2, %3
  %14 = llvm.or %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %19 = xor i32 %2, %3
  %15 = llvm.xor %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %20 = shl i32 %2, %3
  %16 = llvm.shl %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %21 = lshr i32 %2, %3
  %17 = llvm.lshr %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: %22 = ashr i32 %2, %3
  %18 = llvm.ashr %arg2, %arg3 : !llvm.i32
// CHECK-NEXT: fneg float %0
  %19 = llvm.fneg %arg0 : !llvm.float
  llvm.return %10 : !llvm<"{ float, i32 }">
}
|
2019-02-15 22:25:30 +08:00
|
|
|
|
|
|
|
//
// Indirect function calls
//
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// An indirect call through a constant function pointer folds into a direct
// call in the translated IR.
// CHECK-LABEL: define void @indirect_const_call(i64 {{%.*}}) {
llvm.func @indirect_const_call(%arg0: !llvm.i64) {
// CHECK-NEXT: call void @body(i64 %0)
  %0 = llvm.mlir.constant(@body) : !llvm<"void (i64)*">
  llvm.call %0(%arg0) : (!llvm.i64) -> ()
// CHECK-NEXT: ret void
  llvm.return
}
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// An indirect call through a function-pointer argument stays indirect.
// CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}}) {
llvm.func @indirect_call(%arg0: !llvm<"i32 (float)*">, %arg1: !llvm.float) -> !llvm.i32 {
// CHECK-NEXT: %3 = call i32 %0(float %1)
  %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> !llvm.i32
// CHECK-NEXT: ret i32 %3
  llvm.return %0 : !llvm.i32
}
|
|
|
|
|
2019-02-26 18:02:26 +08:00
|
|
|
//
// Check that we properly construct phi nodes in the blocks that have the same
// predecessor more than once.
//
|
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// Block arguments reaching ^bb1 from two different predecessors become a phi
// node with one incoming value per predecessor.
// CHECK-LABEL: define void @cond_br_arguments(i1 {{%.*}}, i1 {{%.*}}) {
llvm.func @cond_br_arguments(%arg0: !llvm.i1, %arg1: !llvm.i1) {
// CHECK-NEXT: br i1 %0, label %3, label %5
  llvm.cond_br %arg0, ^bb1(%arg0 : !llvm.i1), ^bb2
// CHECK: 3:
// CHECK-NEXT: %4 = phi i1 [ %1, %5 ], [ %0, %2 ]
^bb1(%0 : !llvm.i1):
// CHECK-NEXT: ret void
  llvm.return
// CHECK: 5:
^bb2:
// CHECK-NEXT: br label %3
  llvm.br ^bb1(%arg1 : !llvm.i1)
}
|
2019-03-07 01:34:53 +08:00
|
|
|
|
2019-08-05 16:39:26 +08:00
|
|
|
// The llvm.noalias argument attribute maps to the LLVM noalias parameter
// attribute.
// CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}}) {
llvm.func @llvm_noalias(%arg0: !llvm<"float*"> {llvm.noalias = true}) {
  llvm.return
}
|
2019-04-08 08:11:02 +08:00
|
|
|
|
2019-08-23 09:58:51 +08:00
|
|
|
// A variadic function declaration keeps its "..." signature.
// CHECK-LABEL: @llvm_varargs(...)
llvm.func @llvm_varargs(...)
|
2019-05-23 17:03:14 +08:00
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
// Integer/pointer round-trip casts translate to inttoptr and ptrtoint.
llvm.func @intpointerconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
// CHECK: %2 = inttoptr i32 %0 to i32*
// CHECK-NEXT: %3 = ptrtoint i32* %2 to i32
  %1 = llvm.inttoptr %arg0 : !llvm.i32 to !llvm<"i32*">
  %2 = llvm.ptrtoint %1 : !llvm<"i32*"> to !llvm.i32
  llvm.return %2 : !llvm.i32
}
|
|
|
|
|
2019-11-01 22:31:33 +08:00
|
|
|
// Signed and unsigned int<->float conversions translate to
// sitofp/fptosi/uitofp/fptoui.
llvm.func @fpconversion(%arg0 : !llvm.i32) -> !llvm.i32 {
// CHECK: %2 = sitofp i32 %0 to float
// CHECK-NEXT: %3 = fptosi float %2 to i32
// CHECK-NEXT: %4 = uitofp i32 %3 to float
// CHECK-NEXT: %5 = fptoui float %4 to i32
  %1 = llvm.sitofp %arg0 : !llvm.i32 to !llvm.float
  %2 = llvm.fptosi %1 : !llvm.float to !llvm.i32
  %3 = llvm.uitofp %2 : !llvm.i32 to !llvm.float
  %4 = llvm.fptoui %3 : !llvm.float to !llvm.i32
  llvm.return %4 : !llvm.i32
}
|
|
|
|
|
|
|
|
// Address-space changing pointer casts translate to addrspacecast.
// CHECK-LABEL: @addrspace
llvm.func @addrspace(%arg0 : !llvm<"i32*">) -> !llvm<"i32 addrspace(2)*"> {
// CHECK: %2 = addrspacecast i32* %0 to i32 addrspace(2)*
  %1 = llvm.addrspacecast %arg0 : !llvm<"i32*"> to !llvm<"i32 addrspace(2)*">
  llvm.return %1 : !llvm<"i32 addrspace(2)*">
}
|
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
// A string attribute constant becomes an LLVM character-array constant.
llvm.func @stringconstant() -> !llvm<"i8*"> {
  %1 = llvm.mlir.constant("Hello world!") : !llvm<"i8*">
// CHECK: ret [12 x i8] c"Hello world!"
  llvm.return %1 : !llvm<"i8*">
}
|
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
// llvm.unreachable translates to the unreachable terminator.
llvm.func @noreach() {
// CHECK: unreachable
  llvm.unreachable
}
|
2019-08-09 09:29:23 +08:00
|
|
|
|
|
|
|
// All ordered and unordered floating-point comparison predicates translate
// to the corresponding fcmp condition codes.
// CHECK-LABEL: define void @fcmp
llvm.func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
// CHECK: fcmp oeq float %0, %1
// CHECK-NEXT: fcmp ogt float %0, %1
// CHECK-NEXT: fcmp oge float %0, %1
// CHECK-NEXT: fcmp olt float %0, %1
// CHECK-NEXT: fcmp ole float %0, %1
// CHECK-NEXT: fcmp one float %0, %1
// CHECK-NEXT: fcmp ord float %0, %1
// CHECK-NEXT: fcmp ueq float %0, %1
// CHECK-NEXT: fcmp ugt float %0, %1
// CHECK-NEXT: fcmp uge float %0, %1
// CHECK-NEXT: fcmp ult float %0, %1
// CHECK-NEXT: fcmp ule float %0, %1
// CHECK-NEXT: fcmp une float %0, %1
// CHECK-NEXT: fcmp uno float %0, %1
  %0 = llvm.fcmp "oeq" %arg0, %arg1 : !llvm.float
  %1 = llvm.fcmp "ogt" %arg0, %arg1 : !llvm.float
  %2 = llvm.fcmp "oge" %arg0, %arg1 : !llvm.float
  %3 = llvm.fcmp "olt" %arg0, %arg1 : !llvm.float
  %4 = llvm.fcmp "ole" %arg0, %arg1 : !llvm.float
  %5 = llvm.fcmp "one" %arg0, %arg1 : !llvm.float
  %6 = llvm.fcmp "ord" %arg0, %arg1 : !llvm.float
  %7 = llvm.fcmp "ueq" %arg0, %arg1 : !llvm.float
  %8 = llvm.fcmp "ugt" %arg0, %arg1 : !llvm.float
  %9 = llvm.fcmp "uge" %arg0, %arg1 : !llvm.float
  %10 = llvm.fcmp "ult" %arg0, %arg1 : !llvm.float
  %11 = llvm.fcmp "ule" %arg0, %arg1 : !llvm.float
  %12 = llvm.fcmp "une" %arg0, %arg1 : !llvm.float
  %13 = llvm.fcmp "uno" %arg0, %arg1 : !llvm.float
  llvm.return
}
|
2019-08-09 20:24:47 +08:00
|
|
|
|
|
|
|
// Vector element access and shuffles with i32 indices.
// CHECK-LABEL: @vect
llvm.func @vect(%arg0: !llvm<"<4 x float>">, %arg1: !llvm.i32, %arg2: !llvm.float) {
// CHECK-NEXT: extractelement <4 x float> {{.*}}, i32
// CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i32
// CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 7>
  %0 = llvm.extractelement %arg0[%arg1 : !llvm.i32] : !llvm<"<4 x float>">
  %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i32] : !llvm<"<4 x float>">
  %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm<"<4 x float>">, !llvm<"<4 x float>">
  llvm.return
}
|
2019-08-19 09:54:50 +08:00
|
|
|
|
2019-11-26 06:44:20 +08:00
|
|
|
// Vector element access also works with i64 indices.
// CHECK-LABEL: @vect_i64idx
llvm.func @vect_i64idx(%arg0: !llvm<"<4 x float>">, %arg1: !llvm.i64, %arg2: !llvm.float) {
// CHECK-NEXT: extractelement <4 x float> {{.*}}, i64
// CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i64
  %0 = llvm.extractelement %arg0[%arg1 : !llvm.i64] : !llvm<"<4 x float>">
  %1 = llvm.insertelement %arg2, %arg0[%arg1 : !llvm.i64] : !llvm<"<4 x float>">
  llvm.return
}
|
|
|
|
|
2019-08-19 09:54:50 +08:00
|
|
|
// Alignment 0 emits no align attribute; a nonzero alignment is forwarded.
// CHECK-LABEL: @alloca
llvm.func @alloca(%size : !llvm.i64) {
// CHECK: alloca
// CHECK-NOT: align
  llvm.alloca %size x !llvm.i32 {alignment = 0} : (!llvm.i64) -> (!llvm<"i32*">)
// CHECK-NEXT: alloca {{.*}} align 8
  llvm.alloca %size x !llvm.i32 {alignment = 8} : (!llvm.i64) -> (!llvm<"i32*">)
  llvm.return
}
|
2019-08-23 09:58:51 +08:00
|
|
|
|
|
|
|
// A sparse elements attribute is densified when translated to an LLVM
// vector constant.
// CHECK-LABEL: @constants
llvm.func @constants() -> !llvm<"<4 x float>"> {
// CHECK: ret <4 x float> <float 4.2{{0*}}e+01, float 0.{{0*}}e+00, float 0.{{0*}}e+00, float 0.{{0*}}e+00>
  %0 = llvm.mlir.constant(sparse<[[0]], [4.2e+01]> : vector<4xf32>) : !llvm<"<4 x float>">
  llvm.return %0 : !llvm<"<4 x float>">
}
|
2019-08-27 05:18:47 +08:00
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
// Floating-point truncation, extension and conversion to integer.
// CHECK-LABEL: @fp_casts
llvm.func @fp_casts(%fp1 : !llvm<"float">, %fp2 : !llvm<"double">) -> !llvm.i16 {
// CHECK: fptrunc double {{.*}} to float
  %a = llvm.fptrunc %fp2 : !llvm<"double"> to !llvm<"float">
// CHECK: fpext float {{.*}} to double
  %b = llvm.fpext %fp1 : !llvm<"float"> to !llvm<"double">
// CHECK: fptosi double {{.*}} to i16
  %c = llvm.fptosi %b : !llvm<"double"> to !llvm.i16
  llvm.return %c : !llvm.i16
}
|
2019-09-22 07:14:07 +08:00
|
|
|
|
2019-10-10 16:33:33 +08:00
|
|
|
// Integer width changes translate to sext/zext/trunc.
// CHECK-LABEL: @integer_extension_and_truncation
llvm.func @integer_extension_and_truncation(%a : !llvm.i32) {
// CHECK: sext i32 {{.*}} to i64
// CHECK: zext i32 {{.*}} to i64
// CHECK: trunc i32 {{.*}} to i16
  %0 = llvm.sext %a : !llvm.i32 to !llvm.i64
  %1 = llvm.zext %a : !llvm.i32 to !llvm.i64
  %2 = llvm.trunc %a : !llvm.i32 to !llvm.i16
  llvm.return
}
|
2019-10-11 21:13:25 +08:00
|
|
|
|
|
|
|
// Check that the auxiliary `null` operation is converted into a `null` value.
// CHECK-LABEL: @null
llvm.func @null() -> !llvm<"i32*"> {
  %0 = llvm.mlir.null : !llvm<"i32*">
// CHECK: ret i32* null
  llvm.return %0 : !llvm<"i32*">
}
|