[mlir][sparse] Rename index_t to index_type again

While testing LLVM 14.0.0 rc1 on Solaris, I ran into a compile failure:

                   from /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp:22:
  /usr/include/sys/types.h:103:16: error: conflicting declaration ‘typedef short int index_t’
    103 | typedef short  index_t;
        |                ^~~~~~~
  In file included from /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp:17:
  /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h:26:7: note: previous declaration as ‘using index_t = uint64_t’
     26 | using index_t = uint64_t;
        |       ^~~~~~~

The same issue had already occurred in the past and was fixed in D72619
<https://reviews.llvm.org/D72619>; a more detailed explanation can be
found there.
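
The clash is easy to reproduce in isolation; a minimal sketch, assuming a
platform whose <sys/types.h> provides the BSD-style typedef at global scope,
as Solaris does:

  #include <cstdint>

  typedef short index_t;        // global-scope typedef from the system header
  using index_t = uint64_t;     // error: conflicting declaration, since the
                                // redeclaration names a different type

Renaming the MLIR-side alias to index_type avoids the collision entirely.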

Tested on `amd64-pc-solaris2.11` and `sparcv9-solaris2.11`.

Differential Revision: https://reviews.llvm.org/D119323
Rainer Orth, 2022-02-09 21:59:52 +01:00
commit d2215e79ac (parent d39f4ac494)
2 changed files with 28 additions and 27 deletions

mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h

@@ -23,7 +23,7 @@ extern "C" {
 /// type is 64-bit, but targets with different "index" bit widths should link
 /// with an alternatively built runtime support library.
 // TODO: support such targets?
-using index_t = uint64_t;
+using index_type = uint64_t;
 /// Encoding of overhead types (both pointer overhead and indices
 /// overhead), for "overloading" @newSparseTensor.
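
Note that this alias lives inside an extern "C" block (see the hunk context
above). A linkage specification affects linkage only and does not open a new
scope, so the alias is still declared at global scope, exactly where the
Solaris typedef sits. A small illustrative sketch (the static_assert is
hypothetical, added here only to show the visibility):

  #include <cstdint>
  #include <type_traits>

  extern "C" {
  using index_type = uint64_t;  // linkage specification: no new scope opened
  }

  // The alias is visible at global scope, which is why the old name index_t
  // collided with the system typedef there.
  static_assert(std::is_same<::index_type, uint64_t>::value, "global scope");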

mlir/lib/ExecutionEngine/SparseTensorUtils.cpp

@@ -796,7 +796,7 @@ extern "C" {
 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                     \
   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,     \
-                           index_t d) {                                       \
+                           index_type d) {                                    \
     assert(ref &&tensor);                                                     \
     std::vector<TYPE> *v;                                                     \
     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);               \
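
For orientation in this macro-heavy file, here is a hand-expanded sketch of
one instantiation that appears further down, IMPL_GETOVERHEAD(sparsePointers64,
uint64_t, getPointers); the tail of the macro body is not shown in the hunk
above, so it is elided here:

  void _mlir_ciface_sparsePointers64(StridedMemRefType<uint64_t, 1> *ref,
                                     void *tensor, index_type d) {
    assert(ref && tensor);
    std::vector<uint64_t> *v;
    static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
    // ... (rest of the macro body, not shown in the hunk above)
  }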
@@ -808,15 +808,15 @@ extern "C" {
 #define IMPL_ADDELT(NAME, TYPE)                                               \
   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                         \
-                            StridedMemRefType<index_t, 1> *iref,              \
-                            StridedMemRefType<index_t, 1> *pref) {            \
+                            StridedMemRefType<index_type, 1> *iref,           \
+                            StridedMemRefType<index_type, 1> *pref) {         \
     assert(tensor &&iref &&pref);                                             \
     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                   \
     assert(iref->sizes[0] == pref->sizes[0]);                                 \
-    const index_t *indx = iref->data + iref->offset;                          \
-    const index_t *perm = pref->data + pref->offset;                          \
+    const index_type *indx = iref->data + iref->offset;                       \
+    const index_type *perm = pref->data + pref->offset;                       \
     uint64_t isize = iref->sizes[0];                                          \
-    std::vector<index_t> indices(isize);                                      \
+    std::vector<index_type> indices(isize);                                   \
     for (uint64_t r = 0; r < isize; r++)                                      \
       indices[perm[r]] = indx[r];                                             \
     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);        \
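
The permutation loop in IMPL_ADDELT scatters the incoming coordinates into
target dimension order; a self-contained sketch with made-up values:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  int main() {
    using index_type = uint64_t;
    const index_type indx[] = {7, 3, 5}; // coordinates in source order
    const index_type perm[] = {2, 0, 1}; // source dim r maps to slot perm[r]
    std::vector<index_type> indices(3);
    for (uint64_t r = 0; r < 3; r++)
      indices[perm[r]] = indx[r];
    assert(indices[0] == 3 && indices[1] == 5 && indices[2] == 7);
    return 0;
  }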
@@ -824,11 +824,12 @@ extern "C" {
   }
 #define IMPL_GETNEXT(NAME, V)                                                 \
-  bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *iref, \
+  bool _mlir_ciface_##NAME(void *tensor,                                      \
+                           StridedMemRefType<index_type, 1> *iref,            \
                            StridedMemRefType<V, 0> *vref) {                   \
     assert(tensor &&iref &&vref);                                             \
     assert(iref->strides[0] == 1);                                            \
-    index_t *indx = iref->data + iref->offset;                                \
+    index_type *indx = iref->data + iref->offset;                             \
     V *value = vref->data + vref->offset;                                     \
     const uint64_t isize = iref->sizes[0];                                    \
     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                    \
@@ -844,30 +845,30 @@ extern "C" {
   }
 #define IMPL_LEXINSERT(NAME, V)                                               \
-  void _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *cref, \
-                           V val) {                                           \
+  void _mlir_ciface_##NAME(void *tensor,                                      \
+                           StridedMemRefType<index_type, 1> *cref, V val) {   \
     assert(tensor &&cref);                                                    \
     assert(cref->strides[0] == 1);                                            \
-    index_t *cursor = cref->data + cref->offset;                              \
+    index_type *cursor = cref->data + cref->offset;                           \
     assert(cursor);                                                           \
     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val);   \
   }
 #define IMPL_EXPINSERT(NAME, V)                                               \
   void _mlir_ciface_##NAME(                                                   \
-      void *tensor, StridedMemRefType<index_t, 1> *cref,                      \
+      void *tensor, StridedMemRefType<index_type, 1> *cref,                   \
       StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref,        \
-      StridedMemRefType<index_t, 1> *aref, index_t count) {                   \
+      StridedMemRefType<index_type, 1> *aref, index_type count) {             \
     assert(tensor &&cref &&vref &&fref &&aref);                               \
     assert(cref->strides[0] == 1);                                            \
     assert(vref->strides[0] == 1);                                            \
     assert(fref->strides[0] == 1);                                            \
     assert(aref->strides[0] == 1);                                            \
     assert(vref->sizes[0] == fref->sizes[0]);                                 \
-    index_t *cursor = cref->data + cref->offset;                              \
+    index_type *cursor = cref->data + cref->offset;                           \
     V *values = vref->data + vref->offset;                                    \
     bool *filled = fref->data + fref->offset;                                 \
-    index_t *added = aref->data + aref->offset;                               \
+    index_type *added = aref->data + aref->offset;                            \
     static_cast<SparseTensorStorageBase *>(tensor)->expInsert(                \
         cursor, values, filled, added, count);                                \
   }
@@ -883,11 +884,11 @@ extern "C" {
     delete coo;                                                               \
   }
-// Assume index_t is in fact uint64_t, so that _mlir_ciface_newSparseTensor
+// Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
 // can safely rewrite kIndex to kU64. We make this assertion to guarantee
 // that this file cannot get out of sync with its header.
-static_assert(std::is_same<index_t, uint64_t>::value,
-              "Expected index_t == uint64_t");
+static_assert(std::is_same<index_type, uint64_t>::value,
+              "Expected index_type == uint64_t");
 /// Constructs a new sparse tensor. This is the "swiss army knife"
 /// method for materializing sparse tensors into the computation.
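
The static_assert above is what keeps this file and its header in lockstep:
if a port ever changed the alias in the header without updating the runtime,
compilation stops here instead of silently reinterpreting index buffers. A
minimal sketch of the failure mode it guards against (the narrower alias is
hypothetical):

  #include <cstdint>
  #include <type_traits>

  using index_type = uint32_t;  // hypothetical: a port narrows the alias

  static_assert(std::is_same<index_type, uint64_t>::value,
                "Expected index_type == uint64_t");  // now fails to compile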
@@ -901,8 +902,8 @@ static_assert(std::is_same<index_t, uint64_t>::value,
 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
 void *
 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
-                             StridedMemRefType<index_t, 1> *sref,
-                             StridedMemRefType<index_t, 1> *pref,
+                             StridedMemRefType<index_type, 1> *sref,
+                             StridedMemRefType<index_type, 1> *pref,
                              OverheadType ptrTp, OverheadType indTp,
                              PrimaryType valTp, Action action, void *ptr) {
   assert(aref && sref && pref);
@@ -910,8 +911,8 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const DimLevelType *sparsity = aref->data + aref->offset;
-  const index_t *sizes = sref->data + sref->offset;
-  const index_t *perm = pref->data + pref->offset;
+  const index_type *sizes = sref->data + sref->offset;
+  const index_type *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
   // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
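
The kIndex-to-kU64 rewrite that the comment introduces is not visible in
this hunk; a hypothetical sketch of what such a rewrite looks like, using
the OverheadType enumerators named in the comments above (the exact
statements follow this hunk in the file and may differ):

  // Hypothetical sketch only; sound because of the static_assert above.
  if (ptrTp == OverheadType::kIndex)
    ptrTp = OverheadType::kU64;
  if (indTp == OverheadType::kIndex)
    indTp = OverheadType::kU64;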
@@ -1010,14 +1011,14 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
 }
 /// Methods that provide direct access to pointers.
-IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
+IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
 /// Methods that provide direct access to indices.
-IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
+IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
@@ -1092,7 +1093,7 @@ IMPL_OUT(outSparseTensorI8, int8_t)
 /// Helper method to read a sparse tensor filename from the environment,
 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-char *getTensorFilename(index_t id) {
+char *getTensorFilename(index_type id) {
   char var[80];
   sprintf(var, "TENSOR%" PRIu64, id);
   char *env = getenv(var);
@@ -1100,7 +1101,7 @@ char *getTensorFilename(index_t id) {
 }
 /// Returns size of sparse tensor in given dimension.
-index_t sparseDimSize(void *tensor, index_t d) {
+index_type sparseDimSize(void *tensor, index_type d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
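
For completeness, a sketch of how a client calls these renamed entry points,
assuming the runtime support library is linked and the tensor handle was
obtained earlier from _mlir_ciface_newSparseTensor (that call is elided);
the declarations mirror the signatures shown above, which are normally
pulled in from SparseTensorUtils.h:

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>

  using index_type = uint64_t;

  extern "C" char *getTensorFilename(index_type id);
  extern "C" index_type sparseDimSize(void *tensor, index_type d);

  void printInfo(void *tensor) {
    // Reads ${TENSOR0} from the environment, per the naming convention.
    printf("file: %s\n", getTensorFilename(0));
    printf("dim 0 size: %" PRIu64 "\n", sparseDimSize(tensor, 0));
  }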