//===- SparseUtils.cpp - Sparse Utils for MLIR execution ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a light-weight runtime support library that is useful
// for sparse tensor manipulations. The functionality provided in this library
// is meant to simplify benchmarking, testing, and debugging MLIR code that
// operates on sparse tensors. The provided functionality is **not** part
// of core MLIR, however.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/CRunnerUtils.h"

#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cinttypes>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

//===----------------------------------------------------------------------===//
//
// Internal support for storing and reading sparse tensors.
//
// The following memory-resident sparse storage schemes are supported:
//
// (a) A coordinate scheme for temporarily storing and lexicographically
//     sorting a sparse tensor by index.
//
// (b) A "one-size-fits-all" sparse storage scheme defined by per-rank
//     sparse/dense annotations to be used by generated MLIR code.
//
// The following external formats are supported:
//
// (1) Matrix Market Exchange (MME): *.mtx
//     https://math.nist.gov/MatrixMarket/formats.html
//
// (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
//     http://frostt.io/tensors/file-formats.html
//
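// For illustration (hypothetical file contents, not shipped with MLIR), the
// 2x3 matrix with nonzeros A[0][1] = 1.0, A[1][0] = 2.0, and A[1][2] = 3.0
// would read in MME as a header line, an "M N NNZ" line, and 1-based
// "i j value" triples:
//
//   %%MatrixMarket matrix coordinate real general
//   2 3 3
//   1 2 1.0
//   2 1 2.0
//   2 3 3.0
//
// and, under the "extended" FROSTT convention assumed below, as a "rank nnz"
// line, a line with the dimension sizes, and 1-based entries:
//
//   2 3
//   2 3
//   1 2 1.0
//   2 1 2.0
//   2 3 3.0
//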
//===----------------------------------------------------------------------===//

namespace {

/// A sparse tensor element in coordinate scheme (value and indices).
/// For example, a rank-1 vector element would look like
///   ({i}, a[i])
/// and a rank-5 tensor element like
///   ({i,j,k,l,m}, a[i,j,k,l,m])
struct Element {
  Element(const std::vector<uint64_t> &ind, double val)
      : indices(ind), value(val) {}
  std::vector<uint64_t> indices;
  double value;
};

/// A memory-resident sparse tensor in coordinate scheme (collection of
/// elements). This data structure is used to read a sparse tensor from
/// external file format into memory and sort the elements lexicographically
/// by indices before passing it back to the client (most packed storage
/// formats require the elements to appear in lexicographic index order).
struct SparseTensor {
public:
  SparseTensor(const std::vector<uint64_t> &szs, uint64_t capacity)
      : sizes(szs), pos(0) {
    elements.reserve(capacity);
  }
  /// Adds element as indices and value.
  void add(const std::vector<uint64_t> &ind, double val) {
    assert(getRank() == ind.size());
    for (uint64_t r = 0, rank = getRank(); r < rank; r++)
      assert(ind[r] < sizes[r]); // within bounds
    elements.emplace_back(Element(ind, val));
  }
  /// Sorts elements lexicographically by index.
  void sort() { std::sort(elements.begin(), elements.end(), lexOrder); }
  /// Primitive one-time iteration.
  const Element &next() { return elements[pos++]; }
  /// Returns rank.
  uint64_t getRank() const { return sizes.size(); }
  /// Getter for sizes array.
  const std::vector<uint64_t> &getSizes() const { return sizes; }
  /// Getter for elements array.
  const std::vector<Element> &getElements() const { return elements; }

private:
  /// Returns true if indices of e1 < indices of e2.
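  /// For example, {1,2} < {1,3} < {2,0}.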
  static bool lexOrder(const Element &e1, const Element &e2) {
    assert(e1.indices.size() == e2.indices.size());
    for (uint64_t r = 0, rank = e1.indices.size(); r < rank; r++) {
      if (e1.indices[r] == e2.indices[r])
        continue;
      return e1.indices[r] < e2.indices[r];
    }
    return false;
  }
  std::vector<uint64_t> sizes; // per-rank dimension sizes
  std::vector<Element> elements;
  uint64_t pos;
};

/// Abstract base class of sparse tensor storage. Note that we use
/// function overloading to implement "partial" method specialization.
class SparseTensorStorageBase {
public:
  virtual uint64_t getDimSize(uint64_t) = 0;
  virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
  virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
  virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
  virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
  virtual void getValues(std::vector<double> **) { fatal("valf64"); }
  virtual void getValues(std::vector<float> **) { fatal("valf32"); }
  virtual ~SparseTensorStorageBase() {}

private:
  void fatal(const char *tp) {
    fprintf(stderr, "unsupported %s\n", tp);
    exit(1);
  }
};

/// A memory-resident sparse tensor using a storage scheme based on per-rank
/// annotations on dense/sparse. This data structure provides a bufferized
/// form of an imaginary SparseTensorType, until such a type becomes a
/// first-class citizen of MLIR. In contrast to generating setup methods for
/// each differently annotated sparse tensor, this method provides a convenient
/// "one-size-fits-all" solution that simply takes an input tensor and
/// annotations to implement all required setup in a general manner.
template <typename P, typename I, typename V>
class SparseTensorStorage : public SparseTensorStorageBase {
public:
  /// Constructs sparse tensor storage scheme following the given
  /// per-rank dimension dense/sparse annotations.
  SparseTensorStorage(SparseTensor *tensor, bool *sparsity)
      : sizes(tensor->getSizes()), pointers(getRank()), indices(getRank()) {
    // Provide hints on capacity.
    // TODO: needs fine-tuning based on sparsity
    uint64_t nnz = tensor->getElements().size();
    values.reserve(nnz);
    for (uint64_t d = 0, s = 1, rank = getRank(); d < rank; d++) {
      s *= sizes[d];
      if (sparsity[d]) {
        pointers[d].reserve(s + 1);
        indices[d].reserve(s);
        s = 1;
      }
    }
    // Then setup the tensor.
    traverse(tensor, sparsity, 0, nnz, 0);
  }

  virtual ~SparseTensorStorage() {}

  uint64_t getRank() const { return sizes.size(); }

  uint64_t getDimSize(uint64_t d) override { return sizes[d]; }

  // Partially specialize these three methods based on template types.
  void getPointers(std::vector<P> **out, uint64_t d) override {
    *out = &pointers[d];
  }
  void getIndices(std::vector<I> **out, uint64_t d) override {
    *out = &indices[d];
  }
  void getValues(std::vector<V> **out) override { *out = &values; }

private:
  /// Initializes sparse tensor storage scheme from a memory-resident
  /// representation of an external sparse tensor. This method prepares
  /// the pointers and indices arrays under the given per-rank dimension
  /// dense/sparse annotations.
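  /// For example (worked by hand; the code does not verify this): with
  /// annotations {dense, sparse} on the 2x3 matrix [[0, 1, 0], [2, 0, 3]],
  /// the traversal yields the familiar CSR form
  ///   pointers[1] = {0, 1, 3}, indices[1] = {1, 0, 2},
  ///   values = {1.0, 2.0, 3.0}.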
  void traverse(SparseTensor *tensor, bool *sparsity, uint64_t lo, uint64_t hi,
                uint64_t d) {
    const std::vector<Element> &elements = tensor->getElements();
    // Once dimensions are exhausted, insert the numerical values.
    if (d == getRank()) {
      values.push_back(lo < hi ? elements[lo].value : 0.0);
      return;
    }
    // Prepare a sparse pointer structure at this dimension.
    if (sparsity[d] && pointers[d].empty())
      pointers[d].push_back(0);
    // Visit all elements in this interval.
    uint64_t full = 0;
    while (lo < hi) {
      // Find segment in interval with same index elements in this dimension.
      uint64_t idx = elements[lo].indices[d];
      uint64_t seg = lo + 1;
      while (seg < hi && elements[seg].indices[d] == idx)
        seg++;
      // Handle segment in interval for sparse or dense dimension.
      if (sparsity[d]) {
        indices[d].push_back(idx);
      } else {
        for (; full < idx; full++)
          traverse(tensor, sparsity, 0, 0, d + 1); // pass empty
        full++;
      }
      traverse(tensor, sparsity, lo, seg, d + 1);
      // And move on to next segment in interval.
      lo = seg;
    }
    // Finalize the sparse pointer structure at this dimension.
    if (sparsity[d]) {
      pointers[d].push_back(indices[d].size());
    } else {
      for (uint64_t sz = tensor->getSizes()[d]; full < sz; full++)
        traverse(tensor, sparsity, 0, 0, d + 1); // pass empty
    }
  }

private:
  std::vector<uint64_t> sizes; // per-rank dimension sizes
  std::vector<std::vector<P>> pointers;
  std::vector<std::vector<I>> indices;
  std::vector<V> values;
};

/// Templated reader.
template <typename P, typename I, typename V>
void *newSparseTensor(char *filename, bool *sparsity, uint64_t size) {
  uint64_t idata[64];
  SparseTensor *t = static_cast<SparseTensor *>(openTensorC(filename, idata));
  assert(size == t->getRank()); // sparsity array must match rank
  SparseTensorStorageBase *tensor =
      new SparseTensorStorage<P, I, V>(t, sparsity);
  delete t;
  return tensor;
}

/// Helper to convert string to lower case.
static char *toLower(char *token) {
  for (char *c = token; *c; c++)
    *c = tolower(*c);
  return token;
}

/// Read the MME header of a general sparse matrix of type real.
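/// On success, idata[0] holds the rank (always 2 for matrices), idata[1]
/// the number of nonzeros, and idata[2..3] the dimension sizes M and N.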
static void readMMEHeader(FILE *file, char *name, uint64_t *idata) {
  char line[1025];
  char header[64];
  char object[64];
  char format[64];
  char field[64];
  char symmetry[64];
  // Read header line.
  if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
             symmetry) != 5) {
    fprintf(stderr, "Corrupt header in %s\n", name);
    exit(1);
  }
  // Make sure this is a general sparse matrix.
  if (strcmp(toLower(header), "%%matrixmarket") ||
      strcmp(toLower(object), "matrix") ||
      strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
      strcmp(toLower(symmetry), "general")) {
    fprintf(stderr,
            "Cannot find a general sparse matrix with type real in %s\n", name);
    exit(1);
  }
  // Skip comments.
  while (1) {
    if (!fgets(line, 1025, file)) {
      fprintf(stderr, "Cannot find data in %s\n", name);
      exit(1);
    }
    if (line[0] != '%')
      break;
  }
  // Next line contains M N NNZ.
  idata[0] = 2; // rank
  if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
             idata + 1) != 3) {
    fprintf(stderr, "Cannot find size in %s\n", name);
    exit(1);
  }
}

/// Read the "extended" FROSTT header. Although not part of the documented
/// format, we assume that the file starts with optional comments followed
/// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
static void readExtFROSTTHeader(FILE *file, char *name, uint64_t *idata) {
  char line[1025];
  // Skip comments.
  while (1) {
    if (!fgets(line, 1025, file)) {
      fprintf(stderr, "Cannot find data in %s\n", name);
      exit(1);
    }
    if (line[0] != '#')
      break;
  }
  // Next line contains RANK and NNZ.
  if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
    fprintf(stderr, "Cannot find metadata in %s\n", name);
    exit(1);
  }
  // Followed by a line with the dimension sizes (one per rank).
  for (uint64_t r = 0; r < idata[0]; r++) {
    if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
      fprintf(stderr, "Cannot find dimension size %s\n", name);
      exit(1);
    }
  }
}

} // anonymous namespace

//===----------------------------------------------------------------------===//
//
// Public API of the sparse runtime support library that enables MLIR code
// to read a sparse tensor from an external format (MME or FROSTT).
//
// For example, a sparse matrix in MME can be read as follows.
//
//   %tensor = call @openTensor(%fileName, %idata)
//     : (!llvm.ptr<i8>, memref<?xindex>) -> (!llvm.ptr<i8>)
//   %rank = load %idata[%c0] : memref<?xindex>  # always 2 for MME
//   %nnz = load %idata[%c1] : memref<?xindex>
//   %m = load %idata[%c2] : memref<?xindex>
//   %n = load %idata[%c3] : memref<?xindex>
//   .. prepare reading in m x n sparse tensor A with nnz nonzero elements ..
//   scf.for %k = %c0 to %nnz step %c1 {
//     call @readTensorItem(%tensor, %idata, %ddata)
//       : (!llvm.ptr<i8>, memref<?xindex>, memref<?xf64>) -> ()
//     %i = load %idata[%c0] : memref<?xindex>
//     %j = load %idata[%c1] : memref<?xindex>
//     %d = load %ddata[%c0] : memref<?xf64>
//     .. process next nonzero element A[i][j] = d
//        where the elements appear in lexicographic order ..
//   }
//   call @closeTensor(%tensor) : (!llvm.ptr<i8>) -> ()
//
//
// Note that input parameters in the "MLIRized" version of a function mimic
// the data layout of a MemRef<?xT> (but cannot use a direct struct). The
// output parameter uses a direct struct.
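//
// For example, a single memref<?xindex> input expands into the five scalar
// parameters (base, data, offset, size, stride), which is why openTensor()
// below takes (ibase, idata, ioff, isize, istride) rather than one struct.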
//
//===----------------------------------------------------------------------===//

extern "C" {

/// Reads in a sparse tensor with the given filename. The call yields a
/// pointer to an opaque memory-resident sparse tensor object that is only
/// understood by other methods in the sparse runtime support library. An
/// array parameter is used to pass the rank, the number of nonzero elements,
/// and the dimension sizes (one per rank).
void *openTensorC(char *filename, uint64_t *idata) {
  // Open the file.
  FILE *file = fopen(filename, "r");
  if (!file) {
    fprintf(stderr, "Cannot find %s\n", filename);
    exit(1);
  }
  // Perform some file format dependent setup.
  if (strstr(filename, ".mtx")) {
    readMMEHeader(file, filename, idata);
  } else if (strstr(filename, ".tns")) {
    readExtFROSTTHeader(file, filename, idata);
  } else {
    fprintf(stderr, "Unknown format %s\n", filename);
    exit(1);
  }
  // Prepare sparse tensor object with per-rank dimension sizes
  // and the number of nonzeros as initial capacity.
  uint64_t rank = idata[0];
  uint64_t nnz = idata[1];
  std::vector<uint64_t> indices(rank);
  for (uint64_t r = 0; r < rank; r++)
    indices[r] = idata[2 + r];
  SparseTensor *tensor = new SparseTensor(indices, nnz);
  // Read all nonzero elements.
  for (uint64_t k = 0; k < nnz; k++) {
    for (uint64_t r = 0; r < rank; r++) {
      if (fscanf(file, "%" PRIu64, &indices[r]) != 1) {
        fprintf(stderr, "Cannot find next index in %s\n", filename);
        exit(1);
      }
      indices[r]--; // 0-based index
    }
    double value;
    if (fscanf(file, "%lg\n", &value) != 1) {
      fprintf(stderr, "Cannot find next value in %s\n", filename);
      exit(1);
    }
    tensor->add(indices, value);
  }
  // Close the file and return sorted tensor.
  fclose(file);
  tensor->sort(); // sort lexicographically
  return tensor;
}

/// "MLIRized" version.
void *openTensor(char *filename, uint64_t *ibase, uint64_t *idata,
                 uint64_t ioff, uint64_t isize, uint64_t istride) {
  assert(istride == 1);
  return openTensorC(filename, idata + ioff);
}

/// Yields the next element from the given opaque sparse tensor object.
void readTensorItemC(void *tensor, uint64_t *idata, double *ddata) {
  const Element &e = static_cast<SparseTensor *>(tensor)->next();
  for (uint64_t r = 0, rank = e.indices.size(); r < rank; r++)
    idata[r] = e.indices[r];
  ddata[0] = e.value;
}

/// "MLIRized" version.
void readTensorItem(void *tensor, uint64_t *ibase, uint64_t *idata,
                    uint64_t ioff, uint64_t isize, uint64_t istride,
                    double *dbase, double *ddata, uint64_t doff, uint64_t dsize,
                    uint64_t dstride) {
  assert(istride == 1 && dstride == 1);
  readTensorItemC(tensor, idata + ioff, ddata + doff);
}

/// Closes the given opaque sparse tensor object, releasing its memory
/// resources. After this call, the opaque object cannot be used anymore.
void closeTensor(void *tensor) { delete static_cast<SparseTensor *>(tensor); }

/// Helper method to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
char *getTensorFilename(uint64_t id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  return env;
}
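
// For example (hypothetical shell invocation, not part of this library):
//
//   TENSOR0="matrix.mtx" mlir-cpu-runner -e entry ...
//
// makes getTensorFilename(0) return the string "matrix.mtx".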

//===----------------------------------------------------------------------===//
//
// Public API of the sparse runtime support library that supports an opaque
// implementation of a bufferized SparseTensor in MLIR. This could be replaced
// by actual codegen in MLIR.
//
//===----------------------------------------------------------------------===//

// Cannot use templates with C linkage.

struct MemRef1DU64 {
  const uint64_t *base;
  const uint64_t *data;
  uint64_t off;
  uint64_t sizes[1];
  uint64_t strides[1];
};

struct MemRef1DU32 {
  const uint32_t *base;
  const uint32_t *data;
  uint64_t off;
  uint64_t sizes[1];
  uint64_t strides[1];
};

struct MemRef1DF64 {
  const double *base;
  const double *data;
  uint64_t off;
  uint64_t sizes[1];
  uint64_t strides[1];
};

struct MemRef1DF32 {
  const float *base;
  const float *data;
  uint64_t off;
  uint64_t sizes[1];
  uint64_t strides[1];
};

enum TypeEnum : uint64_t { kF64 = 0, kF32 = 1, kU64 = 2, kU32 = 3 };
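
// For example, a caller passing (ptrTp, indTp, valTp) = (kU64, kU32, kF64)
// obtains a SparseTensorStorage<uint64_t, uint32_t, double> instance below.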

void *newSparseTensor(char *filename, bool *abase, bool *adata, uint64_t aoff,
                      uint64_t asize, uint64_t astride, uint64_t ptrTp,
                      uint64_t indTp, uint64_t valTp) {
  assert(astride == 1);
  bool *sparsity = abase + aoff;
  if (ptrTp == kU64 && indTp == kU64 && valTp == kF64)
    return newSparseTensor<uint64_t, uint64_t, double>(filename, sparsity,
                                                       asize);
  if (ptrTp == kU64 && indTp == kU64 && valTp == kF32)
    return newSparseTensor<uint64_t, uint64_t, float>(filename, sparsity,
                                                      asize);
  if (ptrTp == kU64 && indTp == kU32 && valTp == kF64)
    return newSparseTensor<uint64_t, uint32_t, double>(filename, sparsity,
                                                       asize);
  if (ptrTp == kU64 && indTp == kU32 && valTp == kF32)
    return newSparseTensor<uint64_t, uint32_t, float>(filename, sparsity,
                                                      asize);
  if (ptrTp == kU32 && indTp == kU64 && valTp == kF64)
    return newSparseTensor<uint32_t, uint64_t, double>(filename, sparsity,
                                                       asize);
  if (ptrTp == kU32 && indTp == kU64 && valTp == kF32)
    return newSparseTensor<uint32_t, uint64_t, float>(filename, sparsity,
                                                      asize);
  if (ptrTp == kU32 && indTp == kU32 && valTp == kF64)
    return newSparseTensor<uint32_t, uint32_t, double>(filename, sparsity,
                                                       asize);
  if (ptrTp == kU32 && indTp == kU32 && valTp == kF32)
    return newSparseTensor<uint32_t, uint32_t, float>(filename, sparsity,
                                                      asize);
  fputs("unsupported combination of types\n", stderr);
  exit(1);
}

uint64_t sparseDimSize(void *tensor, uint64_t d) {
  return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
}
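
// Note that the memrefs returned by the query methods below wrap the
// internal vectors directly (no copy is made), so the data becomes invalid
// once delSparseTensor() is called.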

MemRef1DU64 sparsePointers64(void *tensor, uint64_t d) {
  std::vector<uint64_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

MemRef1DU32 sparsePointers32(void *tensor, uint64_t d) {
  std::vector<uint32_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

MemRef1DU64 sparseIndices64(void *tensor, uint64_t d) {
  std::vector<uint64_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getIndices(&v, d);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

MemRef1DU32 sparseIndices32(void *tensor, uint64_t d) {
  std::vector<uint32_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getIndices(&v, d);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

MemRef1DF64 sparseValuesF64(void *tensor) {
  std::vector<double> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getValues(&v);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

MemRef1DF32 sparseValuesF32(void *tensor) {
  std::vector<float> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getValues(&v);
  return {v->data(), v->data(), 0, {v->size()}, {1}};
}

void delSparseTensor(void *tensor) {
  delete static_cast<SparseTensorStorageBase *>(tensor);
}

} // extern "C"

#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS