forked from mindspore-Ecosystem/mindspore
!44765 Move SparseTensors to sparse_tensor.py
Merge pull request !44765 from 杨林枫/move_sparse_tensor
This commit is contained in:
commit
ccc94b1f4b
|
@ -20,7 +20,8 @@ from __future__ import absolute_import
|
||||||
import ast
|
import ast
|
||||||
import math
|
import math
|
||||||
|
|
||||||
from mindspore import RowTensorInner, RowTensor, SparseTensor, COOTensor, CSRTensor, MapTensor
|
from mindspore import RowTensor, SparseTensor, COOTensor, CSRTensor, MapTensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops import functional as F, composite as C
|
from mindspore.ops import functional as F, composite as C
|
||||||
from mindspore.ops import Primitive
|
from mindspore.ops import Primitive
|
||||||
from mindspore.ops.composite import multitype_ops
|
from mindspore.ops.composite import multitype_ops
|
||||||
|
|
|
@ -17,11 +17,12 @@
|
||||||
"""standard_method"""
|
"""standard_method"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
from mindspore import Tensor, CSRTensor, COOTensor, RowTensorInner
|
from mindspore import Tensor, CSRTensor, COOTensor
|
||||||
from mindspore import dtype as mstype
|
from mindspore import dtype as mstype
|
||||||
from mindspore._c_expression import Tensor as Tensor_
|
from mindspore._c_expression import Tensor as Tensor_
|
||||||
from mindspore.ops.function.sparse_func import sparse_add
|
from mindspore.ops.function.sparse_func import sparse_add
|
||||||
import mindspore.common._monad as monad
|
import mindspore.common._monad as monad
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
|
from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
|
||||||
_count, _extend, _dict_clear, _haskey, _update, _fromkeys
|
_count, _extend, _dict_clear, _haskey, _update, _fromkeys
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,8 @@ from mindspore.parallel._utils import _get_global_rank, _get_device_num, _get_gr
|
||||||
from mindspore.communication.management import get_group_size, create_group
|
from mindspore.communication.management import get_group_size, create_group
|
||||||
from mindspore.nn.cell import Cell
|
from mindspore.nn.cell import Cell
|
||||||
from mindspore.nn import SequentialCell
|
from mindspore.nn import SequentialCell
|
||||||
from mindspore.common import Tensor, RowTensorInner
|
from mindspore.common import Tensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common.parameter import Parameter, ParameterTuple
|
from mindspore.common.parameter import Parameter, ParameterTuple
|
||||||
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
|
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
|
||||||
from mindspore.ops import functional as F
|
from mindspore.ops import functional as F
|
||||||
|
|
|
@ -25,7 +25,8 @@ from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc,
|
||||||
from mindspore.common.dump import set_dump
|
from mindspore.common.dump import set_dump
|
||||||
from mindspore.common.parameter import Parameter, ParameterTuple
|
from mindspore.common.parameter import Parameter, ParameterTuple
|
||||||
from mindspore.common.seed import set_seed, get_seed
|
from mindspore.common.seed import set_seed, get_seed
|
||||||
from mindspore.common.tensor import Tensor, RowTensor, RowTensorInner, SparseTensor, COOTensor, CSRTensor, MapTensor
|
from mindspore.common.tensor import Tensor, MapTensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensor, RowTensorInner, SparseTensor, COOTensor, CSRTensor
|
||||||
from mindspore.common.mutable import mutable
|
from mindspore.common.mutable import mutable
|
||||||
from mindspore.common.jit_config import JitConfig
|
from mindspore.common.jit_config import JitConfig
|
||||||
|
|
||||||
|
@ -57,7 +58,7 @@ __all__ = [
|
||||||
]
|
]
|
||||||
|
|
||||||
__all__.extend([
|
__all__.extend([
|
||||||
"Tensor", "RowTensor", "RowTensorInner", "SparseTensor", "COOTensor", "CSRTensor", "MapTensor", # tensor
|
"Tensor", "RowTensor", "SparseTensor", "COOTensor", "CSRTensor", "MapTensor", # tensor
|
||||||
"no_recursive", "ms_function", "ms_class", 'jit', 'jit_class', # api
|
"no_recursive", "ms_function", "ms_class", 'jit', 'jit_class', # api
|
||||||
"Parameter", "ParameterTuple", # parameter
|
"Parameter", "ParameterTuple", # parameter
|
||||||
"dtype",
|
"dtype",
|
||||||
|
|
|
@ -31,9 +31,9 @@ from mindspore import context
|
||||||
from mindspore import log as logger
|
from mindspore import log as logger
|
||||||
from mindspore._extends.remote import kernel_build_server
|
from mindspore._extends.remote import kernel_build_server
|
||||||
from mindspore.common.tensor import Tensor as PythonTensor
|
from mindspore.common.tensor import Tensor as PythonTensor
|
||||||
from mindspore.common.tensor import CSRTensor as PythonCSRTensor
|
from mindspore.common.sparse_tensor import CSRTensor as PythonCSRTensor
|
||||||
from mindspore.common.tensor import COOTensor as PythonCOOTensor
|
from mindspore.common.sparse_tensor import COOTensor as PythonCOOTensor
|
||||||
from mindspore.common.tensor import RowTensor as PythonRowTensor
|
from mindspore.common.sparse_tensor import RowTensor as PythonRowTensor
|
||||||
from mindspore._c_expression import GraphExecutor_, Tensor, MetaTensor, CSRTensor, RowTensor, COOTensor, \
|
from mindspore._c_expression import GraphExecutor_, Tensor, MetaTensor, CSRTensor, RowTensor, COOTensor, \
|
||||||
PyNativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
|
PyNativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
|
||||||
_ms_memory_recycle
|
_ms_memory_recycle
|
||||||
|
|
|
@ -0,0 +1,888 @@
|
||||||
|
# Copyright 2022 Huawei Technologies Co., Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
# ============================================================================
|
||||||
|
"""SparseTensor implementation."""
|
||||||
|
from __future__ import absolute_import, annotations
|
||||||
|
|
||||||
|
__all__ = ['RowTensorInner', 'RowTensor', 'SparseTensor', 'COOTensor', 'CSRTensor']
|
||||||
|
|
||||||
|
from typing import Tuple
|
||||||
|
|
||||||
|
from mindspore import log as logger
|
||||||
|
from mindspore.common import dtype as mstype
|
||||||
|
from mindspore.common._register_for_tensor import tensor_operator_registry
|
||||||
|
from mindspore.common.tensor import Tensor
|
||||||
|
from mindspore._c_expression import COOTensor as COOTensor_
|
||||||
|
from mindspore._c_expression import CSRTensor as CSRTensor_
|
||||||
|
from mindspore._c_expression import RowTensor as RowTensor_
|
||||||
|
from mindspore._c_expression import Tensor as Tensor_
|
||||||
|
from mindspore._checkparam import Validator as validator
|
||||||
|
|
||||||
|
|
||||||
|
class RowTensorInner(RowTensor_):
|
||||||
|
"""
|
||||||
|
Implementation for RowTensor, for MindSpore developers only.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, indices=None, values=None, shape=None, row_tensor=None):
|
||||||
|
"""Init RowTensor"""
|
||||||
|
self.init_finished = False
|
||||||
|
# Directly init a RowTensor from another RowTensor
|
||||||
|
if row_tensor is not None:
|
||||||
|
if not isinstance(row_tensor, (RowTensor, RowTensor_)):
|
||||||
|
raise TypeError(f"Expect input `row_tensor` to be a RowTensor, but got {type(row_tensor)}")
|
||||||
|
if not (indices is None and values is None and shape is None):
|
||||||
|
raise TypeError("If input `row_tensor` is provided, `indices`, `values`, `shapes` should all be `None`")
|
||||||
|
RowTensor_.__init__(self, row_tensor)
|
||||||
|
# Init a RowTensor from indices, values and shape
|
||||||
|
else:
|
||||||
|
RowTensor_.__init__(self, indices, values, shape)
|
||||||
|
self.init_finished = True
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
"""Avoid PyTest Segfault when RowTensor is not initialized."""
|
||||||
|
if self.init_finished:
|
||||||
|
return RowTensor_.__repr__(self)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
@property
|
||||||
|
def indices(self):
|
||||||
|
"""Return RowTensor's indices."""
|
||||||
|
return Tensor(self._indices)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def values(self):
|
||||||
|
"""Return RowTensor's non-zero values."""
|
||||||
|
return Tensor(self._values)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def dense_shape(self):
|
||||||
|
"""Return RowTensor's shape."""
|
||||||
|
return self._shape
|
||||||
|
|
||||||
|
|
||||||
|
class RowTensor(RowTensorInner):
|
||||||
|
"""
|
||||||
|
A sparse representation of a set of tensor slices at given indices.
|
||||||
|
|
||||||
|
An RowTensor is typically used to represent a subset of a larger
|
||||||
|
tensor dense of shape [L0, D1, .. , DN] where L0 >> D0.
|
||||||
|
|
||||||
|
The values in indices are the indices in the first dimension of the slices
|
||||||
|
that have been extracted from the larger tensor.
|
||||||
|
|
||||||
|
The dense tensor dense represented by an RowTensor slices has
|
||||||
|
`dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]`.
|
||||||
|
|
||||||
|
For example, if indices is [0], values is [[1, 2]], shape is
|
||||||
|
(3, 2), then the dense representation of the row tensor will be:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
[[1, 2],
|
||||||
|
[0, 0],
|
||||||
|
[0, 0]]
|
||||||
|
|
||||||
|
Note:
|
||||||
|
RowTensor is deprecated from version 2.0, and will be removed in future version.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
indices (Tensor): A 1-D integer Tensor of shape [D0]. Default: None.
|
||||||
|
values (Tensor): A Tensor of any dtype of shape [D0, D1, ..., Dn]. Default: None.
|
||||||
|
shape (tuple(int)): An integer tuple which contains the shape
|
||||||
|
of the corresponding dense tensor. Default: None.
|
||||||
|
row_tensor (RowTensor): A RowTensor object. Default: None.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
RowTensor, composed of `indices`, `values`, and `shape`.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> import mindspore as ms
|
||||||
|
>>> from mindspore import Tensor, RowTensor
|
||||||
|
>>> indices = Tensor([0])
|
||||||
|
>>> values = Tensor([[1, 2]], dtype=ms.float32)
|
||||||
|
>>> shape = (3, 2)
|
||||||
|
>>> x = RowTensor(indices, values, shape)
|
||||||
|
>>> print(x.values)
|
||||||
|
[[1. 2.]]
|
||||||
|
>>> print(x.indices)
|
||||||
|
[0]
|
||||||
|
>>> print(x.dense_shape)
|
||||||
|
(3, 2)
|
||||||
|
"""
|
||||||
|
def __init__(self, indices=None, values=None, shape=None, row_tensor=None):
|
||||||
|
"""Init RowTensor"""
|
||||||
|
logger.warning("'RowTensor' is deprecated from version 1.7 and will be removed in a future version.")
|
||||||
|
super().__init__(indices, values, shape, row_tensor)
|
||||||
|
|
||||||
|
|
||||||
|
class SparseTensor(COOTensor_):
|
||||||
|
"""
|
||||||
|
A sparse representation of a set of nonzero elements from a tensor at given indices.
|
||||||
|
|
||||||
|
SparseTensor can only be used in the `Cell`'s construct method.
|
||||||
|
|
||||||
|
For a tensor dense, its SparseTensor(indices, values, dense_shape) has
|
||||||
|
`dense[indices[i]] = values[i]`.
|
||||||
|
|
||||||
|
For example, if indices is [[0, 1], [1, 2]], values is [1, 2], dense_shape is
|
||||||
|
(3, 4), then the dense representation of the sparse tensor will be:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
[[0, 1, 0, 0],
|
||||||
|
[0, 0, 2, 0],
|
||||||
|
[0, 0, 0, 0]]
|
||||||
|
|
||||||
|
Note:
|
||||||
|
The interface is deprecated from version 1.7 and will be removed in a future version.
|
||||||
|
Please use 'COOTensor' instead.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
|
||||||
|
where N and ndims are the number of `values` and number of dimensions in
|
||||||
|
the SparseTensor, respectively.
|
||||||
|
values (Tensor): A 1-D tensor of any type and shape `[N]`, which
|
||||||
|
supplies the values for each element in `indices`.
|
||||||
|
shape (tuple(int)): A integer tuple of size `ndims`,
|
||||||
|
which specifies the shape of the sparse tensor.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
SparseTensor, composed of `indices`, `values`, and `shape`.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> import mindspore as ms
|
||||||
|
>>> import mindspore.nn as nn
|
||||||
|
>>> from mindspore import Tensor, SparseTensor
|
||||||
|
>>> indices = Tensor([[0, 1], [1, 2]])
|
||||||
|
>>> values = Tensor([1, 2], dtype=ms.float32)
|
||||||
|
>>> shape = (3, 4)
|
||||||
|
>>> x = SparseTensor(indices, values, shape)
|
||||||
|
>>> print(x.values)
|
||||||
|
[1. 2.]
|
||||||
|
>>> print(x.indices)
|
||||||
|
[[0 1]
|
||||||
|
[1 2]]
|
||||||
|
>>> print(x.shape)
|
||||||
|
(3, 4)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, indices, values, shape):
|
||||||
|
"""Init COOTensor."""
|
||||||
|
logger.warning("'SparseTensor' is deprecated from version 1.7 and will be removed in a future version. " +
|
||||||
|
"Please use 'COOTensor' instead.")
|
||||||
|
if not (isinstance(indices, Tensor) and isinstance(values, Tensor) and isinstance(shape, tuple)):
|
||||||
|
raise TypeError("Inputs must follow: COOTensor(indices, values, shape).")
|
||||||
|
COOTensor_.__init__(self, indices, values, shape)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def indices(self):
|
||||||
|
return Tensor(self._indices)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def values(self):
|
||||||
|
return Tensor(self._values)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def shape(self):
|
||||||
|
return self._shape
|
||||||
|
|
||||||
|
|
||||||
|
class COOTensor(COOTensor_):
|
||||||
|
"""
|
||||||
|
A sparse representation of a set of nonzero elements from a tensor at given indices.
|
||||||
|
|
||||||
|
For a tensor dense, its COOTensor(indices, values, shape) has
|
||||||
|
`dense[indices[i]] = values[i]`.
|
||||||
|
|
||||||
|
For example, if indices is [[0, 1], [1, 2]], values is [1, 2], shape is
|
||||||
|
(3, 4), then the dense representation of the sparse tensor will be:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
[[0, 1, 0, 0],
|
||||||
|
[0, 0, 2, 0],
|
||||||
|
[0, 0, 0, 0]]
|
||||||
|
|
||||||
|
Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
|
||||||
|
and division (/). For details about operations supported by `COOTensor`, see
|
||||||
|
[operators](https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators).
|
||||||
|
|
||||||
|
Note:
|
||||||
|
This is an experimental feature and is subjected to change.
|
||||||
|
Currently, duplicate coordinates in the indices will not be coalesced.
|
||||||
|
If the indices contain out-of-bound values, the result will be undefined.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
|
||||||
|
where N and ndims are the number of `values` and number of dimensions in
|
||||||
|
the COOTensor, respectively. Currently, `ndims` must be 2.
|
||||||
|
Please make sure that the indices are in range of the given shape.
|
||||||
|
values (Tensor): A 1-D tensor of any type and shape `[N]`, which
|
||||||
|
supplies the values for each element in `indices`.
|
||||||
|
shape (tuple(int)): A integer tuple of size `ndims`,
|
||||||
|
which specifies the dense_shape of the sparse tensor.
|
||||||
|
coo_tensor (COOTensor): A COOTensor object.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
COOTensor, composed of `indices`, `values`, and `shape`.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> import mindspore as ms
|
||||||
|
>>> import mindspore.nn as nn
|
||||||
|
>>> from mindspore import Tensor, COOTensor
|
||||||
|
>>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
|
||||||
|
>>> values = Tensor([1, 2], dtype=ms.float32)
|
||||||
|
>>> shape = (3, 4)
|
||||||
|
>>> x = COOTensor(indices, values, shape)
|
||||||
|
>>> print(x.values)
|
||||||
|
[1. 2.]
|
||||||
|
>>> print(x.indices)
|
||||||
|
[[0 1]
|
||||||
|
[1 2]]
|
||||||
|
>>> print(x.shape)
|
||||||
|
(3, 4)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, indices=None, values=None, shape=None, coo_tensor=None):
|
||||||
|
"""Init COOTensor"""
|
||||||
|
self.init_finished = False
|
||||||
|
# Directly init a COOTensor from another COOTensor
|
||||||
|
if coo_tensor is not None:
|
||||||
|
if not isinstance(coo_tensor, (COOTensor, COOTensor_)):
|
||||||
|
raise TypeError(f"Expect input `coo_tensor` to be a COOTensor, but got {type(coo_tensor)}")
|
||||||
|
if not (indices is None and values is None and shape is None):
|
||||||
|
raise TypeError("If input `coo_tensor` is provided, `indices`, `values`, `shapes` should all be `None`")
|
||||||
|
COOTensor_.__init__(self, coo_tensor)
|
||||||
|
# Init a COOTensor from indices, values and shape
|
||||||
|
else:
|
||||||
|
validator.check_coo_tensor_input(indices, values, shape)
|
||||||
|
validator.check_coo_tensor_shape(indices.shape, values.shape, shape)
|
||||||
|
validator.check_coo_tensor_dtype(indices.dtype)
|
||||||
|
indices = tensor_operator_registry.get('stop_gradient')(indices)
|
||||||
|
COOTensor_.__init__(self, indices, values, shape)
|
||||||
|
self.init_finished = True
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
"""Avoid PyTest Segfault when COOTensor is not initialized."""
|
||||||
|
if self.init_finished:
|
||||||
|
return COOTensor_.__repr__(self)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def __neg__(self):
|
||||||
|
return COOTensor(self.indices, -self.values, self.shape)
|
||||||
|
|
||||||
|
def __add__(self, other):
|
||||||
|
if not self.shape == other.shape:
|
||||||
|
raise ValueError("Input tensors should have the same shape.")
|
||||||
|
if isinstance(other, Tensor):
|
||||||
|
return tensor_operator_registry.get("tensor_scatter_add")(other, self.indices, self.values)
|
||||||
|
if isinstance(other, COOTensor):
|
||||||
|
return tensor_operator_registry.get('coo_add')(self, other, Tensor(0, self.values.dtype))
|
||||||
|
raise TypeError("COOTensor add with %s is not supported." % type(other))
|
||||||
|
|
||||||
|
def __sub__(self, other):
|
||||||
|
if not self.shape == other.shape:
|
||||||
|
raise ValueError("Input tensors should have the same shape.")
|
||||||
|
if isinstance(other, Tensor):
|
||||||
|
return tensor_operator_registry.get("tensor_scatter_add")(-other, self.indices, self.values)
|
||||||
|
if isinstance(other, COOTensor):
|
||||||
|
return tensor_operator_registry.get('coo_add')(
|
||||||
|
self, -other, Tensor(0, self.values.dtype))
|
||||||
|
raise TypeError("COOTensor subtract with %s is not supported." % type(other))
|
||||||
|
|
||||||
|
def __mul__(self, other):
|
||||||
|
if not self.shape == other.shape:
|
||||||
|
raise ValueError("Input tensors should have the same shape.")
|
||||||
|
if isinstance(other, Tensor):
|
||||||
|
other_values = tensor_operator_registry.get("gather_nd")(other, self.indices)
|
||||||
|
return COOTensor(self.indices, self.values * other_values, self.shape)
|
||||||
|
raise TypeError("COOTensor multiply with %s is not supported." % type(other))
|
||||||
|
|
||||||
|
def __div__(self, other):
|
||||||
|
if not self.shape == other.shape:
|
||||||
|
raise ValueError("Input tensors should have the same shape.")
|
||||||
|
if isinstance(other, Tensor):
|
||||||
|
logger.warning("For sparse divide, zero values in the dense tensor are ignored.")
|
||||||
|
other_values = tensor_operator_registry.get("gather_nd")(other, self.indices)
|
||||||
|
return COOTensor(self.indices, self.values / other_values, self.shape)
|
||||||
|
raise TypeError("COOTensor divide with %s is not supported." % type(other))
|
||||||
|
|
||||||
|
def __truediv__(self, other):
|
||||||
|
return self.__div__(other)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def indices(self) -> Tensor:
|
||||||
|
"""Return COOTensor's indices."""
|
||||||
|
return Tensor(self._indices)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def values(self) -> Tensor:
|
||||||
|
"""Return COOTensor's non-zero values."""
|
||||||
|
return Tensor(self._values)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def shape(self) -> Tuple[int, ...]:
|
||||||
|
"""Return COOTensor's shape."""
|
||||||
|
return self._shape
|
||||||
|
|
||||||
|
@property
|
||||||
|
def dtype(self) -> mstype:
|
||||||
|
"""Return the dtype of the values of COOTensor (:class:`mindspore.dtype`)."""
|
||||||
|
return self._dtype
|
||||||
|
|
||||||
|
@property
|
||||||
|
def size(self) -> int:
|
||||||
|
"""Return the number of non-zero values."""
|
||||||
|
return self.values.size
|
||||||
|
|
||||||
|
@property
|
||||||
|
def itemsize(self) -> int:
|
||||||
|
"""Return the length of one tensor element in bytes."""
|
||||||
|
return self.values.itemsize
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ndim(self) -> int:
|
||||||
|
"""Return the number of tensor dimensions."""
|
||||||
|
return len(self.shape)
|
||||||
|
|
||||||
|
def coalesce(self) -> COOTensor:
|
||||||
|
"""
|
||||||
|
Return the coalesced sparse tensor of the input.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
COOTensor.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``GPU``
|
||||||
|
"""
|
||||||
|
shape = Tensor(self.shape)
|
||||||
|
res_indices, res_values, _ = tensor_operator_registry.get("coalesce")(self.indices.transpose(),
|
||||||
|
self.values, shape)
|
||||||
|
return COOTensor(res_indices.transpose(), res_values, self.shape)
|
||||||
|
|
||||||
|
def to_csr(self) -> CSRTensor:
|
||||||
|
"""
|
||||||
|
Converts COOTensor to CSRTensor.
|
||||||
|
|
||||||
|
Note:
|
||||||
|
Currently only supports CPU backend with LLVM 12.0.1 installed.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CSRTensor.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``GPU`` ``CPU``
|
||||||
|
"""
|
||||||
|
row_indices = self.indices[:, 0]
|
||||||
|
col_indices = self.indices[:, 1]
|
||||||
|
idx_dtype = self.indices.dtype
|
||||||
|
row_indices, sort_idx = tensor_operator_registry.get("sort")(
|
||||||
|
row_indices.astype(mstype.float32))
|
||||||
|
row_indices = row_indices.astype(idx_dtype)
|
||||||
|
col_indices = col_indices[sort_idx]
|
||||||
|
values = self.values[sort_idx]
|
||||||
|
indptr = tensor_operator_registry.get("coo2csr")(row_indices, self.shape[0])
|
||||||
|
return CSRTensor(indptr, col_indices, values, self.shape)
|
||||||
|
|
||||||
|
def to_dense(self) -> Tensor:
|
||||||
|
"""
|
||||||
|
Converts COOTensor to Dense Tensor.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tensor.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``GPU``
|
||||||
|
"""
|
||||||
|
zeros_tensor = tensor_operator_registry.get("zeros")(self.shape, self.values.dtype)
|
||||||
|
return tensor_operator_registry.get("tensor_scatter_add")(
|
||||||
|
zeros_tensor, self.indices, self.values)
|
||||||
|
|
||||||
|
def astype(self, dtype: mstype) -> COOTensor:
|
||||||
|
"""
|
||||||
|
Return a copy of the COOTensor, cast its values to a specified type.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
COOTensor.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``Ascend`` ``GPU`` ``CPU``
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> import mindspore as ms
|
||||||
|
>>> from mindspore import Tensor, COOTensor
|
||||||
|
>>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
|
||||||
|
>>> values = Tensor([1, 2], dtype=ms.float32)
|
||||||
|
>>> shape = (3, 4)
|
||||||
|
>>> coo_tensor = COOTensor(indices, values, shape)
|
||||||
|
>>> print(coo_tensor.astype(ms.float64).dtype)
|
||||||
|
Float64
|
||||||
|
"""
|
||||||
|
data = self.values.astype(dtype)
|
||||||
|
return COOTensor(self.indices, data, self.shape)
|
||||||
|
|
||||||
|
def to_tuple(self) -> Tuple[Tensor, Tensor, Tuple[int, ...]]:
|
||||||
|
"""
|
||||||
|
Return indices, values and shape as a tuple.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``Ascend`` ``GPU`` ``CPU``
|
||||||
|
"""
|
||||||
|
return self.indices, self.values, self.shape
|
||||||
|
|
||||||
|
def abs(self) -> COOTensor:
|
||||||
|
"""
|
||||||
|
Return absolute value element-wisely.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
COOTensor.
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``Ascend`` ``GPU`` ``CPU``
|
||||||
|
"""
|
||||||
|
data = self.values.abs()
|
||||||
|
return COOTensor(self.indices, data, self.shape)
|
||||||
|
|
||||||
|
def add(self, other: COOTensor, thresh: Tensor) -> COOTensor:
|
||||||
|
"""
|
||||||
|
Return the sum with another COOTensor.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
other(COOTensor): the second SparseTensor to sum.
|
||||||
|
thresh(Tensor): the magnitude threshold that determines
|
||||||
|
if an output value/index pair pair take space.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
COOTensor, representing the sum.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If any input(self/other)'s indices's dim is not equal to 2.
|
||||||
|
ValueError: If any input(self/other)'s values's dim is not equal to 1.
|
||||||
|
ValueError: If any input(self/other)'s shape's dim is not equal to 1.
|
||||||
|
ValueError: If thresh's dim is not equal to 0.
|
||||||
|
TypeError: If any input(self/other)'s indices's type is not equal to int64.
|
||||||
|
TypeError: If any input(self/other)'s shape's type is not equal to int64.
|
||||||
|
ValueError: If any input(self/other)'s indices's length is not equal to
|
||||||
|
its values's length.
|
||||||
|
TypeError: If any input(self/other)'s values's type is not equal to anf of
|
||||||
|
(int8/int16/int32/int64/float32/float64/complex64/complex128)
|
||||||
|
TypeError: If thresh's type is not equal to anf of
|
||||||
|
(int8/int16/int32/int64/float32/float64)
|
||||||
|
TypeError: If self's indices's type is not equal to other's indices's type
|
||||||
|
TypeError: If self's values's type is not equal to other's values's type
|
||||||
|
TypeError: If self's shape's type is not equal to other's shape's type
|
||||||
|
TypeError: If (self/other)'s value's type is not matched with thresh's type
|
||||||
|
|
||||||
|
Supported Platforms:
|
||||||
|
``CPU`` ``GPU``
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> from mindspore import Tensor, COOTensor
|
||||||
|
>>> from mindspore import dtype as mstype
|
||||||
|
>>> indics0 = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
|
||||||
|
>>> values0 = Tensor([1, 2], dtype=mstype.int32)
|
||||||
|
>>> shape0 = (3, 4)
|
||||||
|
>>> input0 = COOTensor(indics0, values0, shape0)
|
||||||
|
>>> indics1 = Tensor([[0, 0], [1, 1]], dtype=mstype.int64)
|
||||||
|
>>> values1 = Tensor([3, 4], dtype=mstype.int32)
|
||||||
|
>>> shape1 = (3, 4)
|
||||||
|
>>> input1 = COOTensor(indics1, values1, shape1)
|
||||||
|
>>> thres = Tensor(0, dtype=mstype.int32)
|
||||||
|
>>> out = input0.add(input1, thres)
|
||||||
|
>>> print(out)
|
||||||
|
COOTensor(shape = [3, 4], dtype = Int32, indices=Tensor(shape=[4, 2],
|
||||||
|
dtype = Int64, value=[[0 0], [0 1], [1 1], [1 2]]), values=Tensor(shape[4],
|
||||||
|
dtype=Int32, value=[3 1 4 2]))
|
||||||
|
"""
|
||||||
|
return tensor_operator_registry.get('coo_add')(self, other, thresh)
|
||||||
|
|
||||||
|
|
||||||
|
class CSRTensor(CSRTensor_):
|
||||||
|
"""
|
||||||
|
Constructs a sparse tensor in CSR (Compressed Sparse Row) format, with specified
|
||||||
|
values indicated by `values` and row and column positions indicated by `indptr`
|
||||||
|
and `indices`.
|
||||||
|
|
||||||
|
For example, if indptr is [0, 1, 2, 2], indices is [1, 2], values is [1., 2.], shape is
|
||||||
|
(3, 4), then the dense representation of the sparse tensor will be:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
[[0., 1., 0., 0.],
|
||||||
|
[0., 0., 2., 0.],
|
||||||
|
[0., 0., 0., 0.]]
|
||||||
|
|
||||||
|
Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
|
||||||
|
and division (/). For details about operations supported by `CSRTensor`, see
|
||||||
|
[operators](https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators).
|
||||||
|
|
||||||
|
Note:
|
||||||
|
This is an experimental feature and is subjected to change.
|
||||||
|
If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
|
||||||
|
when the length of `values` or `indices` exceeds the range indicated by indptr, and when the columns indicated by
|
||||||
|
`indices` are repeated on the same row.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
indptr (Tensor): 1-D Tensor of shape `[M]`, which equals to `shape[0] + 1`, which indicates the
|
||||||
|
start and end point for `values` in each row. Default: None. If provided,
|
||||||
|
must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
|
||||||
|
indices (Tensor): 1-D Tensor of shape `[N]`, which has the same length as `values`. `indices`
|
||||||
|
indicates the which column `values` should be placed. Default: None. If provided,
|
||||||
|
must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
|
||||||
|
values (Tensor): Tensor, which has the same length as `indices` (values.shape[0] == indices.shape[0]).
|
||||||
|
`values` stores the data for CSRTensor. Default: None.
|
||||||
|
shape (tuple(int)): A tuple indicates the shape of the CSRTensor, and `shape[0]` must equal to `M - 1`,
|
||||||
|
which all equal to number of rows of the CSRTensor. Default: None.
|
||||||
|
csr_tensor (CSRTensor): A CSRTensor object. Values' feature dimension should match with
|
||||||
|
CSRTensor's feature dimension (values.shape[1:] == csr_tensor.shape[2:]). Default: None.
|
||||||
|
|
||||||
|
Outputs:
|
||||||
|
CSRTensor, with shape defined by `shape`, and dtype inferred from `value`.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> import mindspore as ms
|
||||||
|
>>> from mindspore import Tensor, CSRTensor
|
||||||
|
>>> # initialize a csr_tensor with indptr, indices, values and shape
|
||||||
|
>>> indptr = Tensor([0, 1, 2], dtype=ms.int32)
|
||||||
|
>>> indices = Tensor([0, 1], dtype=ms.int32)
|
||||||
|
>>> values = Tensor([1, 2], dtype=ms.float32)
|
||||||
|
>>> shape = (2, 4)
|
||||||
|
>>> csr_tensor = CSRTensor(indptr, indices, values, shape)
|
||||||
|
>>> # access a data member of CSRTensor
|
||||||
|
>>> print(indptr == csr_tensor.indptr)
|
||||||
|
[ True True True]
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, indptr=None, indices=None, values=None, shape=None, csr_tensor=None):
    """Init CSRTensor, either by copying another CSRTensor or from raw components.

    Args:
        indptr (Tensor): 1-D row pointer tensor. Default: None.
        indices (Tensor): 1-D column index tensor. Default: None.
        values (Tensor): non-zero values of the sparse tensor. Default: None.
        shape (tuple(int)): dense shape of the tensor. Default: None.
        csr_tensor (CSRTensor): an existing CSRTensor to copy from. Default: None.

    Raises:
        TypeError: If `csr_tensor` is not a CSRTensor, or if `csr_tensor` is
            given together with any of the component arguments.
    """
    self.init_finished = False
    if csr_tensor is not None:
        # Directly init a CSRTensor from another CSRTensor.
        if not isinstance(csr_tensor, (CSRTensor, CSRTensor_)):
            raise TypeError(f"Expect input `csr_tensor` to be a CSRTensor, but got {type(csr_tensor)}")
        if not (indptr is None and indices is None and values is None and shape is None):
            # Fixed message: the parameter is named `shape`, not `shapes`.
            raise TypeError(
                "If input `csr_tensor` is provided, `indptr`, `indices`, `values`, `shape` should all be `None`")
        CSRTensor_.__init__(self, csr_tensor)
    else:
        # Init a CSRTensor from indptr, indices, values and shape.
        validator.check_csr_tensor_input(indptr, indices, values, shape)
        validator.check_csr_tensor_shape(indptr.shape, indices.shape, values.shape, shape)
        validator.check_csr_tensor_dtype(indptr.dtype, indices.dtype)
        # Index tensors must not carry gradients.
        indptr = tensor_operator_registry.get('stop_gradient')(indptr)
        indices = tensor_operator_registry.get('stop_gradient')(indices)
        CSRTensor_.__init__(self, indptr, indices, values, shape)
    self.init_finished = True
|
||||||
|
|
||||||
|
def __repr__(self):
    """Avoid PyTest segfault when CSRTensor is not initialized."""
    return CSRTensor_.__repr__(self) if self.init_finished else ''
|
||||||
|
|
||||||
|
def __mul__(self, other):
    """Elementwise multiplication; returns a new CSRTensor with scaled values."""
    scaled = tensor_operator_registry.get('csr_mul')(self, other)
    return CSRTensor(self.indptr, self.indices, scaled, self.shape)
|
||||||
|
|
||||||
|
def __div__(self, other):
    """Elementwise division; returns a new CSRTensor with divided values."""
    logger.warning("For CSR divide, zero values in the dense tensor are ignored.")
    quotient = tensor_operator_registry.get('csr_div')(self, other)
    return CSRTensor(self.indptr, self.indices, quotient, self.shape)
|
||||||
|
|
||||||
|
def __truediv__(self, other):
    # Python 3 `/` operator: delegate to the legacy __div__ implementation.
    return self.__div__(other)
|
||||||
|
|
||||||
|
def __neg__(self):
    """Unary minus; returns a new CSRTensor with all stored values negated."""
    negated = -self.values
    return CSRTensor(self.indptr, self.indices, negated, self.shape)
|
||||||
|
|
||||||
|
def __add__(self, other):
    """Elementwise addition with another CSRTensor of identical shape.

    Raises:
        TypeError: If `other` is not a CSRTensor.
        ValueError: If the two shapes differ.
    """
    # Reject unsupported operand types before touching `other.shape`,
    # so that a non-tensor operand raises TypeError (as documented)
    # instead of AttributeError.
    if not isinstance(other, CSRTensor):
        raise TypeError("CSRTensor add with %s is not supported." % type(other))
    if not self.shape == other.shape:
        raise ValueError("Input tensors should have the same shape.")
    one = Tensor(1, self.values.dtype)
    return tensor_operator_registry.get('csr_add')(self, other, one, one)
|
||||||
|
|
||||||
|
def __sub__(self, other):
    """Elementwise subtraction of another CSRTensor of identical shape.

    Raises:
        TypeError: If `other` is not a CSRTensor.
        ValueError: If the two shapes differ.
    """
    # Reject unsupported operand types before touching `other.shape`,
    # so that a non-tensor operand raises TypeError (as documented)
    # instead of AttributeError.
    if not isinstance(other, CSRTensor):
        raise TypeError("CSRTensor subtract with %s is not supported." % type(other))
    if not self.shape == other.shape:
        raise ValueError("Input tensors should have the same shape.")
    return tensor_operator_registry.get('csr_add')(
        self, other, Tensor(1, self.values.dtype), Tensor(-1, self.values.dtype))
|
||||||
|
|
||||||
|
@property
def indptr(self) -> Tensor:
    """Return CSRTensor's row index pointers (1-D, length ``shape[0] + 1``)."""
    return Tensor(self._indptr)
|
||||||
|
|
||||||
|
@property
def indices(self) -> Tensor:
    """Return CSRTensor's column indices (1-D, same length as ``values``)."""
    return Tensor(self._indices)
|
||||||
|
|
||||||
|
@property
def values(self) -> Tensor:
    """Return CSRTensor's non-zero values."""
    return Tensor(self._values)
|
||||||
|
|
||||||
|
@property
def shape(self) -> Tuple[int, ...]:
    """Return CSRTensor's dense shape as a tuple of ints."""
    return self._shape
|
||||||
|
|
||||||
|
@property
def dtype(self) -> mstype:
    """Return the dtype of the values of CSRTensor (:class:`mindspore.dtype`)."""
    return self._dtype
|
||||||
|
|
||||||
|
@property
def size(self) -> int:
    """Return the number of non-zero values stored in this CSRTensor."""
    return self.values.size
|
||||||
|
|
||||||
|
@property
def itemsize(self) -> int:
    """Return the length of one tensor element in bytes."""
    return self.values.itemsize
|
||||||
|
|
||||||
|
@property
def ndim(self) -> int:
    """Return the number of tensor dimensions (length of ``shape``)."""
    return len(self.shape)
|
||||||
|
|
||||||
|
def to_tuple(self) -> Tuple[Tensor, Tensor, Tensor, Tuple[int, ...]]:
    """
    Return indptr, indices, values and shape as a tuple.

    Returns:
        Tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    """
    components = (self.indptr, self.indices, self.values, self.shape)
    return components
|
||||||
|
|
||||||
|
def to_coo(self) -> COOTensor:
    """
    Convert this CSRTensor to a COOTensor.

    Note:
        Currently only supports CPU backend with LLVM 12.0.1 installed.

    Returns:
        COOTensor.

    Supported Platforms:
        ``GPU`` ``CPU``
    """
    if self.ndim != 2:
        raise ValueError("Currently only support 2-D CSRTensor when converting to COOTensor.")
    # Expand the row pointers to one row index per stored value.
    rows = tensor_operator_registry.get("csr2coo")(self.indptr, self.values.shape[0])
    # Pair each row index with its column index to form COO coordinates.
    coordinates = tensor_operator_registry.get("stack")((rows, self.indices), 1)
    return COOTensor(coordinates, self.values, self.shape)
|
||||||
|
|
||||||
|
def to_dense(self) -> Tensor:
    """
    Convert this CSRTensor to a dense Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``GPU``
    """
    densify = tensor_operator_registry.get("csr_to_dense")
    return densify(self)
|
||||||
|
|
||||||
|
def astype(self, dtype: mstype) -> CSRTensor:
    """
    Return a copy of this CSRTensor with its values cast to the given type.

    Args:
        dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype.

    Returns:
        CSRTensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    """
    casted = self.values.astype(dtype)
    return CSRTensor(self.indptr, self.indices, casted, self.shape)
|
||||||
|
|
||||||
|
def mv(self, dense_vector: Tensor) -> Tensor:
    """
    Sparse matrix-vector multiplication.

    A CSRTensor of shape `[M, N]` multiplied on the right by a dense vector of
    shape `[N, 1]` yields a dense result of shape `[M, 1]`.

    Note:
        Currently only supports CPU backend with LLVM 12.0.1 installed.

    Args:
        dense_vector (Tensor): A dense Tensor, its shape must be (csr_tensor.shape[1], 1)

    Returns:
        Tensor.

    Supported Platforms:
        ``GPU`` ``CPU``
    """
    validator.check_value_type('dense_vector', dense_vector, (Tensor_,), 'CSRTensor.mv')
    spmv = tensor_operator_registry.get("csr_mv")
    return spmv(self, dense_vector)
|
||||||
|
|
||||||
|
def mm(self, dense_matrix: Tensor) -> Tensor:
    """
    Sparse matrix-matrix multiplication.

    A CSRTensor of shape `[M, N]` multiplied on the right by a dense matrix of
    shape `[N, K]` yields a dense result of shape `[M, K]`.

    Note:
        Currently only supports CPU backend with LLVM 12.0.1 installed.

    Args:
        dense_matrix (Tensor): A dense Tensor, its shape[0] should be equal to csr_tensor.shape[1]

    Returns:
        Tensor.

    Supported Platforms:
        ``GPU`` ``CPU``
    """
    validator.check_value_type('dense_matrix', dense_matrix, (Tensor_,), 'CSRTensor.mm')
    # The registry entry is an op factory; instantiate it, then call.
    spmm = tensor_operator_registry.get("csr_mm")()
    return spmm(self.indptr, self.indices, self.values, self.shape, dense_matrix)
|
||||||
|
|
||||||
|
def sum(self, axis: int) -> Tensor:
    """
    Reduce a dimension of the CSRTensor by summing all elements in that dimension.

    Note:
        Currently only supports CPU backend with LLVM 12.0.1 installed.

    Args:
        axis (int): The dimensions to reduce.

    Returns:
        Tensor, the dtype is the same as `CSRTensor.values`.

    Supported Platforms:
        ``GPU`` ``CPU``
    """
    reduce_sum = tensor_operator_registry.get("csr_reduce_sum")
    return reduce_sum(self, axis)
|
||||||
|
|
||||||
|
def abs(self) -> CSRTensor:
    """
    Return absolute value element-wisely.

    Returns:
        CSRTensor, with all values being non-negative.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    """
    magnitudes = self.values.abs()
    return CSRTensor(self.indptr, self.indices, magnitudes, self.shape)
|
||||||
|
|
||||||
|
def add(self, b: CSRTensor, alpha: Tensor, beta: Tensor) -> CSRTensor:
    """
    Weighted addition of two CSR Tensors: C = alpha * A + beta * B.

    Args:
        b (CSRTensor): Sparse CSR Tensor.
        alpha(Tensor): Dense Tensor, its shape must be able to broadcast to self.
        beta(Tensor): Dense Tensor, its shape must be able to broadcast to b.

    Returns:
        CSRTensor.

    Supported Platforms:
        ``GPU`` ``CPU``
    """
    csr_add = tensor_operator_registry.get('csr_add')
    return csr_add(self, b, alpha, beta)
|
||||||
|
|
|
@ -13,13 +13,11 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
"""Tensor implementation."""
|
"""Tensor implementation."""
|
||||||
from __future__ import absolute_import, annotations
|
|
||||||
|
|
||||||
__all__ = ['Tensor', 'RowTensorInner', 'RowTensor', 'SparseTensor', 'COOTensor', 'CSRTensor', 'MapTensor']
|
__all__ = ['Tensor', 'MapTensor']
|
||||||
|
|
||||||
import math
|
import math
|
||||||
import numbers
|
import numbers
|
||||||
from typing import Tuple
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
from mindspore.communication.management import get_rank, get_group_size
|
from mindspore.communication.management import get_rank, get_group_size
|
||||||
|
@ -30,9 +28,6 @@ from mindspore import log as logger
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common._utils import split_to_slice_if_need
|
from mindspore.common._utils import split_to_slice_if_need
|
||||||
from mindspore.common._register_for_tensor import tensor_operator_registry
|
from mindspore.common._register_for_tensor import tensor_operator_registry
|
||||||
from mindspore._c_expression import COOTensor as COOTensor_
|
|
||||||
from mindspore._c_expression import CSRTensor as CSRTensor_
|
|
||||||
from mindspore._c_expression import RowTensor as RowTensor_
|
|
||||||
from mindspore._c_expression import Tensor as Tensor_
|
from mindspore._c_expression import Tensor as Tensor_
|
||||||
from mindspore._c_expression import MapTensor_
|
from mindspore._c_expression import MapTensor_
|
||||||
from mindspore._checkparam import Rel, check_is_number
|
from mindspore._checkparam import Rel, check_is_number
|
||||||
|
@ -4164,863 +4159,6 @@ class Tensor(Tensor_):
|
||||||
return tensor_operator_registry.get('ne')(self, other)
|
return tensor_operator_registry.get('ne')(self, other)
|
||||||
|
|
||||||
|
|
||||||
class RowTensorInner(RowTensor_):
    """
    Implementation for RowTensor, for MindSpore developers only.
    """

    def __init__(self, indices=None, values=None, shape=None, row_tensor=None):
        """Init RowTensor, either by copying another RowTensor or from components."""
        self.init_finished = False
        if row_tensor is not None:
            # Directly init a RowTensor from another RowTensor.
            if not isinstance(row_tensor, (RowTensor, RowTensor_)):
                raise TypeError(f"Expect input `row_tensor` to be a RowTensor, but got {type(row_tensor)}")
            if not (indices is None and values is None and shape is None):
                raise TypeError("If input `row_tensor` is provided, `indices`, `values`, `shapes` should all be `None`")
            RowTensor_.__init__(self, row_tensor)
        else:
            # Init a RowTensor from indices, values and shape.
            RowTensor_.__init__(self, indices, values, shape)
        self.init_finished = True

    def __repr__(self):
        """Avoid PyTest segfault when RowTensor is not initialized."""
        return RowTensor_.__repr__(self) if self.init_finished else ''

    @property
    def indices(self):
        """Return RowTensor's indices."""
        return Tensor(self._indices)

    @property
    def values(self):
        """Return RowTensor's non-zero values."""
        return Tensor(self._values)

    @property
    def dense_shape(self):
        """Return RowTensor's shape."""
        return self._shape
|
|
||||||
|
|
||||||
|
|
||||||
class RowTensor(RowTensorInner):
    """
    A sparse representation of a set of tensor slices at given indices.

    An RowTensor is typically used to represent a subset of a larger
    tensor dense of shape [L0, D1, .. , DN] where L0 >> D0.

    The values in indices are the indices in the first dimension of the slices
    that have been extracted from the larger tensor. The dense tensor
    represented by a RowTensor has
    `dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]`.

    For example, if indices is [0], values is [[1, 2]], shape is
    (3, 2), then the dense representation of the row tensor is
    [[1, 2], [0, 0], [0, 0]].

    Note:
        RowTensor is deprecated from version 2.0, and will be removed in future version.

    Args:
        indices (Tensor): A 1-D integer Tensor of shape [D0]. Default: None.
        values (Tensor): A Tensor of any dtype of shape [D0, D1, ..., Dn]. Default: None.
        shape (tuple(int)): An integer tuple which contains the shape
            of the corresponding dense tensor. Default: None.
        row_tensor (RowTensor): A RowTensor object. Default: None.

    Returns:
        RowTensor, composed of `indices`, `values`, and `shape`.

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, RowTensor
        >>> x = RowTensor(Tensor([0]), Tensor([[1, 2]], dtype=ms.float32), (3, 2))
        >>> print(x.dense_shape)
        (3, 2)
    """

    def __init__(self, indices=None, values=None, shape=None, row_tensor=None):
        """Init RowTensor, warning that the public class is deprecated."""
        logger.warning("'RowTensor' is deprecated from version 1.7 and will be removed in a future version.")
        super().__init__(indices, values, shape, row_tensor)
|
|
||||||
|
|
||||||
|
|
||||||
class SparseTensor(COOTensor_):
    """
    A sparse representation of a set of nonzero elements from a tensor at given indices.

    SparseTensor can only be used in the `Cell`'s construct method.

    For a tensor dense, its SparseTensor(indices, values, dense_shape) has
    `dense[indices[i]] = values[i]`. For example, if indices is [[0, 1], [1, 2]],
    values is [1, 2], dense_shape is (3, 4), the dense representation is
    [[0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]].

    Note:
        The interface is deprecated from version 1.7 and will be removed in a future version.
        Please use 'COOTensor' instead.

    Args:
        indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
            where N and ndims are the number of `values` and number of dimensions in
            the SparseTensor, respectively.
        values (Tensor): A 1-D tensor of any type and shape `[N]`, which
            supplies the values for each element in `indices`.
        shape (tuple(int)): A integer tuple of size `ndims`,
            which specifies the shape of the sparse tensor.

    Returns:
        SparseTensor, composed of `indices`, `values`, and `shape`.

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, SparseTensor
        >>> x = SparseTensor(Tensor([[0, 1], [1, 2]]), Tensor([1, 2], dtype=ms.float32), (3, 4))
        >>> print(x.shape)
        (3, 4)
    """

    def __init__(self, indices, values, shape):
        """Init COOTensor, warning that this class is deprecated."""
        logger.warning("'SparseTensor' is deprecated from version 1.7 and will be removed in a future version. " +
                       "Please use 'COOTensor' instead.")
        valid = isinstance(indices, Tensor) and isinstance(values, Tensor) and isinstance(shape, tuple)
        if not valid:
            raise TypeError("Inputs must follow: COOTensor(indices, values, shape).")
        COOTensor_.__init__(self, indices, values, shape)

    @property
    def indices(self):
        """Return SparseTensor's indices."""
        return Tensor(self._indices)

    @property
    def values(self):
        """Return SparseTensor's non-zero values."""
        return Tensor(self._values)

    @property
    def shape(self):
        """Return SparseTensor's shape."""
        return self._shape
|
|
||||||
|
|
||||||
|
|
||||||
class COOTensor(COOTensor_):
|
|
||||||
"""
|
|
||||||
A sparse representation of a set of nonzero elements from a tensor at given indices.
|
|
||||||
|
|
||||||
For a tensor dense, its COOTensor(indices, values, shape) has
|
|
||||||
`dense[indices[i]] = values[i]`.
|
|
||||||
|
|
||||||
For example, if indices is [[0, 1], [1, 2]], values is [1, 2], shape is
|
|
||||||
(3, 4), then the dense representation of the sparse tensor will be:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[[0, 1, 0, 0],
|
|
||||||
[0, 0, 2, 0],
|
|
||||||
[0, 0, 0, 0]]
|
|
||||||
|
|
||||||
Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
|
|
||||||
and division (/). For details about operations supported by `COOTensor`, see
|
|
||||||
[operators](https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators).
|
|
||||||
|
|
||||||
Note:
|
|
||||||
This is an experimental feature and is subjected to change.
|
|
||||||
Currently, duplicate coordinates in the indices will not be coalesced.
|
|
||||||
If the indices contain out-of-bound values, the result will be undefined.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
indices (Tensor): A 2-D integer Tensor of shape `[N, ndims]`,
|
|
||||||
where N and ndims are the number of `values` and number of dimensions in
|
|
||||||
the COOTensor, respectively. Currently, `ndims` must be 2.
|
|
||||||
Please make sure that the indices are in range of the given shape.
|
|
||||||
values (Tensor): A 1-D tensor of any type and shape `[N]`, which
|
|
||||||
supplies the values for each element in `indices`.
|
|
||||||
shape (tuple(int)): A integer tuple of size `ndims`,
|
|
||||||
which specifies the dense_shape of the sparse tensor.
|
|
||||||
coo_tensor (COOTensor): A COOTensor object.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
COOTensor, composed of `indices`, `values`, and `shape`.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
>>> import mindspore as ms
|
|
||||||
>>> import mindspore.nn as nn
|
|
||||||
>>> from mindspore import Tensor, COOTensor
|
|
||||||
>>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
|
|
||||||
>>> values = Tensor([1, 2], dtype=ms.float32)
|
|
||||||
>>> shape = (3, 4)
|
|
||||||
>>> x = COOTensor(indices, values, shape)
|
|
||||||
>>> print(x.values)
|
|
||||||
[1. 2.]
|
|
||||||
>>> print(x.indices)
|
|
||||||
[[0 1]
|
|
||||||
[1 2]]
|
|
||||||
>>> print(x.shape)
|
|
||||||
(3, 4)
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, indices=None, values=None, shape=None, coo_tensor=None):
|
|
||||||
"""Init COOTensor"""
|
|
||||||
self.init_finished = False
|
|
||||||
# Directly init a COOTensor from another COOTensor
|
|
||||||
if coo_tensor is not None:
|
|
||||||
if not isinstance(coo_tensor, (COOTensor, COOTensor_)):
|
|
||||||
raise TypeError(f"Expect input `coo_tensor` to be a COOTensor, but got {type(coo_tensor)}")
|
|
||||||
if not (indices is None and values is None and shape is None):
|
|
||||||
raise TypeError("If input `coo_tensor` is provided, `indices`, `values`, `shapes` should all be `None`")
|
|
||||||
COOTensor_.__init__(self, coo_tensor)
|
|
||||||
# Init a COOTensor from indices, values and shape
|
|
||||||
else:
|
|
||||||
validator.check_coo_tensor_input(indices, values, shape)
|
|
||||||
validator.check_coo_tensor_shape(indices.shape, values.shape, shape)
|
|
||||||
validator.check_coo_tensor_dtype(indices.dtype)
|
|
||||||
indices = tensor_operator_registry.get('stop_gradient')(indices)
|
|
||||||
COOTensor_.__init__(self, indices, values, shape)
|
|
||||||
self.init_finished = True
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
"""Avoid PyTest Segfault when COOTensor is not initialized."""
|
|
||||||
if self.init_finished:
|
|
||||||
return COOTensor_.__repr__(self)
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def __neg__(self):
|
|
||||||
return COOTensor(self.indices, -self.values, self.shape)
|
|
||||||
|
|
||||||
def __add__(self, other):
|
|
||||||
if not self.shape == other.shape:
|
|
||||||
raise ValueError("Input tensors should have the same shape.")
|
|
||||||
if isinstance(other, Tensor):
|
|
||||||
return tensor_operator_registry.get("tensor_scatter_add")(other, self.indices, self.values)
|
|
||||||
if isinstance(other, COOTensor):
|
|
||||||
return tensor_operator_registry.get('coo_add')(self, other, Tensor(0, self.values.dtype))
|
|
||||||
raise TypeError("COOTensor add with %s is not supported." % type(other))
|
|
||||||
|
|
||||||
def __sub__(self, other):
|
|
||||||
if not self.shape == other.shape:
|
|
||||||
raise ValueError("Input tensors should have the same shape.")
|
|
||||||
if isinstance(other, Tensor):
|
|
||||||
return tensor_operator_registry.get("tensor_scatter_add")(-other, self.indices, self.values)
|
|
||||||
if isinstance(other, COOTensor):
|
|
||||||
return tensor_operator_registry.get('coo_add')(
|
|
||||||
self, -other, Tensor(0, self.values.dtype))
|
|
||||||
raise TypeError("COOTensor subtract with %s is not supported." % type(other))
|
|
||||||
|
|
||||||
def __mul__(self, other):
|
|
||||||
if not self.shape == other.shape:
|
|
||||||
raise ValueError("Input tensors should have the same shape.")
|
|
||||||
if isinstance(other, Tensor):
|
|
||||||
other_values = tensor_operator_registry.get("gather_nd")(other, self.indices)
|
|
||||||
return COOTensor(self.indices, self.values * other_values, self.shape)
|
|
||||||
raise TypeError("COOTensor multiply with %s is not supported." % type(other))
|
|
||||||
|
|
||||||
def __div__(self, other):
|
|
||||||
if not self.shape == other.shape:
|
|
||||||
raise ValueError("Input tensors should have the same shape.")
|
|
||||||
if isinstance(other, Tensor):
|
|
||||||
logger.warning("For sparse divide, zero values in the dense tensor are ignored.")
|
|
||||||
other_values = tensor_operator_registry.get("gather_nd")(other, self.indices)
|
|
||||||
return COOTensor(self.indices, self.values / other_values, self.shape)
|
|
||||||
raise TypeError("COOTensor divide with %s is not supported." % type(other))
|
|
||||||
|
|
||||||
def __truediv__(self, other):
|
|
||||||
return self.__div__(other)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def indices(self) -> Tensor:
|
|
||||||
"""Return COOTensor's indices."""
|
|
||||||
return Tensor(self._indices)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def values(self) -> Tensor:
|
|
||||||
"""Return COOTensor's non-zero values."""
|
|
||||||
return Tensor(self._values)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def shape(self) -> Tuple[int, ...]:
|
|
||||||
"""Return COOTensor's shape."""
|
|
||||||
return self._shape
|
|
||||||
|
|
||||||
@property
|
|
||||||
def dtype(self) -> mstype:
|
|
||||||
"""Return the dtype of the values of COOTensor (:class:`mindspore.dtype`)."""
|
|
||||||
return self._dtype
|
|
||||||
|
|
||||||
@property
|
|
||||||
def size(self) -> int:
|
|
||||||
"""Return the number of non-zero values."""
|
|
||||||
return self.values.size
|
|
||||||
|
|
||||||
@property
|
|
||||||
def itemsize(self) -> int:
|
|
||||||
"""Return the length of one tensor element in bytes."""
|
|
||||||
return self.values.itemsize
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ndim(self) -> int:
|
|
||||||
"""Return the number of tensor dimensions."""
|
|
||||||
return len(self.shape)
|
|
||||||
|
|
||||||
def coalesce(self) -> COOTensor:
|
|
||||||
"""
|
|
||||||
Return the coalesced sparse tensor of the input.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
COOTensor.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``GPU``
|
|
||||||
"""
|
|
||||||
shape = Tensor(self.shape)
|
|
||||||
res_indices, res_values, _ = tensor_operator_registry.get("coalesce")(self.indices.transpose(),
|
|
||||||
self.values, shape)
|
|
||||||
return COOTensor(res_indices.transpose(), res_values, self.shape)
|
|
||||||
|
|
||||||
def to_csr(self) -> CSRTensor:
|
|
||||||
"""
|
|
||||||
Converts COOTensor to CSRTensor.
|
|
||||||
|
|
||||||
Note:
|
|
||||||
Currently only supports CPU backend with LLVM 12.0.1 installed.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
CSRTensor.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``GPU`` ``CPU``
|
|
||||||
"""
|
|
||||||
row_indices = self.indices[:, 0]
|
|
||||||
col_indices = self.indices[:, 1]
|
|
||||||
idx_dtype = self.indices.dtype
|
|
||||||
row_indices, sort_idx = tensor_operator_registry.get("sort")(
|
|
||||||
row_indices.astype(mstype.float32))
|
|
||||||
row_indices = row_indices.astype(idx_dtype)
|
|
||||||
col_indices = col_indices[sort_idx]
|
|
||||||
values = self.values[sort_idx]
|
|
||||||
indptr = tensor_operator_registry.get("coo2csr")(row_indices, self.shape[0])
|
|
||||||
return CSRTensor(indptr, col_indices, values, self.shape)
|
|
||||||
|
|
||||||
def to_dense(self) -> Tensor:
|
|
||||||
"""
|
|
||||||
Converts COOTensor to Dense Tensor.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tensor.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``GPU``
|
|
||||||
"""
|
|
||||||
zeros_tensor = tensor_operator_registry.get("zeros")(self.shape, self.values.dtype)
|
|
||||||
return tensor_operator_registry.get("tensor_scatter_add")(
|
|
||||||
zeros_tensor, self.indices, self.values)
|
|
||||||
|
|
||||||
def astype(self, dtype: mstype) -> COOTensor:
|
|
||||||
"""
|
|
||||||
Return a copy of the COOTensor, cast its values to a specified type.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
COOTensor.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``Ascend`` ``GPU`` ``CPU``
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
>>> import mindspore as ms
|
|
||||||
>>> from mindspore import Tensor, COOTensor
|
|
||||||
>>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
|
|
||||||
>>> values = Tensor([1, 2], dtype=ms.float32)
|
|
||||||
>>> shape = (3, 4)
|
|
||||||
>>> coo_tensor = COOTensor(indices, values, shape)
|
|
||||||
>>> print(coo_tensor.astype(ms.float64).dtype)
|
|
||||||
Float64
|
|
||||||
"""
|
|
||||||
data = self.values.astype(dtype)
|
|
||||||
return COOTensor(self.indices, data, self.shape)
|
|
||||||
|
|
||||||
def to_tuple(self) -> Tuple[Tensor, Tensor, Tuple[int, ...]]:
|
|
||||||
"""
|
|
||||||
Return indices, values and shape as a tuple.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``Ascend`` ``GPU`` ``CPU``
|
|
||||||
"""
|
|
||||||
return self.indices, self.values, self.shape
|
|
||||||
|
|
||||||
def abs(self) -> COOTensor:
|
|
||||||
"""
|
|
||||||
Return absolute value element-wisely.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
COOTensor.
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``Ascend`` ``GPU`` ``CPU``
|
|
||||||
"""
|
|
||||||
data = self.values.abs()
|
|
||||||
return COOTensor(self.indices, data, self.shape)
|
|
||||||
|
|
||||||
def add(self, other: COOTensor, thresh: Tensor) -> COOTensor:
|
|
||||||
"""
|
|
||||||
Return the sum with another COOTensor.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
other(COOTensor): the second SparseTensor to sum.
|
|
||||||
thresh(Tensor): the magnitude threshold that determines
|
|
||||||
if an output value/index pair pair take space.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
COOTensor, representing the sum.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ValueError: If any input(self/other)'s indices's dim is not equal to 2.
|
|
||||||
ValueError: If any input(self/other)'s values's dim is not equal to 1.
|
|
||||||
ValueError: If any input(self/other)'s shape's dim is not equal to 1.
|
|
||||||
ValueError: If thresh's dim is not equal to 0.
|
|
||||||
TypeError: If any input(self/other)'s indices's type is not equal to int64.
|
|
||||||
TypeError: If any input(self/other)'s shape's type is not equal to int64.
|
|
||||||
ValueError: If any input(self/other)'s indices's length is not equal to
|
|
||||||
its values's length.
|
|
||||||
TypeError: If any input(self/other)'s values's type is not equal to anf of
|
|
||||||
(int8/int16/int32/int64/float32/float64/complex64/complex128)
|
|
||||||
TypeError: If thresh's type is not equal to anf of
|
|
||||||
(int8/int16/int32/int64/float32/float64)
|
|
||||||
TypeError: If self's indices's type is not equal to other's indices's type
|
|
||||||
TypeError: If self's values's type is not equal to other's values's type
|
|
||||||
TypeError: If self's shape's type is not equal to other's shape's type
|
|
||||||
TypeError: If (self/other)'s value's type is not matched with thresh's type
|
|
||||||
|
|
||||||
Supported Platforms:
|
|
||||||
``CPU`` ``GPU``
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
>>> from mindspore import Tensor, COOTensor
|
|
||||||
>>> from mindspore import dtype as mstype
|
|
||||||
>>> indics0 = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
|
|
||||||
>>> values0 = Tensor([1, 2], dtype=mstype.int32)
|
|
||||||
>>> shape0 = (3, 4)
|
|
||||||
>>> input0 = COOTensor(indics0, values0, shape0)
|
|
||||||
>>> indics1 = Tensor([[0, 0], [1, 1]], dtype=mstype.int64)
|
|
||||||
>>> values1 = Tensor([3, 4], dtype=mstype.int32)
|
|
||||||
>>> shape1 = (3, 4)
|
|
||||||
>>> input1 = COOTensor(indics1, values1, shape1)
|
|
||||||
>>> thres = Tensor(0, dtype=mstype.int32)
|
|
||||||
>>> out = input0.add(input1, thres)
|
|
||||||
>>> print(out)
|
|
||||||
COOTensor(shape = [3, 4], dtype = Int32, indices=Tensor(shape=[4, 2],
|
|
||||||
dtype = Int64, value=[[0 0], [0 1], [1 1], [1 2]]), values=Tensor(shape[4],
|
|
||||||
dtype=Int32, value=[3 1 4 2]))
|
|
||||||
"""
|
|
||||||
return tensor_operator_registry.get('coo_add')(self, other, thresh)
|
|
||||||
|
|
||||||
|
|
||||||
class CSRTensor(CSRTensor_):
    """
    Constructs a sparse tensor in CSR (Compressed Sparse Row) format, with specified
    values indicated by `values` and row and column positions indicated by `indptr`
    and `indices`.

    For example, if indptr is [0, 1, 2, 2], indices is [1, 2], values is [1., 2.], shape is
    (3, 4), then the dense representation of the sparse tensor will be:

    .. code-block::

        [[0., 1., 0., 0.],
         [0., 0., 2., 0.],
         [0., 0., 0., 0.]]

    Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
    and division (/). For details about operations supported by `CSRTensor`, see
    [operators](https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html#operators).

    Note:
        This is an experimental feature and is subject to change.
        If the values given by `indptr` or `indices` are invalid, the results may be undefined. Invalid values include
        when the length of `values` or `indices` exceeds the range indicated by indptr, and when the columns indicated by
        `indices` are repeated on the same row.

    Args:
        indptr (Tensor): 1-D Tensor of shape `[M]`, which equals to `shape[0] + 1`, which indicates the
            start and end point for `values` in each row. Default: None. If provided,
            must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
        indices (Tensor): 1-D Tensor of shape `[N]`, which has the same length as `values`. `indices`
            indicates in which column `values` should be placed. Default: None. If provided,
            must be :class:`mindspore.int16`, :class:`mindspore.int32` or :class:`mindspore.int64`.
        values (Tensor): Tensor, which has the same length as `indices` (values.shape[0] == indices.shape[0]).
            `values` stores the data for CSRTensor. Default: None.
        shape (tuple(int)): A tuple indicates the shape of the CSRTensor, and `shape[0]` must equal `M - 1`,
            which all equal to number of rows of the CSRTensor. Default: None.
        csr_tensor (CSRTensor): A CSRTensor object. Values' feature dimension should match with
            CSRTensor's feature dimension (values.shape[1:] == csr_tensor.shape[2:]). Default: None.

    Outputs:
        CSRTensor, with shape defined by `shape`, and dtype inferred from `value`.

    Examples:
        >>> import mindspore as ms
        >>> from mindspore import Tensor, CSRTensor
        >>> # initialize a csr_tensor with indptr, indices, values and shape
        >>> indptr = Tensor([0, 1, 2], dtype=ms.int32)
        >>> indices = Tensor([0, 1], dtype=ms.int32)
        >>> values = Tensor([1, 2], dtype=ms.float32)
        >>> shape = (2, 4)
        >>> csr_tensor = CSRTensor(indptr, indices, values, shape)
        >>> # access a data member of CSRTensor
        >>> print(indptr == csr_tensor.indptr)
        [ True  True  True]
    """

    def __init__(self, indptr=None, indices=None, values=None, shape=None, csr_tensor=None):
        """Init CSRTensor."""
        self.init_finished = False
        # Directly init a CSRTensor from another CSRTensor
        if csr_tensor is not None:
            if not isinstance(csr_tensor, (CSRTensor, CSRTensor_)):
                raise TypeError(f"Expect input `csr_tensor` to be a CSRTensor, but got {type(csr_tensor)}")
            if not (indptr is None and indices is None and values is None and shape is None):
                raise TypeError(
                    "If input `csr_tensor` is provided, `indptr`, `indices`, `values`, `shapes` should all be `None`")
            CSRTensor_.__init__(self, csr_tensor)
        # Init a CSRTensor from indptr, indices, values and shape
        else:
            validator.check_csr_tensor_input(indptr, indices, values, shape)
            validator.check_csr_tensor_shape(indptr.shape, indices.shape, values.shape, shape)
            validator.check_csr_tensor_dtype(indptr.dtype, indices.dtype)
            # Structure tensors (indptr/indices) must not carry gradients.
            indptr = tensor_operator_registry.get('stop_gradient')(indptr)
            indices = tensor_operator_registry.get('stop_gradient')(indices)
            CSRTensor_.__init__(self, indptr, indices, values, shape)
        self.init_finished = True

    def __repr__(self):
        """Avoid PyTest Segfault when CSRTensor is not initialized."""
        if self.init_finished:
            return CSRTensor_.__repr__(self)
        return ''

    def __mul__(self, other):
        res = tensor_operator_registry.get('csr_mul')(self, other)
        return CSRTensor(self.indptr, self.indices, res, self.shape)

    def __div__(self, other):
        logger.warning("For CSR divide, zero values in the dense tensor are ignored.")
        res = tensor_operator_registry.get('csr_div')(self, other)
        return CSRTensor(self.indptr, self.indices, res, self.shape)

    def __truediv__(self, other):
        return self.__div__(other)

    def __neg__(self):
        return CSRTensor(self.indptr, self.indices, -self.values, self.shape)

    def __add__(self, other):
        # Type-check before touching `other.shape` so an unsupported operand
        # raises the intended TypeError instead of an AttributeError.
        if not isinstance(other, CSRTensor):
            raise TypeError("CSRTensor add with %s is not supported." % type(other))
        if not self.shape == other.shape:
            raise ValueError("Input tensors should have the same shape.")
        # A + B == csr_add(A, B, alpha=1, beta=1)
        return tensor_operator_registry.get('csr_add')(
            self, other, Tensor(1, self.values.dtype), Tensor(1, self.values.dtype))

    def __sub__(self, other):
        # Type-check before touching `other.shape` so an unsupported operand
        # raises the intended TypeError instead of an AttributeError.
        if not isinstance(other, CSRTensor):
            raise TypeError("CSRTensor subtract with %s is not supported." % type(other))
        if not self.shape == other.shape:
            raise ValueError("Input tensors should have the same shape.")
        # A - B == csr_add(A, B, alpha=1, beta=-1)
        return tensor_operator_registry.get('csr_add')(
            self, other, Tensor(1, self.values.dtype), Tensor(-1, self.values.dtype))

    @property
    def indptr(self) -> Tensor:
        """Return CSRTensor's row indices pointers."""
        return Tensor(self._indptr)

    @property
    def indices(self) -> Tensor:
        """Return CSRTensor's column indices."""
        return Tensor(self._indices)

    @property
    def values(self) -> Tensor:
        """Return CSRTensor's non-zero values."""
        return Tensor(self._values)

    @property
    def shape(self) -> Tuple[int, ...]:
        """Return CSRTensor's shape."""
        return self._shape

    @property
    def dtype(self) -> mstype:
        """Return the dtype of the values of CSRTensor (:class:`mindspore.dtype`)."""
        return self._dtype

    @property
    def size(self) -> int:
        """Return the number of non-zero values."""
        return self.values.size

    @property
    def itemsize(self) -> int:
        """Return the length of one tensor element in bytes."""
        return self.values.itemsize

    @property
    def ndim(self) -> int:
        """Return the number of tensor dimensions."""
        return len(self.shape)

    def to_tuple(self) -> Tuple[Tensor, Tensor, Tensor, Tuple[int, ...]]:
        """
        Return indptr, indices, values and shape as a tuple.

        Returns:
            Tuple.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``
        """
        return self.indptr, self.indices, self.values, self.shape

    def to_coo(self) -> COOTensor:
        """
        Converts CSRTensor to COOTensor.

        Note:
            Currently only supports CPU backend with LLVM 12.0.1 installed.

        Returns:
            COOTensor.

        Supported Platforms:
            ``GPU`` ``CPU``
        """
        if self.ndim != 2:
            raise ValueError("Currently only support 2-D CSRTensor when converting to COOTensor.")
        # Expand indptr into per-value row indices, then pair them with the
        # column indices to build the (N, 2) COO index matrix.
        row_indices = tensor_operator_registry.get("csr2coo")(self.indptr, self.values.shape[0])
        coo_indices = tensor_operator_registry.get("stack")((row_indices, self.indices), 1)
        return COOTensor(coo_indices, self.values, self.shape)

    def to_dense(self) -> Tensor:
        """
        Converts CSRTensor to Dense Tensor.

        Returns:
            Tensor.

        Supported Platforms:
            ``GPU``
        """
        return tensor_operator_registry.get("csr_to_dense")(self)

    def astype(self, dtype: mstype) -> CSRTensor:
        """
        Return a copy of the CSRTensor, cast its values to a specified type.

        Args:
            dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype.

        Returns:
            CSRTensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> import mindspore as ms
            >>> from mindspore import Tensor, CSRTensor
            >>> indptr = Tensor([0, 1, 2], dtype=ms.int32)
            >>> indices = Tensor([0, 1], dtype=ms.int32)
            >>> values = Tensor([1, 2], dtype=ms.float32)
            >>> shape = (2, 4)
            >>> csr_tensor = CSRTensor(indptr, indices, values, shape)
            >>> print(csr_tensor.astype(ms.float64).dtype)
            Float64
        """
        data = self.values.astype(dtype)
        return CSRTensor(self.indptr, self.indices, data, self.shape)

    def mv(self, dense_vector: Tensor) -> Tensor:
        """
        Return the matrix multiplication result of the right-multiply dense matrix of the CSRTensor.
        The CSRTensor with shape `[M, N]` needs to adapt the dense vector with shape `[N, 1]`
        to get the dense vector with result `[M, 1]`.

        Note:
            Currently only supports CPU backend with LLVM 12.0.1 installed.

        Args:
            dense_vector (Tensor): A dense Tensor, its shape must be (csr_tensor.shape[1], 1)

        Returns:
            Tensor.

        Supported Platforms:
            ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor, CSRTensor
            >>> from mindspore import dtype as mstype
            >>> indptr = Tensor([0, 1, 2], dtype=mstype.int32)
            >>> indices = Tensor([0, 1], dtype=mstype.int32)
            >>> values = Tensor([2, 1], dtype=mstype.float32)
            >>> dense_shape = (2, 4)
            >>> csr_tensor = CSRTensor(indptr, indices, values, dense_shape)
            >>> dense = Tensor([[1], [1], [1], [1]], dtype=mstype.float32)
            >>> print(csr_tensor.mv(dense))
            [[2.]
            [1.]]
        """
        validator.check_value_type('dense_vector', dense_vector, (Tensor_,), 'CSRTensor.mv')
        return tensor_operator_registry.get("csr_mv")(self, dense_vector)

    def mm(self, dense_matrix: Tensor) -> Tensor:
        """
        Return the matrix multiplication result of the right-multiply dense matrix of the CSRTensor.
        The CSRTensor with shape `[M, N]` needs to adapt the dense matrix with shape `[N, K]`
        to get the dense matrix with result `[M, K]`.

        Note:
            Currently only supports CPU backend with LLVM 12.0.1 installed.

        Args:
            dense_matrix (Tensor): A dense Tensor, its shape[0] should be equal to csr_tensor.shape[1]

        Returns:
            Tensor.

        Supported Platforms:
            ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor, CSRTensor
            >>> from mindspore import dtype as mstype
            >>> indptr = Tensor([0, 1, 2], dtype=mstype.int32)
            >>> indices = Tensor([0, 1], dtype=mstype.int32)
            >>> values = Tensor([2, 1], dtype=mstype.float32)
            >>> dense_shape = (2, 4)
            >>> csr_tensor = CSRTensor(indptr, indices, values, dense_shape)
            >>> dense_matrix = Tensor([[1., 2.], [1, 2.], [1, 2.], [1., 2.]], dtype=mstype.float32)
            >>> print(csr_tensor.mm(dense_matrix))
            [[2. 4.]
            [1. 2.]]
        """
        validator.check_value_type('dense_matrix', dense_matrix, (Tensor_,), 'CSRTensor.mm')
        # 'csr_mm' is a primitive factory; instantiate then call with the
        # unpacked CSR components.
        return tensor_operator_registry.get("csr_mm")()(self.indptr, self.indices, self.values,
                                                        self.shape, dense_matrix)

    def sum(self, axis: int) -> Tensor:
        """
        Reduces a dimension of a CSRTensor by summing all elements in the dimension.

        Note:
            Currently only supports CPU backend with LLVM 12.0.1 installed.

        Args:
            axis (int): The dimensions to reduce.

        Returns:
            Tensor, the dtype is the same as `CSRTensor.values`.

        Supported Platforms:
            ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor, CSRTensor
            >>> from mindspore import dtype as mstype
            >>> indptr = Tensor([0, 1, 2], dtype=mstype.int32)
            >>> indices = Tensor([0, 1], dtype=mstype.int32)
            >>> values = Tensor([2, 1], dtype=mstype.float32)
            >>> dense_shape = (2, 4)
            >>> csr_tensor = CSRTensor(indptr, indices, values, dense_shape)
            >>> print(csr_tensor.sum(1))
            [[2.]
            [1.]]
        """
        return tensor_operator_registry.get("csr_reduce_sum")(self, axis)

    def abs(self) -> CSRTensor:
        """
        Return absolute value element-wisely.

        Returns:
            CSRTensor, with all values being non-negative.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``
        """
        data = self.values.abs()
        return CSRTensor(self.indptr, self.indices, data, self.shape)

    def add(self, b: CSRTensor, alpha: Tensor, beta: Tensor) -> CSRTensor:
        """
        Addition of two CSR Tensors : C = alpha * A + beta * B

        Args:
            b (CSRTensor): Sparse CSR Tensor.
            alpha(Tensor): Dense Tensor, its shape must be able to broadcast to self.
            beta(Tensor): Dense Tensor, its shape must be able to broadcast to b.

        Returns:
            CSRTensor.

        Supported Platforms:
            ``GPU`` ``CPU``

        Examples:
            >>> from mindspore import Tensor, CSRTensor
            >>> import mindspore.common.dtype as mstype
            >>> indptr = Tensor([0, 1, 2], dtype=mstype.int32)
            >>> indices = Tensor([0, 1], dtype=mstype.int32)
            >>> values_a = Tensor([2, 1], dtype=mstype.float32)
            >>> values_b = Tensor([1, 2], dtype=mstype.float32)
            >>> dense_shape = (2, 4)
            >>> alpha = Tensor(1, mstype.float32)
            >>> beta = Tensor(1, mstype.float32)
            >>> a = CSRTensor(indptr, indices, values_a, dense_shape)
            >>> b = CSRTensor(indptr, indices, values_b, dense_shape)
            >>> print(a.add(b, alpha, beta))
            CSRTensor(shape=[2,4], dtype=Float32,
                      indptr=Tensor(shape=[3], dtype=Int32, value = [0, 1, 2]),
                      indices=Tensor(shape=[2], dtype=Int32, value = [0, 1]),
                      values=Tensor(shape=[2], dtype=Float32, value = [3.0, 3.0]))
        """
        return tensor_operator_registry.get('csr_add')(self, b, alpha, beta)
|
|
||||||
|
|
||||||
|
|
||||||
class MapTensor(MapTensor_):
|
class MapTensor(MapTensor_):
|
||||||
"""
|
"""
|
||||||
MapTensor is a tensor that stores a map like data structure.
|
MapTensor is a tensor that stores a map like data structure.
|
||||||
|
|
|
@ -26,7 +26,8 @@ from mindspore.nn.cell import Cell
|
||||||
from mindspore.nn.layer.container import CellList
|
from mindspore.nn.layer.container import CellList
|
||||||
from mindspore.common.parameter import Parameter, ParameterTuple
|
from mindspore.common.parameter import Parameter, ParameterTuple
|
||||||
from mindspore.common.initializer import initializer
|
from mindspore.common.initializer import initializer
|
||||||
from mindspore.common.tensor import Tensor, RowTensorInner
|
from mindspore.common import Tensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
import mindspore.common.dtype as mstype
|
import mindspore.common.dtype as mstype
|
||||||
from mindspore._checkparam import Validator as validator
|
from mindspore._checkparam import Validator as validator
|
||||||
from mindspore import log as logger
|
from mindspore import log as logger
|
||||||
|
|
|
@ -19,12 +19,12 @@ from mindspore import context
|
||||||
from mindspore import log as logger
|
from mindspore import log as logger
|
||||||
from mindspore.nn.cell import Cell
|
from mindspore.nn.cell import Cell
|
||||||
from mindspore.communication.management import GlobalComm, get_group_size
|
from mindspore.communication.management import GlobalComm, get_group_size
|
||||||
from mindspore.common.tensor import RowTensorInner
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops import functional as F, composite as C
|
from mindspore.ops import functional as F, composite as C
|
||||||
from mindspore.ops.operations.comm_ops import AllReduce, AllGather
|
from mindspore.ops.operations.comm_ops import AllReduce, AllGather
|
||||||
from mindspore.parallel._auto_parallel_context import auto_parallel_context
|
from mindspore.parallel._auto_parallel_context import auto_parallel_context
|
||||||
import mindspore.common.dtype as mstype
|
import mindspore.common.dtype as mstype
|
||||||
from mindspore.common.tensor import Tensor
|
from mindspore.common.sparse_tensor import Tensor
|
||||||
from mindspore.common.api import jit
|
from mindspore.common.api import jit
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -20,7 +20,8 @@ from mindspore.context import ParallelMode
|
||||||
from mindspore.parallel._utils import _get_enable_parallel_optimizer
|
from mindspore.parallel._utils import _get_enable_parallel_optimizer
|
||||||
from mindspore.nn.wrap.cell_wrapper import TrainOneStepCell
|
from mindspore.nn.wrap.cell_wrapper import TrainOneStepCell
|
||||||
from mindspore.nn.cell import Cell
|
from mindspore.nn.cell import Cell
|
||||||
from mindspore.common import Tensor, RowTensorInner
|
from mindspore.common import Tensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common.parameter import Parameter
|
from mindspore.common.parameter import Parameter
|
||||||
from mindspore.ops import functional as F
|
from mindspore.ops import functional as F
|
||||||
from mindspore.ops import composite as C
|
from mindspore.ops import composite as C
|
||||||
|
|
|
@ -28,7 +28,7 @@ from mindspore.ops import functional as F
|
||||||
from mindspore.ops._grad.grad_base import bprop_getters, create_tensor_by_element
|
from mindspore.ops._grad.grad_base import bprop_getters, create_tensor_by_element
|
||||||
from mindspore.ops.primitive import constexpr
|
from mindspore.ops.primitive import constexpr
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common.tensor import RowTensorInner
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops._utils.utils import range_op, get_1d_shape, generate_shape_index, is_shape_unknown
|
from mindspore.ops._utils.utils import range_op, get_1d_shape, generate_shape_index, is_shape_unknown
|
||||||
from .._grad.grad_base import dyn_rank, convert_to_tensor, dyn_invert_permutation, dyn_size, dyn_ones, dyn_fill
|
from .._grad.grad_base import dyn_rank, convert_to_tensor, dyn_invert_permutation, dyn_size, dyn_ones, dyn_fill
|
||||||
from .._grad.grad_base import sum_grad_reduce_axis
|
from .._grad.grad_base import sum_grad_reduce_axis
|
||||||
|
|
|
@ -23,7 +23,7 @@ from mindspore.communication import get_rank, get_group_size
|
||||||
from mindspore.parallel._utils import _get_enable_parallel_optimizer, _get_grad_accumulation_shard
|
from mindspore.parallel._utils import _get_enable_parallel_optimizer, _get_grad_accumulation_shard
|
||||||
from mindspore.ops import operations as P
|
from mindspore.ops import operations as P
|
||||||
from mindspore.ops.operations._inner_ops import Send, Receive
|
from mindspore.ops.operations._inner_ops import Send, Receive
|
||||||
from mindspore.common.tensor import RowTensorInner
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
|
from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
|
||||||
from mindspore.ops.operations.comm_ops import (AllGather, _MiniStepAllGather, _HostAllGather, AllReduce,
|
from mindspore.ops.operations.comm_ops import (AllGather, _MiniStepAllGather, _HostAllGather, AllReduce,
|
||||||
NeighborExchange, AlltoAll, NeighborExchangeV2, Broadcast,
|
NeighborExchange, AlltoAll, NeighborExchangeV2, Broadcast,
|
||||||
|
|
|
@ -24,7 +24,7 @@ from mindspore.ops._primitive_cache import _get_cache_prim
|
||||||
from mindspore.ops.operations._inner_ops import TensorCopySlices, SliceGetItem, DynamicBroadcastTo, TopTypeof
|
from mindspore.ops.operations._inner_ops import TensorCopySlices, SliceGetItem, DynamicBroadcastTo, TopTypeof
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common._register_for_tensor import tensor_operator_registry
|
from mindspore.common._register_for_tensor import tensor_operator_registry
|
||||||
from mindspore.common.tensor import Tensor, CSRTensor, COOTensor
|
from mindspore.common import Tensor, CSRTensor, COOTensor
|
||||||
from mindspore.common._utils import is_shape_unknown
|
from mindspore.common._utils import is_shape_unknown
|
||||||
|
|
||||||
slice_get_item = SliceGetItem()
|
slice_get_item = SliceGetItem()
|
||||||
|
|
|
@ -34,7 +34,7 @@ from mindspore._checkparam import _check_3d_int_or_tuple
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common._decorator import deprecated
|
from mindspore.common._decorator import deprecated
|
||||||
from mindspore.common.parameter import Parameter
|
from mindspore.common.parameter import Parameter
|
||||||
from mindspore.common.tensor import Tensor, CSRTensor, COOTensor
|
from mindspore.common import Tensor, CSRTensor, COOTensor
|
||||||
from mindspore._c_expression import Tensor as Tensor_
|
from mindspore._c_expression import Tensor as Tensor_
|
||||||
from mindspore._c_expression import CSRTensor as CSRTensor_
|
from mindspore._c_expression import CSRTensor as CSRTensor_
|
||||||
from mindspore._c_expression import COOTensor as COOTensor_
|
from mindspore._c_expression import COOTensor as COOTensor_
|
||||||
|
|
|
@ -28,7 +28,8 @@ from mindspore.ops import functional as F
|
||||||
from mindspore.ops import composite as C
|
from mindspore.ops import composite as C
|
||||||
|
|
||||||
import mindspore.common.dtype as mstype
|
import mindspore.common.dtype as mstype
|
||||||
from mindspore.common.tensor import Tensor, RowTensorInner
|
from mindspore.common.tensor import Tensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common.parameter import Parameter
|
from mindspore.common.parameter import Parameter
|
||||||
from mindspore.common.initializer import initializer, _calculate_correct_fan, One
|
from mindspore.common.initializer import initializer, _calculate_correct_fan, One
|
||||||
|
|
||||||
|
|
|
@ -14,7 +14,7 @@
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
import pytest
|
import pytest
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from mindspore import RowTensorInner
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore import context, nn, Tensor, ParameterTuple
|
from mindspore import context, nn, Tensor, ParameterTuple
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common import jit
|
from mindspore.common import jit
|
||||||
|
|
|
@ -17,7 +17,8 @@
|
||||||
import pytest
|
import pytest
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
from mindspore import Tensor, RowTensorInner, nn, context
|
from mindspore import Tensor, nn, context
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -18,11 +18,12 @@ import pytest
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
import mindspore.nn as nn
|
import mindspore.nn as nn
|
||||||
from mindspore import Tensor, CSRTensor, COOTensor, RowTensorInner, jit, jit_class, context
|
from mindspore import Tensor, CSRTensor, COOTensor, jit, jit_class, context
|
||||||
from mindspore.ops import Primitive
|
from mindspore.ops import Primitive
|
||||||
from mindspore.ops import operations as P
|
from mindspore.ops import operations as P
|
||||||
from mindspore.ops import functional as F
|
from mindspore.ops import functional as F
|
||||||
from mindspore.common.mutable import mutable
|
from mindspore.common.mutable import mutable
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
|
|
||||||
context.set_context(mode=context.GRAPH_MODE)
|
context.set_context(mode=context.GRAPH_MODE)
|
||||||
|
|
||||||
|
|
|
@ -30,8 +30,9 @@ from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
|
||||||
from mindspore.ops.primitive import constexpr, PrimitiveWithInfer, prim_attr_register
|
from mindspore.ops.primitive import constexpr, PrimitiveWithInfer, prim_attr_register
|
||||||
from mindspore.ops._grad.grad_base import bprop_getters
|
from mindspore.ops._grad.grad_base import bprop_getters
|
||||||
from mindspore.ops._utils.utils import generate_shape_index
|
from mindspore.ops._utils.utils import generate_shape_index
|
||||||
from mindspore import Tensor, RowTensorInner, context
|
from mindspore import Tensor, context
|
||||||
from mindspore.common.parameter import Parameter, ParameterTuple
|
from mindspore.common.parameter import Parameter, ParameterTuple
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore._checkparam import Validator as validator
|
from mindspore._checkparam import Validator as validator
|
||||||
from mindspore._checkparam import Rel
|
from mindspore._checkparam import Rel
|
||||||
|
|
|
@ -14,8 +14,8 @@
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
import pytest
|
import pytest
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from mindspore import RowTensorInner
|
|
||||||
from mindspore import context, nn, Tensor, ParameterTuple
|
from mindspore import context, nn, Tensor, ParameterTuple
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.common import dtype as mstype
|
from mindspore.common import dtype as mstype
|
||||||
from mindspore.common import jit
|
from mindspore.common import jit
|
||||||
from mindspore.ops import composite as C
|
from mindspore.ops import composite as C
|
||||||
|
|
|
@ -21,7 +21,8 @@
|
||||||
import pytest
|
import pytest
|
||||||
import mindspore as ms
|
import mindspore as ms
|
||||||
import mindspore.nn as nn
|
import mindspore.nn as nn
|
||||||
from mindspore import context, Tensor, RowTensorInner, COOTensor
|
from mindspore import context, Tensor, COOTensor
|
||||||
|
from mindspore.common.sparse_tensor import RowTensorInner
|
||||||
from mindspore.ops import composite as C
|
from mindspore.ops import composite as C
|
||||||
|
|
||||||
@pytest.fixture(scope="module", autouse=True)
|
@pytest.fixture(scope="module", autouse=True)
|
||||||
|
|
Loading…
Reference in New Issue