forked from mindspore-Ecosystem/mindspore
!6201 perfect annotation of ops and context and support bool equal
Merge pull request !6201 from zhangbuxue/perfect_annotation_for_ops_and_context
This commit is contained in: commit 2f451fa1b0
@@ -14,7 +14,7 @@
 # ============================================================================
 """
 The context of mindspore, used to configure the current execution environment,
-including execution mode, execution backend and other feature switches.
+includes the execution mode, execution backend and other feature switches.
 """
 import os
 import time
@@ -338,40 +338,40 @@ def set_auto_parallel_context(**kwargs):

 Note:
     Attribute name is required for setting attributes.
-    If a program has tasks with different parallel modes, then before setting new parallel mode for
+    If a program has tasks with different parallel modes, then before setting a new parallel mode for the
     next task, interface mindspore.context.reset_auto_parallel_context() needs to be called to reset
     the configuration.
-    Setting or changing parallel modes must be called before any Initializer created, or RuntimeError
-    may be raised when compile network.
+    Setting or changing parallel modes must be called before creating any Initializer, otherwise,
+    RuntimeError may be raised when compiling the network.

 Args:
     device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
     global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
     gradients_mean (bool): Whether to perform mean operator after all-reduce of mirror.
-        "stand_alone" do not support gradients_mean. Default: False.
-    gradient_fp32_sync (bool): Gradients allreduce by fp32 even though gradients is fp16 if this flag is True..
+        "stand_alone" does not support `gradients_mean`. Default: False.
+    gradient_fp32_sync (bool): Gradients allreduce by fp32, even though gradients is fp16, if this flag is True.
         "stand_alone", "data_parallel" and "hybrid_parallel" do not support
         gradient_fp32_sync. Default: True.
     parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
         "hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Default: "stand_alone".

-        - stand_alone: Only one processor working.
+        - stand_alone: Only one processor is working.

-        - data_parallel: Distributing the data across different processors.
+        - data_parallel: Distributes the data across different processors.

-        - hybrid_parallel: Achieving data parallelism and model parallelism manually.
+        - hybrid_parallel: Achieves data parallelism and model parallelism manually.

-        - semi_auto_parallel: Achieving data parallelism and model parallelism by
+        - semi_auto_parallel: Achieves data parallelism and model parallelism by
          setting parallel strategies.

-        - auto_parallel: Achieving parallelism automatically.
+        - auto_parallel: Achieves parallelism automatically.
     auto_parallel_search_mode (str): There are two kinds of search modes, "recursive_programming"
         and "dynamic_programming". Default: "dynamic_programming".

         - recursive_programming: Recursive programming search mode.

         - dynamic_programming: Dynamic programming search mode.
-    parameter_broadcast (bool): Indicating whether to broadcast parameters before training.
+    parameter_broadcast (bool): Whether to broadcast parameters before training.
         "stand_alone", "semi_auto_parallel" and "auto_parallel" do not support parameter
         broadcast. Default: False.
     strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
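To make the reset requirement in the Note concrete, here is a minimal usage sketch (the 8-device data-parallel setup is an illustrative assumption, not part of this commit):

```python
from mindspore import context

# Configure auto-parallel for the first task (assumes distributed
# communication has already been initialized for 8 devices).
context.set_auto_parallel_context(device_num=8, global_rank=0,
                                  parallel_mode="data_parallel",
                                  gradients_mean=True)

# ... build and run the first task ...

# Reset before configuring a task with a different parallel mode,
# as the Note above requires.
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
```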
@@ -468,7 +468,7 @@ def set_context(**kwargs):

 When the `save_graphs` attribute is set to True, attribute of `save_graphs_path` is used to set the
 intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
-As for other configurations and arguments, please refer to the corresponding module
+For other configurations and arguments, please refer to the corresponding module
 description, the configuration is optional and can be enabled when needed.

 Note:
@@ -498,9 +498,9 @@ def set_context(**kwargs):

 Args:
     mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: PYNATIVE_MODE(1).
-    device_target (str): The target device to run, support "Ascend", "GPU", "CPU". Default: "Ascend".
-    device_id (int): Id of target device, the value must be in [0, device_num_per_host-1],
-        while device_num_per_host should no more than 4096. Default: 0.
+    device_target (str): The target device to run, support "Ascend", "GPU", and "CPU". Default: "Ascend".
+    device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
+        while device_num_per_host should be no more than 4096. Default: 0.
     save_graphs (bool): Whether to save graphs. Default: False.
     save_graphs_path (str): Path to save graphs. Default: "."
     enable_auto_mixed_precision (bool): Whether to enable auto mixed precision. Default: False.
@@ -509,33 +509,34 @@ def set_context(**kwargs):
     reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
     enable_reduce_precision (bool): Whether to enable precision reduction. Default: True.
     enable_dump (bool): Whether to enable dump. Default: False.
-    save_dump_path (str): When the program is executed on Ascend, operators can dump data here.
+    save_dump_path (str): When the program is executed on Ascend, operators can dump data in this path.
         The root dump path is configured in /home/HwHiAiUser/ide_daemon/ide_daemon.cfg.
         So the real dump path is "{configured root dump path}/{`save_dump_path`}". Default: ".".
-    variable_memory_max_size (str): Sets variable memory max size. Default: "0GB".
+    variable_memory_max_size (str): Set the maximum size of the variable memory. Default: "0GB".
     enable_profiling (bool): Whether to open profiling. Default: False.
-    profiling_options (str): Sets profiling collection options, operators can profiling data here.
-        Profiling collection options, the values are as follows, supporting the collection of multiple data.
+    profiling_options (str): Set profiling collection options, operators can profile data here.
+        The values of profiling collection options are as follows, supporting the collection of multiple data.

         - training_trace: collect iterative trajectory data, that is, the training task and software information of
          the AI software stack, to achieve performance analysis of the training task, focusing on data
          enhancement, forward and backward calculation, gradient aggregation update and other related data.

         - task_trace: collect task trajectory data, that is, the hardware information of the HWTS/AICore of
-          the Ascend 910 processor, and analyze the information of start and end of the task.
+          the Ascend 910 processor, and analyze the information of beginning and ending of the task.

         - op_trace: collect single operator performance data.
-        The profiling can choose training_trace, task_trace, training_trace and task_trace combination and
-        separated by colons; single operator can choose op_trace, op_trace cannot be combined with
-        training_trace and task_trace. Default: "training_trace".
+        The profiling can choose `training_trace`, `task_trace`, or the combination of
+        `training_trace` and `task_trace`, separated by colons;
+        a single operator can choose `op_trace`; `op_trace` cannot be combined with
+        `training_trace` and `task_trace`. Default: "training_trace".
     check_bprop (bool): Whether to check bprop. Default: False.
-    max_device_memory (str): Sets the maximum memory available for device, currently only supported on GPU.
-        The format is "xxGB". Default: "1024GB".
-    print_file_path (str): The path of print data to save. If this parameter is set, print data is saved to
-        a file by default, and turn off printing to the screen. If the file already exists, add a timestamp
+    max_device_memory (str): Sets the maximum memory available for devices.
+        Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
+    print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
+        a file by default, and printing to the screen is turned off. If the file already exists, add a timestamp
         suffix to the file. Default: ''.
     enable_sparse (bool): Whether to enable sparsity feature. Default: False.
-    max_call_depth(int): Specify the function call depth limit. Default: 1000.
+    max_call_depth (int): Specify the maximum depth of function call. Default: 1000.

 Raises:
     ValueError: If input key is not an attribute in context.
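A hedged sketch of how these options combine (the target, path, and values below are assumptions for illustration, not values from the commit):

```python
from mindspore import context

# Run in graph mode on an Ascend device and save intermediate
# compilation graphs for inspection.
context.set_context(mode=context.GRAPH_MODE,
                    device_target="Ascend",
                    device_id=0,
                    save_graphs=True,
                    save_graphs_path="./ir_graphs",
                    max_call_depth=1000)
```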
@@ -614,13 +615,13 @@ class ParallelMode:
 There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
 "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".

-    - STAND_ALONE: Only one processor working.
-    - DATA_PARALLEL: Distributing the data across different processors.
-    - HYBRID_PARALLEL: Achieving data parallelism and model parallelism manually.
-    - SEMI_AUTO_PARALLEL: Achieving data parallelism and model parallelism by setting parallel strategies.
-    - AUTO_PARALLEL: Achieving parallelism automatically.
+    - STAND_ALONE: Only one processor is working.
+    - DATA_PARALLEL: Distributes the data across different processors.
+    - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
+    - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
+    - AUTO_PARALLEL: Achieves parallelism automatically.

-    MODE_LIST: The list for all supported parallel modes.
+    MODE_LIST: The list of all supported parallel modes.
 """

 STAND_ALONE = "stand_alone"
@@ -26,6 +26,21 @@ using ".register" decorator
 """

+
+@equal.register("Bool", "Bool")
+def _equal_bool(x, y):
+    """
+    Determine if two bool objects are equal.
+
+    Args:
+        x (bool): first input bool object.
+        y (bool): second input bool object.
+
+    Returns:
+        bool, if x == y return True, otherwise return False.
+    """
+    return F.bool_eq(x, y)
+

 @equal.register("Number", "Number")
 def _equal_scalar(x, y):
     """
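With the new ("Bool", "Bool") overload registered, `==` on two bool values can dispatch inside a compiled graph. A minimal sketch of what this enables (the cell below is an illustrative assumption, not code from the commit):

```python
from mindspore import context, nn

context.set_context(mode=context.GRAPH_MODE)

class BoolEqual(nn.Cell):
    """Compares two bool values inside a graph-mode cell."""
    def construct(self, x, y):
        # Dispatches to the _equal_bool overload registered above.
        return x == y

net = BoolEqual()
print(net(True, True))   # expected: True
print(net(True, False))  # expected: False
```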
@@ -123,6 +123,7 @@ string_concat = Primitive('string_concat')
 bool_not = Primitive("bool_not")
 bool_or = Primitive("bool_or")
 bool_and = Primitive("bool_and")
+bool_eq = Primitive("bool_eq")
 logical_and = P.LogicalAnd()
 logical_or = P.LogicalOr()
 logical_not = P.LogicalNot()
@@ -289,7 +289,7 @@ class Cast(PrimitiveWithInfer):

 class IsSubClass(PrimitiveWithInfer):
     """
-    Check whether one type is sub class of another type.
+    Check whether one type is a subclass of another type.

     Inputs:
         - **sub_type** (mindspore.dtype) - The type to be checked. Only constant value is allowed.
@@ -478,7 +478,7 @@ class DynamicShape(Primitive):

 class Squeeze(PrimitiveWithInfer):
     """
-    Returns a tensor with the same type but dimensions of 1 being removed based on axis.
+    Returns a tensor with the same type but with dimensions of 1 removed, based on `axis`.

     Note:
         The dimension index starts at 0 and must be in the range `[-input.dim(), input.dim())`.
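For reference, a short usage sketch of the operator documented here (the shapes are illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

input_x = Tensor(np.ones((3, 2, 1)), mindspore.float32)
squeeze = P.Squeeze(2)       # remove the size-1 dimension at axis 2
output = squeeze(input_x)    # resulting shape: (3, 2)
```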
@@ -536,7 +536,7 @@ class Squeeze(PrimitiveWithInfer):

 class Transpose(PrimitiveWithInfer):
     """
-    Permutes the dimensions of input tensor according to input perm.
+    Permutes the dimensions of the input tensor according to the input permutation.

     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -545,7 +545,7 @@ class Transpose(PrimitiveWithInfer):
     allowed.

 Outputs:
-    Tensor, the type of output tensor is same as `input_x` and the shape of output tensor is decided by the
+    Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
     shape of `input_x` and the value of `input_perm`.

 Examples:
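A minimal usage sketch (shapes are illustrative assumptions):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
transpose = P.Transpose()
output = transpose(input_x, (1, 0))   # input_perm (1, 0): shape (2, 3) -> (3, 2)
```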
@@ -654,7 +654,7 @@ class SparseGatherV2(GatherV2):
     - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
       The original Tensor.
     - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
-      Specifies the indices of elements of the original Tensor. Must be in the range
+      Specifies the indices of elements of the original Tensor, must be in the range
       `[0, input_param.shape[axis])`.
     - **axis** (int) - Specifies the dimension index to gather indices.
@@ -718,15 +718,15 @@ class Split(PrimitiveWithInfer):
     output_num (int): The number of output tensors. Default: 1.

 Raises:
-    ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)),
-        or if the output_num is less than or equal to 0, or if the
-        dimension which to split cannot be evenly divided by output_num.
+    ValueError: If `axis` is out of the range [-len(`input_x.shape`), len(`input_x.shape`)),
+        or if the `output_num` is less than or equal to 0, or if the
+        dimension which to split cannot be evenly divided by `output_num`.

 Inputs:
     - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

 Outputs:
-    tuple[Tensor], the shape of each output tensor is same, which is
+    tuple[Tensor], the shape of each output tensor is the same, which is
     :math:`(y_1, y_2, ..., y_S)`.

 Examples:
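A minimal usage sketch (the input values are illustrative assumptions):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
split = P.Split(axis=1, output_num=2)  # axis 1 (size 4) divides evenly by 2
out0, out1 = split(x)                  # each output has shape (2, 2)
```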
@@ -806,14 +806,14 @@ class TruncatedNormal(PrimitiveWithInfer):
     The generated values follow a normal distribution.

 Args:
-    seed (int): A int number used to create random seed. Default: 0.
+    seed (int): An integer number used to create a random seed. Default: 0.
     dtype (:class:`mindspore.dtype`): Data type. Default: mindspore.float32.

 Inputs:
-    - **shape** (tuple[int]) - Shape of output tensor, is a tuple of positive int.
+    - **shape** (tuple[int]) - The shape of the output tensor, is a tuple of positive integers.

 Outputs:
-    Tensor, type of output tensor is same as attribute `dtype`.
+    Tensor, the data type of the output tensor is the same as attribute `dtype`.

 Examples:
     >>> shape = (1, 2, 3)
@@ -953,13 +953,13 @@ class ZerosLike(PrimitiveWithInfer):
 """
 Creates a new tensor. All elements value are 0.

-Returns a tensor of zeros with the same shape and type as the input tensor.
+Returns a tensor of zeros with the same shape and data type as the input tensor.

 Inputs:
     - **input_x** (Tensor) - Input tensor.

 Outputs:
-    Tensor, has the same shape and type as `input_x` but filled with zeros.
+    Tensor, has the same shape and data type as `input_x` but filled with zeros.

 Examples:
     >>> zeroslike = P.ZerosLike()
@@ -982,15 +982,16 @@ class ZerosLike(PrimitiveWithInfer):

 class TupleToArray(PrimitiveWithInfer):
     """
-    Converts a tuple to tensor.
+    Convert a tuple to a tensor.

-    If the first number type of tuple is int, the output tensor type is int. Else, the output tensor type is float.
+    If the type of the first number in the tuple is integer, the data type of the output tensor is int.
+    Otherwise, the data type of the output tensor is float.

     Inputs:
         - **input_x** (tuple) - A tuple of numbers. These numbers have the same type. Only constant value is allowed.

     Outputs:
-        Tensor, if the input tuple contain `N` numbers, then the shape of the output tensor is (N,).
+        Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).

     Examples:
         >>> type = P.TupleToArray()((1,2,3))
@@ -1355,7 +1356,7 @@ class Tile(PrimitiveWithInfer):
 Replicates a tensor with given multiples times.

 Creates a new tensor by replicating input multiples times. The dimension of
-output tensor is the larger of the dimension length of input and the length of multiples.
+output tensor is the larger of the input tensor dimension and the length of `multiples`.

 Inputs:
     - **input_x** (Tensor) - 1-D or higher Tensor. Set the shape of input tensor as
@@ -1363,16 +1364,17 @@ class Tile(PrimitiveWithInfer):

     - **multiples** (tuple[int]) - The input tuple is constructed by multiple
       integers, i.e., :math:`(y_1, y_2, ..., y_S)`. The length of `multiples`
-      can't be smaller than the length of shape in `input_x`. Only constant value is allowed.
+      cannot be smaller than the length of the shape of `input_x`.
+      Only constant value is allowed.

 Outputs:
-    Tensor, has the same type as the `input_x`.
+    Tensor, has the same data type as the `input_x`.

-    - If the length of `multiples` is the same as the length of shape in `input_x`,
+    - If the length of `multiples` is the same as the length of the shape of `input_x`,
      then the shape of their corresponding positions can be multiplied, and
      the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
-    - If the length of `multiples` is larger than the length of shape in `input_x`,
-      fill in multiple 1 in front of the shape in `input_x` until their lengths are consistent.
+    - If the length of `multiples` is larger than the length of the shape of `input_x`,
+      fill in multiple 1 in front of the shape of `input_x` until their lengths are consistent.
      Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
      then the shape of their corresponding positions can be multiplied, and
      the shape of Outputs is :math:`(1*y_1, ..., x_S*y_R)`.
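A minimal usage sketch of the shape arithmetic described above (values are illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
tile = P.Tile()
output = tile(input_x, (2, 3))   # shape (2, 2) -> (2*2, 2*3) = (4, 6)
```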
@@ -1501,7 +1503,7 @@ class UnsortedSegmentMin(PrimitiveWithInfer):
     - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.

 Outputs:
-    Tensor, Set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+    Tensor. Set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.

 Examples:
     >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
@@ -1552,7 +1554,7 @@ class UnsortedSegmentProd(PrimitiveWithInfer):
     should be greater than 0.

 Outputs:
-    Tensor, Set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
+    Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.

 Examples:
     >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
@@ -1783,10 +1785,10 @@ class Unpack(PrimitiveWithInfer):

 Inputs:
     - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
-      A rank R > 0 Tensor to be unpacked.
+      A tensor to be unpacked and the rank of the tensor must be greater than 0.

 Outputs:
-    A tuple of Tensors, the shape of each objects is same.
+    A tuple of tensors, the shape of each object is the same.

 Raises:
     ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
@@ -1834,7 +1836,7 @@ class Unpack(PrimitiveWithInfer):

 class Slice(PrimitiveWithInfer):
     """
-    Slice a tensor in specified shape.
+    Slice a tensor in the specified shape.

     Args:
         x (Tensor): The target tensor.
@@ -1955,16 +1957,16 @@ class Select(PrimitiveWithInfer):

 Given a tensor as input, this operation inserts a dimension of 1 at the dimension,
 if both :math:`x` and :math:`y` are none, the operation returns the coordinates of the true
-element in the condition, the coordinates are returned as a two-dimensional
+element in the `condition`, the coordinates are returned as a two-dimensional
 tensor, where the first dimension (row) represents the number of true elements
 and the second dimension (columns) represents the coordinates of the true
 elements. Keep in mind that the shape of the output tensor can vary depending
-on how much of the true value is in the input. Indexes are output in row-first
+on how many true values are in the input. Indexes are output in row-first
 order.

 If neither is None, :math:`x` and :math:`y` must have the same shape. If :math:`x` and :math:`y` are
 scalars, the conditional tensor must be a scalar. If :math:`x` and :math:`y` are
-higher-demensional vectors, the condition must be a vector whose size matches the
+higher-dimensional vectors, the `condition` must be a vector whose size matches the
 first dimension of :math:`x`, or must have the same shape as :math:`y`.

 The conditional tensor acts as an optional compensation (mask), which
@@ -1979,14 +1981,14 @@ class Select(PrimitiveWithInfer):

 Inputs:
     - **input_x** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N)`.
-      The condition tensor, decides whose element is chosen.
+      The condition tensor, decides which element is chosen.
     - **input_y** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
       The first input tensor.
     - **input_z** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
       The second input tensor.

 Outputs:
-    Tensor, has the same shape as input_y. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
+    Tensor, has the same shape as `input_y`. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.

 Examples:
     >>> select = P.Select()
@@ -2080,12 +2082,12 @@ def _compute_slicing_length(begin, end, stride, x_shape, i):

 class StridedSlice(PrimitiveWithInfer):
     r"""
-    Extracts a strided slice of a tensor.
+    Extract a strided slice of a tensor.

     Given an input tensor, this operation inserts a dimension of length 1 at the dimension.
-    This operation extracts a fragment of size (end-begin)/stride from the given
-    'input_tensor'. Starting from the position specified by the begin, the fragment
-    continues adding stride to the index until all dimensions are not less than end.
+    This operation extracts a fragment of size (end-begin)/stride from the given 'input_tensor'.
+    Starting from the beginning position, the fragment continues adding stride to the index until
+    all dimensions are not less than the ending position.

     Note:
         The stride may be negative value, which causes reverse slicing.
@@ -2102,22 +2104,22 @@ class StridedSlice(PrimitiveWithInfer):
     - **input_x** (Tensor) - The input Tensor.
     - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
       constant value is allowed.
-    - **end** (tuple[int]) - A tuple or which represents the maximum location where to stop.
+    - **end** (tuple[int]) - A tuple which represents the maximum location where to end.
       Only constant value is allowed.
-    - **strides** (tuple[int]) - A tuple which represents the stride continuously added
-      before reach the maximum location. Only constant value is allowed.
+    - **strides** (tuple[int]) - A tuple which represents the stride that is continuously added
+      before reaching the maximum location. Only constant value is allowed.

 Outputs:
     Tensor.
-    Explain with the following example.
-    - In the 0th dim, begin is 1, end is 2, and strides is 1,
+    The output is explained by the following example.
+    - In the 0th dimension, begin is 1, end is 2, and strides is 1,
      because :math:`1+1=2\geq2`, the interval is :math:`[1,2)`.
-     Thus, return the element with :math:`index = 1` in 0th dim, i.e., [[3, 3, 3], [4, 4, 4]].
-    - In the 1st dim, similarly, the interval is :math:`[0,1)`.
-     Based on the return value of the 0th dim, return the element with :math:`index = 0`,
+     Thus, return the element with :math:`index = 1` in the 0th dimension, i.e., [[3, 3, 3], [4, 4, 4]].
+    - In the 1st dimension, similarly, the interval is :math:`[0,1)`.
+     Based on the return value of the 0th dimension, return the element with :math:`index = 0`,
      i.e., [3, 3, 3].
-    - In the 2nd dim, similarly, the interval is :math:`[0,3)`.
-     Based on the return value of the 1st dim, return the element with :math:`index = 0,1,2`,
+    - In the 2nd dimension, similarly, the interval is :math:`[0,3)`.
+     Based on the return value of the 1st dimension, return the element with :math:`index = 0,1,2`,
      i.e., [3, 3, 3].
    - Finally, the output is [3, 3, 3].
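A sketch that reproduces the walk-through above (the input tensor is the 3x2x3 example the explanation assumes):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

input_x = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
                           [[3, 3, 3], [4, 4, 4]],
                           [[5, 5, 5], [6, 6, 6]]]), mindspore.float32)
strided_slice = P.StridedSlice()
output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
# output: [[[3, 3, 3]]], matching the interval walk-through above
```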
@@ -2358,7 +2360,7 @@ class DiagPart(PrimitiveWithInfer):
 class Eye(PrimitiveWithInfer):
     """

-    Creates a tensor with ones on the diagonal and zeros the rest.
+    Create a tensor with ones on the diagonal and zeros in the rest.

     Inputs:
         - **n** (int) - The number of rows of returned tensor
@@ -2391,12 +2393,12 @@ class Eye(PrimitiveWithInfer):

 class ScatterNd(PrimitiveWithInfer):
     """
-    Scatters a tensor into a new tensor depending on the specified indices.
+    Scatter a tensor into a new tensor depending on the specified indices.

-    Creates an empty tensor, and set values by scattering the update tensor depending on indices.
+    Create an empty tensor, and set values by scattering the update tensor depending on indices.

     Inputs:
-        - **indices** (Tensor) - The index of scattering in the new tensor. With int32 data type.
+        - **indices** (Tensor) - The index of scattering in the new tensor with int32 data type.
         - **update** (Tensor) - The source Tensor to be scattered.
         - **shape** (tuple[int]) - Define the shape of the output tensor, has the same type as indices.
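A minimal usage sketch (indices, values, and shape are illustrative assumptions):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

scatter_nd = P.ScatterNd()
indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
output = scatter_nd(indices, update, (3, 3))
# A zero-initialized (3, 3) tensor with 3.2 at [0, 1] and 1.1 at [1, 1].
```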
@@ -2480,7 +2482,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):

 class GatherNd(PrimitiveWithInfer):
     """
-    Gathers slices from a tensor by indices.
+    Gather slices from a tensor by indices.

     Using given indices to gather slices from a tensor with a specified shape.
@@ -2516,9 +2518,7 @@ class GatherNd(PrimitiveWithInfer):

 class TensorScatterUpdate(PrimitiveWithInfer):
     """
-    Update tensor value by using input indices and value.
-
-    Using given values to update tensor value, along with the input indices.
+    Update the tensor value using given values, along with the input indices.

     Inputs:
         - **input_x** (Tensor) - The target tensor.
@@ -2619,7 +2619,7 @@ class ScatterNdUpdate(_ScatterNdOp):
 Inputs:
     - **input_x** (Parameter) - The target tensor, with data type of Parameter.
     - **indices** (Tensor) - The index of input tensor, with int32 data type.
-    - **update** (Tensor) - The tensor to add to the input tensor, has the same type as input.
+    - **update** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.

 Outputs:
     Tensor, has the same shape and type as `input_x`.
@@ -2757,9 +2757,9 @@ class ScatterAdd(_ScatterOp):

 class ScatterSub(_ScatterOp):
     """
-    Update the value of the input tensor through the sub operation.
+    Update the value of the input tensor through the subtraction operation.

-    Using given values to update tensor value through the sub operation, along with the input indices.
+    Using given values to update tensor value through the subtraction operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

 Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
@@ -2772,9 +2772,10 @@ class ScatterSub(_ScatterOp):

 Inputs:
     - **input_x** (Parameter) - The target parameter.
-    - **indices** (Tensor) - The index to do sub operation whose data type should be mindspore.int32.
-    - **updates** (Tensor) - The tensor doing the sub operation with `input_x`,
-      the data type is same as `input_x`, the shape is `indices_shape + x_shape[1:]`.
+    - **indices** (Tensor) - The index to perform the subtraction operation
+      whose data type should be mindspore.int32.
+    - **updates** (Tensor) - The tensor that performs the subtraction operation with `input_x`,
+      the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

 Outputs:
     Parameter, the updated `input_x`.
@@ -2895,7 +2896,7 @@ class ScatterNdSub(_ScatterNdOp):
 """
 Applies sparse subtraction to individual values or slices in a Tensor.

-Using given values to update tensor value through the sub operation, along with the input indices.
+Using given values to update tensor value through the subtraction operation, along with the input indices.
 This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

 Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
@@ -2909,8 +2910,8 @@ class ScatterNdSub(_ScatterNdOp):
 Inputs:
     - **input_x** (Parameter) - The target parameter.
     - **indices** (Tensor) - The index to do add operation whose data type should be mindspore.int32.
-    - **updates** (Tensor) - The tensor doing the sub operation with `input_x`,
-      the data type is same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.
+    - **updates** (Tensor) - The tensor that performs the subtraction operation with `input_x`,
+      the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.

 Outputs:
     Parameter, the updated `input_x`.
@@ -2939,9 +2940,9 @@ class ScatterNonAliasingAdd(_ScatterNdOp):

 Inputs:
     - **input_x** (Parameter) - The target parameter. The data type should be float16, float32 or int32.
-    - **indices** (Tensor) - The index to do add operation whose data type should be mindspore.int32.
-    - **updates** (Tensor) - The tensor doing the add operation with `input_x`,
-      the data type is same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.
+    - **indices** (Tensor) - The index to perform the addition operation whose data type should be mindspore.int32.
+    - **updates** (Tensor) - The tensor that performs the addition operation with `input_x`,
+      the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.

 Outputs:
     Parameter, the updated `input_x`.
@@ -2987,7 +2988,7 @@ class SpaceToDepth(PrimitiveWithInfer):
     - **x** (Tensor) - The target tensor.

 Outputs:
-    Tensor, the same type as `x`. It must be a 4-D tensor.
+    Tensor, the same data type as `x`. It must be a 4-D tensor.

 Examples:
     >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
@@ -3083,24 +3084,26 @@ class SpaceToBatch(PrimitiveWithInfer):
 r"""
 Divide spatial dimensions into blocks and combine the block size with the original batch.

-This operation will divide spatial dimensions (H, W) into blocks with block_size, the output tensor's H and W
+This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor's H and W
 dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
-product of the original batch and the square of block_size. Prior to division into blocks, the spatial dimensions
+product of the original batch and the square of block_size. Before division, the spatial dimensions
 of the input are zero padded according to paddings if necessary.

 Args:
-    block_size (int): The block size of division, has the value not less than 2.
-    paddings (list): The padding value for H and W dimension, containing 2 sub list, each containing 2 int value.
-        All values must be >= 0. paddings[i] specifies the paddings for spatial dimension i, which corresponds to
-        input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible
-        by block_size.
+    block_size (int): The block size of dividing blocks, with value not less than 2.
+    paddings (list): The padding values for H and W dimension, containing 2 sublists.
+        Each sublist contains 2 integer values. All values must be not less than 0.
+        paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the
+        input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]
+        is divisible by block_size.

 Inputs:
     - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.

 Outputs:
-    Tensor, the output tensor with the same type as input. Assume input shape is :math:`(n, c, h, w)` with
-    :math:`block\_size` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`, where
+    Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with
+    :math:`block\_size` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`,
+    where

     :math:`n' = n*(block\_size*block\_size)`
@@ -3159,7 +3162,7 @@ class BatchToSpace(PrimitiveWithInfer):

 Args:
     block_size (int): The block size of division, has the value not less than 2.
-    crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 sub lists.
+    crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 sublists.
      Each list contains 2 integers.
      All values must be not less than 0. crops[i] specifies the crop values for the spatial dimension i, which
      corresponds to the input dimension i+2. It is required that
@@ -3230,21 +3233,22 @@ class SpaceToBatchND(PrimitiveWithInfer):

 This operation will divide spatial dimensions (H, W) into blocks with block_shape, the output tensor's H and W
 dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
-product of the original batch and the product of block_shape. Prior to division into blocks, the spatial dimensions
-of the input are zero padded according to paddings if necessary.
+product of the original batch and the product of `block_shape`. Before division,
+the spatial dimensions of the input are zero padded according to paddings if necessary.

 Args:
-    block_shape (Union[list(int), tuple(int)]): The block shape of dividing block with all value >= 1.
-        The length of block_shape is M correspoding to the number of spatial dimensions.
-    paddings (list): The padding value for H and W dimension, containing M sub list, each containing 2 int value.
-        All values must be >= 0. paddings[i] specifies the paddings for spatial dimension i, which corresponds to
-        input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible
-        by block_shape[i].
+    block_shape (Union[list(int), tuple(int)]): The block shape of dividing blocks, with all values not less than 1.
+        The length of `block_shape` is M, corresponding to the number of spatial dimensions.
+    paddings (list): The padding values for H and W dimension, containing M sublists.
+        Each sublist contains 2 integer values. All values must be not less than 0.
+        `paddings[i]` specifies the paddings for the spatial dimension i,
+        which corresponds to the input dimension i+2.
+        It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible by block_shape[i].

 Inputs:
     - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.
 Outputs:
-    Tensor, the output tensor with the same type as input. Assume input shape is :math:`(n, c, h, w)` with
+    Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with
     :math:`block\_shape` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`,
     where
@@ -3321,7 +3325,7 @@ class BatchToSpaceND(PrimitiveWithInfer):
 Args:
     block_shape (Union[list(int), tuple(int)]): The block shape of dividing block with all value >= 1.
         The length of block_shape is M corresponding to the number of spatial dimensions.
-    crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 sub list,
+    crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 sublists,
         each containing 2 int value.
         All values must be >= 0. crops[i] specifies the crop values for spatial dimension i, which corresponds to
         input dimension i+2. It is required that input_shape[i+2]*block_shape[i] > crops[i][0]+crops[i][1].
@@ -117,7 +117,7 @@ class ImageSummary(PrimitiveWithInfer):

 class TensorSummary(PrimitiveWithInfer):
     """
-    Output tensor to protocol buffer through tensor summary operator.
+    Output a tensor to a protocol buffer through a tensor summary operator.

     Inputs:
         - **name** (str) - The name of the input variable.
@@ -125,10 +125,10 @@ class TensorAdd(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is number or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is number or bool.
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is number or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is number or bool.

 Outputs:
     Tensor, the shape is the same as the one after broadcasting,
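A minimal usage sketch of the elementwise addition with two tensor inputs (values are illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

add = P.TensorAdd()
input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
output = add(input_x, input_y)   # [5.0, 7.0, 9.0]
```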
@@ -1079,10 +1079,10 @@ class Sub(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is number or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is number or bool.
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is number or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is number or bool.

 Outputs:
     Tensor, the shape is the same as the one after broadcasting,
@@ -1157,10 +1157,10 @@ class SquaredDifference(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is float16, float32, int32 or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is float16, float32, int32 or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is
       float16, float32, int32 or bool.

 Outputs:
@@ -1863,10 +1863,10 @@ class TruncateDiv(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is number or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is number or bool.
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is number or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is number or bool.

 Outputs:
     Tensor, the shape is the same as the one after broadcasting,
@@ -1893,10 +1893,10 @@ class TruncateMod(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is number or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is number or bool.
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is number or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is number or bool.

 Outputs:
     Tensor, the shape is the same as the one after broadcasting,
@@ -2048,10 +2048,10 @@ class Xdivy(_MathBinaryOp):
     the scalar could only be a constant.

 Inputs:
-    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
-      a bool or a tensor whose data type is float16, float32 or bool.
-    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-      a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
+    - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
+      or a tensor whose data type is float16, float32 or bool.
+    - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
+      or a bool when the first input is a tensor, or a tensor whose data type is float16, float32 or bool.

 Outputs:
     Tensor, the shape is the same as the one after broadcasting,
@@ -3069,7 +3069,7 @@ class Sign(PrimitiveWithInfer):
 Note:
     .. math::
         sign(x) = \begin{cases} -1, &if\ x < 0 \cr
-        0, &if\ x == 0 \cr
+        0, &if\ x = 0 \cr
         1, &if\ x > 0\end{cases}

 Inputs:
@@ -3251,7 +3251,7 @@ class SquareSumAll(PrimitiveWithInfer):

 Inputs:
     - **input_x1** (Tensor) - The input tensor. The data type must be float16 or float32.
-    - **input_x2** (Tensor) - The input tensor same type and shape as the `input_x1`.
+    - **input_x2** (Tensor) - The input tensor has the same type and shape as the `input_x1`.

 Note:
     SquareSumAll only supports float16 and float32 data type.
@@ -98,7 +98,7 @@ class Softmax(PrimitiveWithInfer):
 Softmax operation.

 Applies the Softmax operation to the input tensor on the specified axis.
-Suppose a slice in the given aixs :math:`x` then for each element :math:`x_i`
+Suppose a slice in the given axis :math:`x`, then for each element :math:`x_i`,
 the Softmax function is shown as follows:

 .. math::
@@ -107,7 +107,7 @@ class Softmax(PrimitiveWithInfer):
 where :math:`N` is the length of the tensor.

 Args:
-    axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.
+    axis (Union[int, tuple]): The axis to perform the Softmax operation. Default: -1.

 Inputs:
     - **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.
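A minimal usage sketch (logits values are illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

softmax = P.Softmax(axis=-1)
logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
output = softmax(logits)   # probabilities along the last axis, summing to 1
```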
@@ -1549,17 +1549,17 @@ class TopK(PrimitiveWithInfer):
 Finds values and indices of the `k` largest entries along the last dimension.

 Args:
-    sorted (bool): If true, the resulting elements will
+    sorted (bool): If True, the obtained elements will
      be sorted by the values in descending order. Default: False.

 Inputs:
     - **input_x** (Tensor) - Input to be computed, data type should be float16, float32 or int32.
-    - **k** (int) - Number of top elements to be computed along the last dimension, constant input is needed.
+    - **k** (int) - The number of top elements to be computed along the last dimension, constant input is needed.

 Outputs:
-    Tuple of 2 Tensors, the values and the indices.
+    Tuple of 2 tensors, the values and the indices.

-    - **values** (Tensor) - The `k` largest elements along each last dimensional slice.
+    - **values** (Tensor) - The `k` largest elements in each slice of the last dimension.
     - **indices** (Tensor) - The indices of values within the last dimension of input.

 Examples:
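A minimal usage sketch (input values are illustrative):

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

topk = P.TopK(sorted=True)
input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
values, indices = topk(input_x, 3)
# values: [5.0, 4.0, 3.0]; indices: [4, 3, 2]
```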
@@ -1593,7 +1593,7 @@ class TopK(PrimitiveWithInfer):

 class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
     r"""
-    Gets the softmax cross-entropy value between logits and labels which shoule be one-hot encoding.
+    Gets the softmax cross-entropy value between logits and labels with one-hot encoding.

     Note:
         Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
@@ -1609,7 +1609,7 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
     - **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.

 Outputs:
-    Tuple of 2 Tensors, the loss shape is `(N,)`, and the dlogits with the same shape as `logits`.
+    Tuple of 2 tensors, the `loss` shape is `(N,)`, and the `dlogits` with the same shape as `logits`.

 Examples:
     >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
@@ -1653,7 +1653,7 @@ class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
     loss = \sum_{ij} loss_{ij}

 Args:
-    is_grad (bool): If it's true, this operation returns the computed gradient. Default: False.
+    is_grad (bool): If true, this operation returns the computed gradient. Default: False.

 Inputs:
     - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
@@ -4084,19 +4084,19 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
 Args:
     lr (float): Learning rate.
     update_slots (bool): If `True`, `accum` will be updated. Default: True.
-    use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
+    use_locking (bool): If true, the `var` and `accum` tensors will be protected from being updated.
        Default: False.

 Inputs:
     - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
-    - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
-    - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
-      Has the same data type as `var`.
+    - **accum** (Parameter) - Accumulation to be updated. The shape and data type should be the same as `var`.
+    - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except the first dimension.
+      Gradients have the same data type as `var`.
     - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
       The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

 Outputs:
-    Tuple of 2 Tensors, the updated parameters.
+    Tuple of 2 tensors, the updated parameters.

     - **var** (Tensor) - The same shape and data type as `var`.
     - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -4170,20 +4170,20 @@ class SparseApplyAdagradV2(PrimitiveWithInfer):
 Args:
     lr (float): Learning rate.
     epsilon (float): A small value added for numerical stability.
-    use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
+    use_locking (bool): If `True`, the `var` and `accum` tensors will be protected from being updated.
        Default: False.
     update_slots (bool): If `True`, the computation logic will be different to `False`. Default: True.

 Inputs:
     - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
-    - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
-    - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
-      Has the same data type as `var`.
+    - **accum** (Parameter) - Accumulation to be updated. The shape and data type should be the same as `var`.
+    - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except the first dimension.
+      Gradients have the same data type as `var`.
     - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
       The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

 Outputs:
-    Tuple of 2 Tensors, the updated parameters.
+    Tuple of 2 tensors, the updated parameters.

     - **var** (Tensor) - The same shape and data type as `var`.
     - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -4361,23 +4361,23 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck):
     RuntimeError exception will be thrown when the data type conversion of Parameter is required.

 Args:
-    use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
+    use_locking (bool): If true, the `var` and `accum` tensors will be protected from being updated.
        Default: False.

 Inputs:
     - **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
     - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
-    - **lr** (Union[Number, Tensor]) - The learning rate value. Tshould be a float number or
+    - **lr** (Union[Number, Tensor]) - The learning rate value. Should be a float number or
       a scalar tensor with float16 or float32 data type.
     - **l1** (Union[Number, Tensor]) - l1 regularization strength. Should be a float number or
       a scalar tensor with float16 or float32 data type.
     - **l2** (Union[Number, Tensor]) - l2 regularization strength. Should be a float number or
       a scalar tensor with float16 or float32 data type.
     - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
-    - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
+    - **indices** (Tensor) - A vector of indices in the first dimension of `var` and `accum`.

 Outputs:
-    Tuple of 2 Tensors, the updated parameters.
+    Tuple of 2 tensors, the updated parameters.

     - **var** (Tensor) - The same shape and data type as `var`.
     - **accum** (Tensor) - The same shape and data type as `accum`.
@@ -4982,16 +4982,16 @@ class SparseApplyFtrl(PrimitiveWithCheck):

 Inputs:
     - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
-    - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
-    - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
+    - **accum** (Parameter) - The accumulation to be updated, must be the same data type and shape as `var`.
+    - **linear** (Parameter) - The linear coefficient to be updated, must be the same data type and shape as `var`.
     - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
-    - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
-      The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
+    - **indices** (Tensor) - A vector of indices in the first dimension of `var` and `accum`.
+      The shape of `indices` must be the same as `grad` in the first dimension. The type must be int32.

 Outputs:
-    - **var** (Tensor) - Tensor, has the same shape and type as `var`.
-    - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
-    - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
+    - **var** (Tensor) - Tensor, has the same shape and data type as `var`.
+    - **accum** (Tensor) - Tensor, has the same shape and data type as `accum`.
+    - **linear** (Tensor) - Tensor, has the same shape and data type as `linear`.

 Examples:
     >>> import mindspore
@@ -5074,18 +5074,18 @@ class SparseApplyFtrlV2(PrimitiveWithInfer):

 Inputs:
     - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
-    - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
-    - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
+    - **accum** (Parameter) - The accumulation to be updated, must be the same data type and shape as `var`.
+    - **linear** (Parameter) - The linear coefficient to be updated, must be the same data type and shape as `var`.
     - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
-    - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
-      The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
+    - **indices** (Tensor) - A vector of indices in the first dimension of `var` and `accum`.
+      The shape of `indices` must be the same as `grad` in the first dimension. The type must be int32.

 Outputs:
     Tuple of 3 tensors, the updated parameters.

-    - **var** (Tensor) - Tensor, has the same shape and type as `var`.
-    - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
-    - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
+    - **var** (Tensor) - Tensor, has the same shape and data type as `var`.
+    - **accum** (Tensor) - Tensor, has the same shape and data type as `accum`.
+    - **linear** (Tensor) - Tensor, has the same shape and data type as `linear`.

 Examples:
     >>> import mindspore
@@ -34,7 +34,7 @@ class StandardNormal(PrimitiveWithInfer):
     - **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.

 Outputs:
-    Tensor. The shape that the input 'shape' denotes. The dtype is float32.
+    Tensor. The shape is the same as the input `shape`. The dtype is float32.

 Examples:
     >>> shape = (4, 16)
@@ -239,13 +239,13 @@ class UniformInt(PrimitiveWithInfer):

 Inputs:
     - **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
-    - **minval** (Tensor) - The a distribution parameter.
-      It defines the minimum possibly generated value. With int32 data type. Only one number is supported.
-    - **maxval** (Tensor) - The b distribution parameter.
-      It defines the maximum possibly generated value. With int32 data type. Only one number is supported.
+    - **minval** (Tensor) - The distribution parameter, a.
+      It defines the minimum possibly generated value, with int32 data type. Only one number is supported.
+    - **maxval** (Tensor) - The distribution parameter, b.
+      It defines the maximum possibly generated value, with int32 data type. Only one number is supported.

 Outputs:
-    Tensor. The shape that the input 'shape' denotes. The dtype is int32.
+    Tensor. The shape is the same as the input 'shape', and the data type is int32.

 Examples:
     >>> shape = (4, 16)
@@ -284,7 +284,7 @@ class UniformInt(PrimitiveWithInfer):

 class UniformReal(PrimitiveWithInfer):
     r"""
-    Produces random floating-point values i, uniformly distributed on the interval [0, 1).
+    Produces random floating-point values, uniformly distributed on the interval [0, 1).

 Args:
     seed (int): Random seed. Must be non-negative. Default: 0.
@@ -29,10 +29,10 @@ class SparseToDense(PrimitiveWithInfer):
 Inputs:
     - **indices** (Tensor) - The indices of sparse representation.
     - **values** (Tensor) - Values corresponding to each row of indices.
-    - **dense_shape** (tuple) - A int tuple which specifies the shape of dense tensor.
+    - **dense_shape** (tuple) - An int tuple which specifies the shape of dense tensor.

 Returns:
-    Tensor, the shape of tensor is dense_shape.
+    Tensor, the shape of tensor is `dense_shape`.

 Examples:
     >>> indices = Tensor([[0, 1], [1, 2]])
@@ -62,7 +62,13 @@ else
     exit ${RET}
 fi

-pytest -n 4 --dist=loadfile -v $CURRPATH/parallel $CURRPATH/train $CURRPATH/ops
+pytest -n 4 --dist=loadfile -v $CURRPATH/parallel $CURRPATH/train
 RET=$?
 if [ ${RET} -ne 0 ]; then
     exit ${RET}
+fi
+
+pytest -n 2 --dist=loadfile -v $CURRPATH/ops
+RET=$?
+if [ ${RET} -ne 0 ]; then
+    exit ${RET}