forked from mindspore-Ecosystem/mindspore
r.19 release: fix the platforms that operators and interfaces should list as supported
This commit is contained in:
parent 97bccdacb1
commit f42133fe30
@@ -2172,7 +2172,7 @@ def scatter_nd_max(input_x, indices, updates, use_locking=False):
         is required when data type conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
@@ -2245,7 +2245,7 @@ def scatter_nd_min(input_x, indices, updates, use_locking=False):
         is required when data type conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
        >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
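For context, a minimal usage sketch of the functional `scatter_nd_max` on one of the newly listed back ends; the device setting and the `indices`/`updates` values below are illustrative assumptions, not part of this commit:

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, Parameter, ops
    >>> mindspore.set_context(device_target="GPU")  # assumes a GPU-enabled MindSpore build
    >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
    >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
    >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
    >>> output = ops.scatter_nd_max(input_x, indices, updates)
    >>> # input_x becomes [1, 8, 6, 4, 7, 6, 7, 9]: element-wise max at the scattered indices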
@@ -309,7 +309,7 @@ def addcdiv(input_data, x1, x2, value):
         ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1/x2)`.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
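A minimal sketch of `addcdiv`, which adds `value * (x1 / x2)` element-wise to `input_data`; the `x1`, `x2`, and `value` tensors below are hypothetical values, not from this commit:

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
    >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
    >>> x2 = Tensor(np.array([1, 1, 2, 2]), mindspore.float32)
    >>> value = Tensor([1.0], mindspore.float32)
    >>> output = ops.addcdiv(input_data, x1, x2, value)
    >>> # 1 + 1*(x1/x2) -> [2., 3., 2.5, 3.]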
@@ -350,7 +350,7 @@ def addcmul(input_data, x1, x2, value):
         ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1*x2)`.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
@@ -940,7 +940,7 @@ def inplace_update(x, v, indices):
         TypeError: If `indices` is a tuple and its element is not an int.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> indices = (0, 1)
@@ -1327,7 +1327,7 @@ def xlogy(x, y):
         ValueError: If `x` could not be broadcast to a tensor with shape of `y`.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
@@ -1911,7 +1911,7 @@ def bessel_j0(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -1938,7 +1938,7 @@ def bessel_j1(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -1965,7 +1965,7 @@ def bessel_i0(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
@@ -1992,7 +1992,7 @@ def bessel_i0e(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
@@ -2019,7 +2019,7 @@ def bessel_k0(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -2046,7 +2046,7 @@ def bessel_k0e(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -2073,7 +2073,7 @@ def bessel_y0(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -2100,7 +2100,7 @@ def bessel_y1(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -2399,7 +2399,7 @@ def trunc(input_x):
         TypeError: If `input_x` is not a Tensor.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> input_x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]),mindspore.float32)
@@ -2819,7 +2819,7 @@ def approximate_equal(x, y, tolerance=1e-5):
         but data type conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> from mindspore.ops.function.math_func import approximate_equal
@@ -2890,7 +2890,7 @@ def isnan(x):
         TypeError: If `x` is not a Tensor.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
@@ -3846,7 +3846,7 @@ def lerp(start, end, weight):
         ValueError: If `weight` could not be broadcast to tensors with shapes of `start` and `end` when it is a tensor.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
@@ -3922,7 +3922,7 @@ def bessel_i1(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
@@ -3949,7 +3949,7 @@ def bessel_i1e(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-1, -0.5, 0.5, 1]), mindspore.float32)
@@ -3976,7 +3976,7 @@ def bessel_k1(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -4003,7 +4003,7 @@ def bessel_k1e(x):
         TypeError: If dtype of `x` is not float16, float32 or float64.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
@@ -2128,7 +2128,7 @@ def grid_sample(input_x, grid, interpolation_mode='bilinear', padding_mode='zero
         ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32))
@@ -88,7 +88,7 @@ def assign_sub(variable, value):
         when data type conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
@@ -113,7 +113,7 @@ def standard_laplace(shape, seed=0, seed2=0):
         ValueError: If shape is a tuple containing non-positive items.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> from mindspore import ops
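A minimal sketch of calling `standard_laplace` with a hypothetical shape; the op draws random samples from a standard Laplace distribution, so values vary run to run:

    >>> from mindspore import ops
    >>> shape = (4, 4)
    >>> output = ops.standard_laplace(shape)
    >>> print(output.shape)
    (4, 4)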
@@ -5036,7 +5036,7 @@ class ScatterNdMax(_ScatterNdOp):
     Refer to :func:`mindspore.ops.scatter_nd_max` for more details.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> from mindspore.ops.operations.array_ops import ScatterNdMax
@@ -5083,7 +5083,7 @@ class ScatterNdMin(_ScatterNdOp):
     Refer to :func:`mindspore.ops.scatter_nd_min` for more details.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
@@ -264,7 +264,7 @@ class Addcdiv(Primitive):
         ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1/x2)`.
 
     Supported Platforms:
-        ``Ascend`` ``CPU`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
@@ -310,7 +310,7 @@ class Addcmul(Primitive):
         ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1*x2)`.
 
     Supported Platforms:
-        ``Ascend`` ``CPU`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
@@ -1718,7 +1718,7 @@ class InplaceUpdate(PrimitiveWithInfer):
         TypeError: If `indices` is a tuple and its element is not an int.
 
     Supported Platforms:
-        ``Ascend`` ``CPU`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> indices = (0, 1)
@@ -3335,7 +3335,7 @@ class Xlogy(Primitive):
     Refer to :func:`mindspore.ops.xlogy` for more detail.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
@@ -3962,7 +3962,7 @@ class IsNan(Primitive):
     Refer to :func:`mindspore.ops.isnan` for more detail.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> is_nan = ops.IsNan()
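For the Primitive form, a minimal sketch mirroring the docstring example (the input tensor values are the ones quoted above; note `np.log(-1)` yields NaN while `np.log(0)` yields -inf):

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> is_nan = ops.IsNan()
    >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
    >>> output = is_nan(x)
    >>> # expected: [True, False, False] -- only the NaN entry is flagged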
@@ -4002,7 +4002,7 @@ class IsInf(Primitive):
         TypeError: If `x` is not a Tensor.
 
     Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> is_inf = ops.IsInf()
@@ -5636,7 +5636,7 @@ class Trunc(Primitive):
     Refer to :func:`mindspore.ops.trunc` for more detail.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
     """
 
     @prim_attr_register
@@ -5862,7 +5862,7 @@ class ApplyAdagradV2(Primitive):
         RuntimeError: If the data type of `var`, `accum` and `grad` conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> class Net(nn.Cell):
@@ -6578,7 +6578,7 @@ class ApplyProximalGradientDescent(Primitive):
         RuntimeError: If the data type of `var`, and `delta` conversion of Parameter is not supported.
 
     Supported Platforms:
-        ``Ascend`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> class Net(nn.Cell):
@@ -9262,7 +9262,7 @@ class GridSampler3D(Primitive):
         ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
 
     Supported Platforms:
-        ``CPU`` ``GPU``
+        ``Ascend`` ``CPU`` ``GPU``
 
     Examples:
         >>> gridsampler = GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
@@ -414,7 +414,7 @@ class SparseToDense(PrimitiveWithInfer):
         ValueError: If `sparse_shape`, shape of `indices` and shape of `values` don't meet the parameter description.
 
     Supported Platforms:
-        ``CPU``
+        ``GPU`` ``CPU``
 
     Examples:
         >>> indices = Tensor([[0, 1], [1, 2]])