!46884 issue:I5C2X7 Mul doc refresh

Merge pull request !46884 from zhangzhaoju/master_mul

Commit: 24854285fe
@@ -0,0 +1,6 @@
+mindspore.Tensor.mul
+==========================
+
+.. py:method:: mindspore.Tensor.mul(value)
+
+    For details, please refer to :func:`mindspore.ops.mul`.
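
For illustration, a minimal usage sketch of the documented method (hypothetical example, assuming a working MindSpore install; the output values mirror the Mul example further below):

    import mindspore as ms
    from mindspore import Tensor

    x = Tensor([1.0, 2.0, 3.0], ms.float32)
    y = Tensor([4.0, 5.0, 6.0], ms.float32)
    print(x.mul(y))  # element-wise product, expected: [ 4. 10. 18.]
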
@@ -658,6 +658,12 @@ void GetBroadCastIndex(const std::vector<size_t> &unaligned_input_shape, const s
                       << " and output shape is " << output_shape;
     }
   }
+
+  // When the input address is null, e.g. for a shape like (4, 0, 5), the output size is zero.
+  if (output_size < 1) {
+    return;
+  }
+
   // Get the flatten input indices according to "logical_shape" and "physical_shape".
   size_t offset = 1;
   size_t stride = 1;
@@ -665,7 +671,7 @@ void GetBroadCastIndex(const std::vector<size_t> &unaligned_input_shape, const s
   (*index_list)[0] = 0;  // First element is set to 0.
   for (size_t i = 0; i < size; ++i) {
     size_t increment = (logical_shape[i] == physical_shape[i] ? stride : 0);
-    for (size_t j = 0; j < (physical_shape[i] - 1) * offset; ++j) {
+    for (size_t j = 0; j + offset < physical_shape[i] * offset; ++j) {
      (*index_list)[offset + j] = (*index_list)[j] + increment;
     }
     offset *= physical_shape[i];
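
To make the intent of the two hunks above easier to follow, here is a hand-written Python analogue of the index construction (a sketch, not the MindSpore source; the `stride *= logical_shape[i]` update is an assumption about code outside this diff). It shows both the added early return for a zero-sized output such as shape (4, 0, 5) and the rewritten loop bound, which is equivalent to the old `(physical_shape[i] - 1) * offset` for positive dimension sizes but cannot underflow to a huge value in unsigned arithmetic when a dimension is 0:

    def broadcast_index(logical_shape, physical_shape):
        """Map each flattened output index to a flattened input index (dim 0 innermost)."""
        output_size = 1
        for dim in physical_shape:
            output_size *= dim
        # Mirrors the added early return: a zero-sized output yields no indices.
        if output_size < 1:
            return []
        index_list = [0] * output_size  # first element is set to 0
        offset, stride = 1, 1
        for logical, physical in zip(logical_shape, physical_shape):
            increment = stride if logical == physical else 0  # 0 => broadcast this dim
            j = 0
            while j + offset < physical * offset:  # the rewritten loop bound
                index_list[offset + j] = index_list[j] + increment
                j += 1
            offset *= physical
            stride *= logical  # assumption: the input stride grows by the input dim size

        return index_list

    # Example: an input of shape (3, 1, 2) broadcast to (3, 4, 2); each group of
    # three output elements along the broadcast dim reuses the same three inputs.
    print(broadcast_index([3, 1, 2], [3, 4, 2]))
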
@@ -2078,6 +2078,17 @@ class Mul(_MathBinaryOp):
         [ 4. 10. 18.]
     """
 
+    # Let x and y use the same sig_dtype to enable implicit conversion for compatibility.
+    __mindspore_signature__ = (
+        sig.make_sig('x', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
+        sig.make_sig('y', rw=sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T)
+    )
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize Mul."""
+        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
+
     @staticmethod
     def _infer_specified_mul_value(x, y):
         """Calculate min/max value for output of Mul op"""
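
A short sketch of the behaviour the shared sig_dtype is meant to preserve: mixing tensor dtypes in Mul triggers MindSpore's implicit type conversion so that both operands end up with a common dtype (a hypothetical usage example, assuming a working MindSpore install; the exact promotion rules come from MindSpore itself, not from this hunk):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([1.0, 2.0, 3.0], ms.float32)
    y = Tensor([4, 5, 6], ms.int32)

    out = ops.Mul()(x, y)   # the int32 operand is implicitly converted
    print(out)              # expected: [ 4. 10. 18.]
    print(out.dtype)        # expected: Float32
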