diff --git a/docs/api/api_python/ops/mindspore.ops.func_jvp.rst b/docs/api/api_python/ops/mindspore.ops.func_jvp.rst new file mode 100644 index 00000000000..833fbb2bc5a --- /dev/null +++ b/docs/api/api_python/ops/mindspore.ops.func_jvp.rst @@ -0,0 +1,18 @@ +mindspore.ops.jvp +================= + +.. py:function:: mindspore.ops.jvp(fn, inputs, v) + + 计算给定网络的雅可比向量积(Jacobian-vector product, JVP)。JVP对应 `前向模式自动微分 `_。 + + 参数: + - **fn** (Union[Function, Cell]) - 待求导的函数或网络。以Tensor为入参,返回Tensor或Tensor数组。 + - **inputs** (Union[Tensor, tuple[Tensor], list[Tensor]]) - 输入网络 `fn` 的入参。 + - **v** (Union[Tensor, tuple[Tensor], list[Tensor]]) - 与雅可比矩阵相乘的向量,shape和type与网络的输入一致。 + + 返回: + - **net_output** (Union[Tensor, tuple[Tensor]]) - 输入网络的正向计算结果。 + - **jvp** (Union[Tensor, tuple[Tensor]]) - 雅可比向量积的结果。 + + 异常: + - **TypeError** - `inputs` 或 `v` 类型不符合要求。 diff --git a/docs/api/api_python/ops/mindspore.ops.func_vjp.rst b/docs/api/api_python/ops/mindspore.ops.func_vjp.rst new file mode 100644 index 00000000000..0870acbc67a --- /dev/null +++ b/docs/api/api_python/ops/mindspore.ops.func_vjp.rst @@ -0,0 +1,21 @@ +mindspore.ops.vjp +================= + +.. py:function:: mindspore.ops.vjp(fn, inputs, v) + + 计算给定网络的向量雅可比积(vector-jacobian-product, VJP)。VJP对应 `反向模式自动微分 `_。 + + .. 
note:: + 此接口未来会变动。 + + 参数: + - **fn** (Union[Function, Cell]) - 待求导的函数或网络。以Tensor为入参,返回Tensor或Tensor数组。 + - **inputs** (Union[Tensor, tuple[Tensor], list[Tensor]]) - 输入网络 `fn` 的入参。 + - **v** (Union[Tensor, tuple[Tensor], list[Tensor]]) - 与雅可比矩阵相乘的向量,shape和type与网络的输出一致。 + + 返回: + - **net_output** (Union[Tensor, tuple[Tensor]]) - 输入网络的正向计算结果。 + - **vjp** (Union[Tensor, tuple[Tensor]]) - 向量雅可比积的结果。 + + 异常: + - **TypeError** - `inputs` 或 `v` 类型不符合要求。 diff --git a/mindspore/python/mindspore/ops/function/grad/grad_func.py b/mindspore/python/mindspore/ops/function/grad/grad_func.py index d609d40eebe..79dc27b4b87 100644 --- a/mindspore/python/mindspore/ops/function/grad/grad_func.py +++ b/mindspore/python/mindspore/ops/function/grad/grad_func.py @@ -544,28 +544,28 @@ def derivative(fn, primals, order): def jvp(fn, inputs, v): """ - Compute the jacobian-vector-product of the given network. + Compute the jacobian-vector-product of the given network. `jvp` matches + `forward-mode differentiation `_. Args: fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single tensor or tuple of Tensors. - inputs (Union[Tensor, Tuple or List of Tensors]): The inputs to `fn`. - v (Union[Tensor, Tuple or or List of Tensors]): The shape and type of v should be the same as inputs. + inputs (Union[Tensor, tuple[Tensor], list[Tensor]]): The inputs to `fn` . + v (Union[Tensor, tuple[Tensor], list[Tensor]]): The vector in jacobian-vector-product. The shape and type of v + should be the same as inputs. Returns: - Tuple, tuple of output and jvp. - - - **netout** (Tensor or Tuple of Tensors) - The output of "fn(inputs)". - - **jvp** (Tensor or Tuple of Tensors) - The result of the dot product. + - **netout** (Union[Tensor, tuple[Tensor]]) - The output of `fn(inputs)` . + - **jvp** (Union[Tensor, tuple[Tensor]]) - The result of jacobian-vector-product. Raises: - TypeError: If the input is not a tensor or tuple or list of tensors. 
+ TypeError: If `inputs` or `v` does not belong to required types. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: - >>> from mindspore.ops import functional as F + >>> from mindspore import ops >>> from mindspore import Tensor >>> class Net(nn.Cell): ... def construct(self, x, y): >>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32)) >>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32)) >>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32)) - >>> output = F.jvp(Net(), (x, y), (v, v)) + >>> output = ops.jvp(Net(), (x, y), (v, v)) >>> print(output[0]) [[ 2. 10.] [30. 68.]] @@ -667,28 +667,31 @@ def linearize(fn, inputs): def vjp(fn, inputs, v): """ - Compute the vector-jacobian-product of the given network. + Compute the vector-jacobian-product of the given network. `vjp` matches + `reverse-mode differentiation `_. + + Note: + This function is subject to change in the future. Args: fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single tensor or tuple of Tensors. - inputs (Union[Tensor, Tuple or List of Tensors]): The inputs to `fn`. - v (Union[Tensor, Tuple or List of Tensors]): The shape and type of v should be the same as outputs. + inputs (Union[Tensor, tuple[Tensor], list[Tensor]]): The inputs to `fn` . + v (Union[Tensor, tuple[Tensor], list[Tensor]]): The vector in vector-jacobian-product. The shape and type of v + should be the same as outputs. Returns: - Tuple, tuple of output and vjp. - - - **netout** (Tensor or Tuple of Tensors) - The output of "fn(inputs)". - - **vjp** (Tensor or Tuple of Tensors) - The result of the dot product. + - **netout** (Union[Tensor, tuple[Tensor]]) - The output of `fn(inputs)` . + - **vjp** (Union[Tensor, tuple[Tensor]]) - The result of vector-jacobian-product. Raises: - TypeError: If the input is not a tensor or tuple or list of tensors. + TypeError: If `inputs` or `v` does not belong to required types. 
Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: - >>> from mindspore.ops import functional as F + >>> from mindspore import ops >>> from mindspore import Tensor >>> class Net(nn.Cell): ... def construct(self, x, y): @@ -696,7 +699,7 @@ def vjp(fn, inputs, v): >>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32)) >>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32)) >>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32)) - >>> output = F.vjp(Net(), (x, y), v) + >>> output = ops.vjp(Net(), (x, y), v) >>> print(output[0]) [[ 2. 10.] [30. 68.]]