!6241 [bug] Add docs for some Parameter properties & fix a bug in `IsInstance`

Merge pull request !6241 from vlne-v1/I1V5VO-fix-isinstance
mindspore-ci-bot 2020-09-16 10:13:30 +08:00 committed by Gitee
commit 3fe8916afa
7 changed files with 72 additions and 10 deletions

View File

@ -34,6 +34,8 @@ __dtype__ = [
"float64", "double",
"bool_", "float_",
"list_", "tuple_",
"int_", "uint",
"number", "tensor",
"string", "type_none",
"tensor_type",
"Type"

View File

@ -180,7 +180,12 @@ class Parameter(MetaTensor):
@property
def inited_param(self):
"""Get the new parameter after call the init_data."""
"""
Get the new parameter after calling `init_data`.
Default is None. If `self` is a Parameter without data, the initialized
Parameter with data will be recorded here after `init_data` is called.
"""
return self._inited_param
@ -232,7 +237,14 @@ class Parameter(MetaTensor):
@property
def is_init(self):
"""Get the initialization status of the parameter."""
"""
Get the initialization status of the parameter.
On the GE backend, the Parameter needs an "init graph" to sync the data from host to device.
This flag indicates whether the data has been synced to the device.
This flag only works on GE; on other backends it is set to False.
"""
return self._is_init
@is_init.setter
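
A minimal Python sketch of the two properties documented above, assuming the Parameter is built from an initializer so it carries no data until `init_data` runs (the recording behaviour is paraphrased from the docstrings, not verified here):

from mindspore import Parameter
from mindspore.common.initializer import initializer

w = Parameter(initializer('normal', [2, 3]), name='w')   # no materialized data yet
inited = w.init_data()                                    # materializes the data
print(w.inited_param is inited)   # expected True: the initialized Parameter is recorded here
print(w.is_init)                  # expected False on non-GE backends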
@ -250,7 +262,8 @@ class Parameter(MetaTensor):
Clone the parameter.
Args:
prefix (str): Namespace of parameter.
prefix (str): Namespace of the parameter. The cloned Parameter's name is the
combination of the prefix and the current name: `f"{prefix}.{self.name}"`.
init (Union[Tensor, str, Initializer, numbers.Number]): Initialize the shape of the parameter.
Default: 'same'.
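
A short illustration of the naming rule spelled out above; the prefix string "backup" is just an example, and `init='same'` keeps the cloned values identical:

import numpy as np
from mindspore import Parameter, Tensor

p = Parameter(Tensor(np.ones((2, 2), np.float32)), name="weight")
p_copy = p.clone(prefix="backup", init='same')
print(p_copy.name)   # expected: "backup.weight"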

View File

@ -92,7 +92,12 @@ class Int : public Number {
~Int() override = default;
MS_DECLARE_PARENT(Int, Number)
TypeId generic_type_id() const override { return kNumberTypeInt; }
TypePtr DeepCopy() const override { return std::make_shared<Int>(nbits()); }
TypePtr DeepCopy() const override {
if (nbits() == 0) {
return std::make_shared<Int>();
}
return std::make_shared<Int>(nbits());
}
std::string ToString() const override { return GetTypeName("Int"); }
std::string ToReprString() const override { return nbits() == 0 ? "int_" : GetTypeName("int"); }
std::string DumpText() const override {
@ -110,7 +115,12 @@ class UInt : public Number {
~UInt() override {}
MS_DECLARE_PARENT(UInt, Number)
TypePtr DeepCopy() const override { return std::make_shared<UInt>(nbits()); }
TypePtr DeepCopy() const override {
if (nbits() == 0) {
return std::make_shared<UInt>();
}
return std::make_shared<UInt>(nbits());
}
std::string ToString() const override { return GetTypeName("UInt"); }
std::string ToReprString() const override { return GetTypeName("uint"); }
std::string DumpText() const override {
@ -127,7 +137,12 @@ class Float : public Number {
MS_DECLARE_PARENT(Float, Number)
TypeId generic_type_id() const override { return kNumberTypeFloat; }
TypePtr DeepCopy() const override { return std::make_shared<Float>(nbits()); }
TypePtr DeepCopy() const override {
if (nbits() == 0) {
return std::make_shared<Float>();
}
return std::make_shared<Float>(nbits());
}
std::string ToString() const override { return GetTypeName("Float"); }
std::string ToReprString() const override { return nbits() == 0 ? "float_" : GetTypeName("float"); }
std::string DumpText() const override {

View File

@ -349,7 +349,12 @@ class IsInstance(PrimitiveWithInfer):
validator.check_const_input("inst", inst['value'], self.name)
validator.check_value_type("type_", type_v, [mstype.Type], self.name)
value = mstype.issubclass_(sub_type_t, type_v)
if type_v == mstype.list_:
value = isinstance(sub_type_t, list)
elif type_v == mstype.tuple_:
value = isinstance(sub_type_t, tuple)
else:
value = mstype.issubclass_(sub_type_t, type_v)
out = {'shape': (),
'dtype': mstype.type_type,
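
The two new branches route `mstype.list_` and `mstype.tuple_` checks through Python's `isinstance` instead of `issubclass_`. The effect, also exercised by the new test at the end of this diff, looks like this (a hedged sketch, run in the default execution mode):

import mindspore as ms
from mindspore.ops import operations as P

print(P.IsInstance()([1, 2, 3], ms.list_))   # True after this fix
print(P.IsInstance()((1, 2, 3), ms.tuple_))  # True after this fix
print(P.IsInstance()(1, ms.int_))            # unchanged path via issubclass_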

View File

@ -70,11 +70,14 @@ class Model:
value would be passed to the `Loss` metric, the predicted value and label would be passed
to other metric. Default: None.
amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed
precision training. Supports [O0, O2, O3]. Default: "O0".
precision training. Supports ["O0", "O2", "O3", "auto"]. Default: "O0".
- O0: Do not change.
- O2: Cast network to float16, keep BatchNorm running in float32, use dynamic loss scale.
- O3: Cast network to float16, with the additional property 'keep_batchnorm_fp32=False'.
- auto: Set the level to the recommended level for the device: O2 on GPU, O3 on Ascend.
The recommended levels are chosen from expert experience and cannot always
generalize, so users should specify the level explicitly for special networks.
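
A minimal, hedged construction sketch for the new "auto" option; the tiny network, loss, and optimizer are placeholders, and only `amp_level` comes from this diff (the docstring says "auto" resolves to O2 on GPU and O3 on Ascend, so it is not meant for CPU targets):

from mindspore import nn
from mindspore.train import Model

net = nn.Dense(16, 10)                         # placeholder network
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
# "auto" picks O2 on GPU and O3 on Ascend, per the documentation above.
model = Model(net, loss_fn=loss, optimizer=opt, amp_level="auto")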

View File

@ -21,6 +21,6 @@ operations. Note that the entire computation is carried out in floating point. At the end of quantization
aware training, MindSpore provides conversion functions to convert the trained model into lower precision.
"""
from .quant import convert_quant_network
from .quant import convert_quant_network, export
__all__ = ["convert_quant_network"]
__all__ = ["convert_quant_network", "export"]

View File

@ -0,0 +1,24 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_multitype """
import mindspore as ms
from mindspore.ops import operations as P
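# Exercises the IsInstance fix above: list/tuple constants go through Python's
# isinstance, while the scalar checks take the existing issubclass_ path.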
def test_isinstance():
assert P.IsInstance()([1, 2, 3], ms.list_) is True
assert P.IsInstance()((1, 2, 3), ms.tuple_) is True
assert P.IsInstance()(1.0, ms.float_) is True
assert P.IsInstance()(1, ms.int_) is True