forked from mindspore-Ecosystem/mindspore
!49916 Add AnyTypeNode and NoneTypeNode
Merge pull request !49916 from NaCN/add_async_node
commit f58509ff73

@@ -93,6 +93,25 @@ class SequenceNode : public StubNode {
 };
 using SequenceNodePtr = std::shared_ptr<SequenceNode>;

+class AnyTypeNode : public StubNode {
+ public:
+  AnyTypeNode() = default;
+  MS_DECLARE_PARENT(AnyTypeNode, StubNode);
+  bool SetAbstract(const AbstractBasePtr &abs) override;
+  void SetValue(const ValuePtr &val) override;
+  py::object GetRealNode();
+
+ private:
+  StubNodePtr real_node_;
+};
+
+class NoneTypeNode : public StubNode {
+ public:
+  NoneTypeNode() = default;
+  MS_DECLARE_PARENT(NoneTypeNode, StubNode);
+  py::object GetRealValue();
+};
+
 COMMON_EXPORT std::pair<py::object, StubNodePtr> MakeTopNode(const TypePtr &type);
 COMMON_EXPORT void RegStubNodes(const py::module *m);
 }  // namespace stub
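
The two declarations split the "output type unknown at dispatch time" case in two: AnyTypeNode wraps a concrete stub node that only materializes once infer has produced an abstract, while NoneTypeNode covers constant, non-Tensor outputs whose Python value is fetched directly. A plain-Python sketch of that contract follows; every name in it is an illustrative stand-in, since the real classes are the C++ above, exposed through mindspore._c_expression.

# Toy model of the wait-then-resolve contract; not the real bindings.
import threading

class _ToyStub:
    def __init__(self):
        self._ready = threading.Event()
        self._value = None

    def set_value(self, value):
        # Producer side: the async executor publishes the result.
        self._value = value
        self._ready.set()

    def wait_value(self):
        # Consumer side: block until the async task has delivered.
        self._ready.wait()
        return self._value

class _ToyAnyTypeNode(_ToyStub):
    def get_real_node(self):
        # Type unknown at dispatch: resolve to the concrete node later.
        return self.wait_value()

class _ToyNoneTypeNode(_ToyStub):
    def get_real_value(self):
        # Constant output: resolve straight to the Python value.
        return self.wait_value()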

@@ -39,6 +39,8 @@ const TypePtr kTupleTensor8 = std::make_shared<Tuple>(
 const TypePtr kTupleTensor9 = std::make_shared<Tuple>(TypePtrList{
   kTensorType, kTensorType, kTensorType, kTensorType, kTensorType, kTensorType, kTensorType, kTensorType, kTensorType});

+// If Abstract of the operator is constant, please add the type kTypeNone, such as "Size".
+// If Abstract of the operator is Tensor and Tuple[Tensor], please add the type kAnyType, such as "Eigh".
 inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4},
     {"Adam", kTupleTensor3},
     {"AdamApplyOne", kTupleTensor3},
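
The two new comments state the convention for every entry that changes below: an operator whose output is a compile-time constant is tagged kTypeNone (its value comes back through a NoneTypeNode), and an operator whose output may be either a Tensor or a Tuple[Tensor] is tagged kAnyType (wrapped in an AnyTypeNode). As a stand-alone sketch, with string tokens standing in for the C++ TypePtr singletons and the operator choices taken from the comments' own examples:

# Hedged sketch of the tagging convention; tokens are stand-ins, not the C++ types.
K_TYPE_NONE = "kTypeNone"        # constant output -> NoneTypeNode
K_ANY_TYPE = "kAnyType"          # Tensor or Tuple[Tensor] -> AnyTypeNode
K_TUPLE_TENSOR2 = "kTupleTensor2"

out_type_prediction = {
    "Size": K_TYPE_NONE,         # constant: the value itself is awaited
    "Eigh": K_ANY_TYPE,          # output structure depends on the inputs
    "TopK": K_TUPLE_TENSOR2,     # statically known: always (values, indices)
}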

@@ -85,7 +87,7 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"CTCLoss", kTupleTensor2},
     {"CTCLossV2", kTupleTensor2},
     {"Coalesce", kTupleTensor3},
-    {"ConcatOffset", kAnyType},
+    {"ConcatOffset", kTypeNone},
     {"CombinedNonMaxSuppression", kTupleTensor4},
     {"ComputeAccidentalHits", kTupleTensor3},
     {"ConfusionMulGrad", kTupleTensor2},

@@ -129,7 +131,7 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"GRUV2HiddenGrad", kTupleTensor3},
     {"GRUV2HiddenGradCell", kTupleTensor3},
     {"Geqrf", kTupleTensor2},
-    {"GetNext", kAnyType},
+    {"GetNext", kTuple},
     {"GridSampler2DGrad", kTupleTensor2},
     {"GridSampler3D", kTupleTensor2},
     {"GridSampler3DGrad", kTupleTensor2},

@@ -138,7 +140,7 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"InstanceNormGrad", kTupleTensor3},
     {"InstanceNormV2", kTupleTensor3},
     {"InstanceNormV2Grad", kTupleTensor3},
-    {"InvertPermutation", kAnyType},
+    {"InvertPermutation", kTypeNone},
     {"LSTM", kTupleTensor5},
     {"LSTMGrad", kTupleTensor4},
     {"LSTMGradData", kTupleTensor3},

@@ -162,7 +164,7 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"Lu", kTupleTensor2},
     {"LuUnpack", kTupleTensor3},
     {"LuUnpackGrad", kTupleTensor2},
-    {"MakeTuple", kAnyType},
+    {"MakeTuple", kTypeNone},
     {"MapCacheIdx", kTupleTensor4},
     {"MapTensorGetData", kTupleTensor2},
     {"MatmulDDS", kTupleTensor2},

@@ -193,26 +195,26 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"ReduceStd", kTupleTensor2},
     {"ReservoirReplayBufferDestroy", kTupleTensor4},
     {"SampleDistortedBoundingBoxV2", kTupleTensor3},
-    {"ScalarAdd", kAnyType},
-    {"ScalarBool", kAnyType},
-    {"ScalarDiv", kAnyType},
-    {"ScalarFloordiv", kAnyType},
-    {"ScalarMod", kAnyType},
-    {"ScalarMul", kAnyType},
-    {"ScalarSub", kAnyType},
+    {"ScalarAdd", kTypeNone},
+    {"ScalarBool", kTypeNone},
+    {"ScalarDiv", kTypeNone},
+    {"ScalarFloordiv", kTypeNone},
+    {"ScalarMod", kTypeNone},
+    {"ScalarMul", kTypeNone},
+    {"ScalarSub", kTypeNone},
     {"SelfAdjointEig", kTupleTensor2},
-    {"SequenceAdd", kAnyType},
-    {"SequenceAddN", kAnyType},
-    {"SequenceCount", kAnyType},
-    {"SequenceIndex", kAnyType},
-    {"SequenceMul", kAnyType},
-    {"SequenceMax", kAnyType},
-    {"SequenceMin", kAnyType},
-    {"SequenceSlice", kAnyType},
-    {"SequenceSliceGrad", kAnyType},
-    {"SequenceSliceSetItem", kAnyType},
-    {"SequenceZerosLike", kAnyType},
-    {"Size", kAnyType},
+    {"SequenceAdd", kTypeNone},
+    {"SequenceAddN", kTypeNone},
+    {"SequenceCount", kTypeNone},
+    {"SequenceIndex", kTypeNone},
+    {"SequenceMul", kTypeNone},
+    {"SequenceMax", kTypeNone},
+    {"SequenceMin", kTypeNone},
+    {"SequenceSlice", kTypeNone},
+    {"SequenceSliceGrad", kTypeNone},
+    {"SequenceSliceSetItem", kTypeNone},
+    {"SequenceZerosLike", kTypeNone},
+    {"Size", kTypeNone},
     {"SoftmaxCrossEntropyWithLogits", kTupleTensor2},
     {"SoftmaxV2WithDropoutDoMaskV3", kTupleTensor2},
     {"Sort", kTupleTensor2},

@@ -249,27 +251,27 @@ inline static PredictOutTypeMap out_type_prediction = {{"ActsULQ", kTupleTensor4
     {"Sspaddmm", kTupleTensor3},
     {"SubAndFilter", kTupleTensor2},
     {"Svd", kTupleTensor3},
-    {"TensorToList", kAnyType},
-    {"TensorToScalar", kAnyType},
-    {"TensorToTuple", kAnyType},
+    {"TensorToList", kTypeNone},
+    {"TensorToScalar", kTypeNone},
+    {"TensorToTuple", kTypeNone},
     {"TopK", kTupleTensor2},
-    {"TupleGetItem", kAnyType},
+    {"TupleGetItem", kTypeNone},
     {"UniformCandidateSampler", kTupleTensor3},
     {"Unique", kTupleTensor2},
     {"UniqueConsecutive", kTupleTensor3},
     {"UniqueWithPad", kTupleTensor2},
     {"Unpack", kTuple},
     {"Unstack", kTuple},
-    {"bit_and", kAnyType},
-    {"bit_or", kAnyType},
-    {"make_range", kAnyType},
-    {"scalar_eq", kAnyType},
-    {"scalar_ge", kAnyType},
-    {"scalar_gt", kAnyType},
-    {"scalar_le", kAnyType},
-    {"scalar_lt", kAnyType},
-    {"sequence_len", kAnyType},
-    {"tuple_setitem", kAnyType}};
+    {"bit_and", kTypeNone},
+    {"bit_or", kTypeNone},
+    {"make_range", kTypeNone},
+    {"scalar_eq", kTypeNone},
+    {"scalar_ge", kTypeNone},
+    {"scalar_gt", kTypeNone},
+    {"scalar_le", kTypeNone},
+    {"scalar_lt", kTypeNone},
+    {"sequence_len", kTypeNone},
+    {"tuple_setitem", kTypeNone}};
 }  // namespace pynative
 }  // namespace mindspore
 #endif  // MINDSPORE_CCSRC_PIPELINE_PYNATIVE_PREDICTOUTTYPEMAP_H_

@@ -79,7 +79,7 @@ TypePtr PredictOutTypeByName(const std::string &op_name) {
   }
   static auto operator_fns = ops::OperatorRegister::GetInstance().GetOperatorMap();
   if (operator_fns.find(op_name) == operator_fns.end()) {
-    return ops_map[op_name] = kAnyType;
+    return ops_map[op_name] = kTypeNone;
   }
   const auto pre_iter = out_type_prediction.find(op_name);
   auto type = pre_iter == out_type_prediction.end() ? kTensorType : pre_iter->second;
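
The fallback flips accordingly: an operator missing from the operator registry is now predicted as kTypeNone rather than kAnyType, while registered operators without a map entry still default to a single Tensor. A runnable sketch of the lookup (the memo dict mirrors the function's static ops_map; all names here are stand-ins, not the real API):

_ops_map = {}  # memo cache, mirroring the static ops_map in the C++ above

def predict_out_type_by_name(op_name, registered_ops, prediction):
    # Sketch only: string tokens replace the C++ TypePtr values.
    if op_name in _ops_map:
        return _ops_map[op_name]
    if op_name not in registered_ops:
        return _ops_map.setdefault(op_name, "kTypeNone")  # was kAnyType before
    return _ops_map.setdefault(op_name, prediction.get(op_name, "kTensorType"))

print(predict_out_type_by_name("NotARealOp", set(), {}))  # -> kTypeNone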

@@ -118,8 +118,8 @@ py::object PyNativeExecutor::RunOpAsync(const py::args &args) const {

   // 1. get top_type from Primitive::PredictOutputType
   auto top_type = PredictOutTypeByName(adapter->name());
-  // 2. if predict failed(kAnyType), return after infer(half-asynchronous) or run(synchronous mode)
-  if (top_type == kAnyType || DisablePyTraceAsync(op_run_info)) {
+  // 2. if disable PyTraceAsync, return after infer(half-asynchronous) or run(synchronous mode)
+  if (DisablePyTraceAsync(op_run_info)) {
     // Wait for async task finish
     forward_executor()->WaitForwardTask();
     PyNativeAlgo::Common::StubNodeToValue(op_run_info);
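
This is the behavioural heart of the commit: a kAnyType prediction used to force the half-asynchronous or synchronous path, but now that AnyTypeNode can stand in for an output whose type is not yet known, only an explicit DisablePyTraceAsync does. The decision, reduced to one function (booleans replace the real op_run_info; a sketch, not the real code):

def falls_back_to_sync(top_type, disable_py_trace_async, new_behavior=True):
    # Old code also bailed out whenever the predicted type was kAnyType.
    if new_behavior:
        return disable_py_trace_async
    return top_type == "kAnyType" or disable_py_trace_async

assert falls_back_to_sync("kAnyType", False, new_behavior=False) is True
assert falls_back_to_sync("kAnyType", False, new_behavior=True) is False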

@@ -43,6 +43,10 @@ StubNodePtr MakeStubNode(const TypePtr &type) {
       node->SetElement(i, elem);
     }
     return node;
+  } else if (type == kAnyType) {
+    return std::make_shared<AnyTypeNode>();
+  } else if (type == kTypeNone) {
+    return std::make_shared<NoneTypeNode>();
   } else {
     if (!type->isa<TensorType>()) {
       MS_LOG(WARNING) << "stub tensor is create for type: " << type->ToString();

@@ -56,7 +60,7 @@ py::object MakeOutput(StubNodePtr node) {
   if (node->isa<TensorNode>()) {
     auto tensor = node->cast<std::shared_ptr<TensorNode>>();
     return py::cast(tensor);
-  } else {
+  } else if (node->isa<SequenceNode>()) {
     auto seq = node->cast<std::shared_ptr<SequenceNode>>();
     MS_EXCEPTION_IF_NULL(seq);
     auto &elements = seq->Elements();

@@ -68,6 +72,12 @@ py::object MakeOutput(StubNodePtr node) {
       out[i] = MakeOutput(elements[i]);
     }
     return out;
+  } else if (node->isa<AnyTypeNode>()) {
+    auto tensor = node->cast<std::shared_ptr<AnyTypeNode>>();
+    return py::cast(tensor);
+  } else {
+    auto tensor = node->cast<std::shared_ptr<NoneTypeNode>>();
+    return py::cast(tensor);
   }
 }
 }  // namespace
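
Together, the two hunks above extend the factory and the output builder in lockstep: MakeStubNode gains explicit branches for kAnyType and kTypeNone, and MakeOutput's former catch-all else is narrowed to SequenceNode so the two new node kinds get their own py::cast paths. The branch structure, rendered as plain Python over string tokens (a sketch, not the real API):

def make_stub_node(type_token):
    # Mirrors the C++ branch order; element recursion for tuples elided.
    if type_token.startswith("Tuple"):
        return "SequenceNode"
    if type_token == "kAnyType":
        return "AnyTypeNode"
    if type_token == "kTypeNone":
        return "NoneTypeNode"
    return "TensorNode"  # default: single tensor stub (C++ warns if not TensorType)

for token in ("Tuple[Tensor,Tensor]", "kAnyType", "kTypeNone", "Tensor"):
    print(token, "->", make_stub_node(token))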

@@ -225,6 +235,28 @@ void SequenceNode::SetValue(const ValuePtr &val) {
   StubNode::SetValue(val);
 }

+bool AnyTypeNode::SetAbstract(const AbstractBasePtr &abs) {
+  real_node_ = MakeStubNode(abs->BuildType());
+  auto flag = real_node_->SetAbstract(abs);
+  (void)StubNode::SetAbstract(abs);
+  return flag;
+}
+
+void AnyTypeNode::SetValue(const ValuePtr &val) {
+  real_node_->SetValue(val);
+  StubNode::SetValue(val);
+}
+
+py::object AnyTypeNode::GetRealNode() {
+  WaitAbstract();
+  return py::cast(real_node_);
+}
+
+py::object NoneTypeNode::GetRealValue() {
+  auto val = WaitValue();
+  return ValueToPyData(val);
+}
+
 std::pair<py::object, StubNodePtr> MakeTopNode(const TypePtr &type) {
   auto top = MakeStubNode(type);
   auto ret = MakeOutput(top);
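
AnyTypeNode is a thin delegator: only when infer delivers the abstract does it build the real stub from the inferred type, and from then on it forwards both the abstract and the eventual value to that inner node; GetRealNode simply waits for the abstract and hands the inner node to Python. A self-contained sketch of the delegation (dicts stand in for stub nodes and the factory is a stand-in parameter; illustrative only):

class SketchAnyTypeNode:
    def __init__(self, make_stub_node):
        self._make_stub_node = make_stub_node
        self.real_node = None

    def set_abstract(self, abstract):
        # The concrete node can only be chosen once the inferred type is known.
        self.real_node = self._make_stub_node(abstract["type"])
        return True

    def set_value(self, value):
        # Forward the computed value to the concrete inner node.
        self.real_node["value"] = value

node = SketchAnyTypeNode(lambda t: {"kind": t, "value": None})
node.set_abstract({"type": "TensorType"})
node.set_value(42)
print(node.real_node)  # -> {'kind': 'TensorType', 'value': 42}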

@@ -239,6 +271,10 @@ void RegStubNodes(const py::module *m) {
     .def("get_dtype", &TensorNode::GetDtype, "get output dtype of async stub.");
   (void)py::class_<SequenceNode, StubNode, std::shared_ptr<SequenceNode>>(*m, "SequenceNode")
     .def("get_elements", &SequenceNode::GetElements, "get the elements of async stub_seq.");
+  (void)py::class_<AnyTypeNode, StubNode, std::shared_ptr<AnyTypeNode>>(*m, "AnyTypeNode")
+    .def("get_real_node", &AnyTypeNode::GetRealNode, "get the real StubNode");
+  (void)py::class_<NoneTypeNode, StubNode, std::shared_ptr<NoneTypeNode>>(*m, "NoneTypeNode")
+    .def("get_real_value", &NoneTypeNode::GetRealValue, "get the real value");
 }
 }  // namespace stub
 }  // namespace mindspore

@@ -18,7 +18,7 @@ import inspect
 from functools import reduce
 from mindspore.common.tensor import Tensor
 from mindspore.common.dtype import type_size_in_bytes
-from mindspore._c_expression import TensorNode, SequenceNode
+from mindspore._c_expression import TensorNode, SequenceNode, NoneTypeNode, AnyTypeNode
 from mindspore.common.api import _convert_python_data


@@ -163,6 +163,7 @@ _init_stub_tensor_api()


 def _convert_stub(stub):
+    "convert stub to StubNode or Value"
     if isinstance(stub, TensorNode):
         return StubTensor(stub)
     if isinstance(stub, tuple):

@@ -170,4 +171,10 @@ def _convert_stub(stub):
     if isinstance(stub, SequenceNode):
         elements = stub.get_elements()
         return tuple(_convert_stub(e) for e in elements)
+    if isinstance(stub, NoneTypeNode):
+        val = stub.get_real_value()
+        return _convert_python_data(val)
+    if isinstance(stub, AnyTypeNode):
+        val = stub.get_real_node()
+        return _convert_stub(val)
     return _convert_python_data(stub)
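
The two new branches terminate the conversion differently: a NoneTypeNode collapses to a plain Python value, while an AnyTypeNode yields another stub that is converted again recursively. Exercising that shape with stand-in classes (the real nodes come from mindspore._c_expression; the method names mirror those registered in RegStubNodes above):

class FakeNoneTypeNode:
    def get_real_value(self):
        return 3  # e.g. the constant an op like Size produces

class FakeAnyTypeNode:
    def __init__(self, real_node):
        self._real_node = real_node

    def get_real_node(self):
        return self._real_node  # the concrete stub chosen after infer

def convert_stub(stub):
    # Simplified _convert_stub: just the two new branches plus the fallthrough.
    if isinstance(stub, FakeNoneTypeNode):
        return stub.get_real_value()
    if isinstance(stub, FakeAnyTypeNode):
        return convert_stub(stub.get_real_node())
    return stub

print(convert_stub(FakeAnyTypeNode(FakeNoneTypeNode())))  # -> 3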

@@ -23,6 +23,8 @@ import mindspore.nn as nn
 import mindspore.ops as P
 from mindspore import Tensor
 from mindspore.ops.operations import nn_ops as G
+from mindspore.common.api import _pynative_executor
+


 DEVICE_TARGET = "CPU"

@@ -345,6 +347,7 @@ def test_ps_roi_pooling_input_args_num():
             ms.Tensor(rois, dtype=ms.float32),
             ms.Tensor(features, dtype=ms.float32)
         )
+        _pynative_executor.sync()
     except TypeError:
         return
     else:

@@ -22,6 +22,8 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops.operations import nn_ops as G
+from mindspore.common.api import _pynative_executor
+

 DEVICE_TARGET = "GPU"
 CTX_MODE = ms.context.GRAPH_MODE

@@ -387,6 +389,7 @@ def test_ps_roi_pooling_input_args_num():
             ms.Tensor(rois, dtype=ms.float32),
             ms.Tensor(features, dtype=ms.float32)
         )
+        _pynative_executor.sync()
     except TypeError:
         return
     else:
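
Both test files gain the same two lines for the same reason: with fully asynchronous dispatch, the TypeError from an invalid call may be raised on the worker rather than at the call site, so the test has to synchronize inside the try block for the except clause to see it. A MindSpore-free demonstration of the pattern:

from concurrent.futures import ThreadPoolExecutor

def bad_op():
    raise TypeError("wrong number of arguments")

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(bad_op)  # dispatch returns immediately; no error yet
    try:
        future.result()           # the sync point: the TypeError re-raises here
    except TypeError:
        print("error surfaced at the sync point, as the tests now expect")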