diff --git a/mindspore/ccsrc/backend/kernel_compiler/host/host_kernel_metadata.cc b/mindspore/ccsrc/backend/kernel_compiler/host/host_kernel_metadata.cc
index 2417b61f60b..c8c575cea47 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/host/host_kernel_metadata.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/host/host_kernel_metadata.cc
@@ -20,16 +20,24 @@
 #include "backend/kernel_compiler/oplib/oplib.h"
 #include "backend/kernel_compiler/common_utils.h"
 #include "backend/session/anf_runtime_algorithm.h"
+#include "base/core_ops.h"
 
 namespace mindspore {
 namespace kernel {
-constexpr auto kDynamicShape = "DynamicShape";
+static const std::set<std::string> host_kernel = {prim::kPrimDynamicShape->name(),
+                                                  prim::kPrimDynamicBroadcastGradientArgs->name()};
 
 void HostMetadataInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<kernel::KernelBuildInfo>> *kernel_info_list) {
   MS_LOG(INFO) << "HostMetadataInfo.";
   MS_EXCEPTION_IF_NULL(kernel_node);
   MS_EXCEPTION_IF_NULL(kernel_info_list);
 
+  std::string op_name = AnfAlgo::GetCNodeName(kernel_node);
+  if (host_kernel.find(op_name) == host_kernel.end()) {
+    MS_LOG(DEBUG) << "Host does not have op [" << op_name << "]";
+    return;
+  }
+
   std::vector<std::string> inputs_format{};
   std::vector<TypeId> inputs_type{};
   size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index 84a72a83d2d..fad1f174d34 100644
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -61,6 +61,7 @@ constexpr auto kTile = "Tile";
 constexpr auto kBiasAddGrad = "BiasAddGrad";
 
 // Arrays
+constexpr auto kDynamicShape = "DynamicShape";
 constexpr auto kStack = "Stack";
 constexpr auto kUnstack = "Unstack";
 constexpr auto kTupleGetItem = "TupleGetItem";
@@ -167,7 +168,7 @@ inline const PrimitivePtr kPrimSparseGatherV2 = std::make_shared<Primitive>("SparseGatherV2");
 inline const PrimitivePtr kPrimSparseToDense = std::make_shared<Primitive>("SparseToDense");
 inline const PrimitivePtr kPrimShape = std::make_shared<Primitive>("Shape");
 inline const PrimitivePtr kPrimStridedSlice = std::make_shared<Primitive>(kStridedSlice);
-inline const PrimitivePtr kPrimDynamicShape = std::make_shared<Primitive>("DynamicShape");
+inline const PrimitivePtr kPrimDynamicShape = std::make_shared<Primitive>(kDynamicShape);
 inline const PrimitivePtr kPrimEmbeddingLookup = std::make_shared<Primitive>("EmbeddingLookup");
 inline const PrimitivePtr kPrimEmbeddingLookupCommGrad = std::make_shared<Primitive>("EmbeddingLookupCommGrad");
 inline const PrimitivePtr kPrimSize = std::make_shared<Primitive>("Size");
diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py
index 404bc0c6e74..2b94a5b31d0 100644
--- a/mindspore/nn/layer/conv.py
+++ b/mindspore/nn/layer/conv.py
@@ -266,7 +266,7 @@ class Conv2d(_Conv):
     def extend_repr(self):
         s = 'input_channels={}, output_channels={}, kernel_size={},' \
             'stride={}, pad_mode={}, padding={}, dilation={}, ' \
-            'group={}, has_bias={}' \
+            'group={}, has_bias={}, ' \
             'weight_init={}, bias_init={}, format={}'.format(
                 self.in_channels,
                 self.out_channels,
diff --git a/mindspore/train/callback/_lr_scheduler_callback.py b/mindspore/train/callback/_lr_scheduler_callback.py
index db63b10cffc..df490a08acc 100644
--- a/mindspore/train/callback/_lr_scheduler_callback.py
+++ b/mindspore/train/callback/_lr_scheduler_callback.py
@@ -35,7 +35,7 @@ class LearningRateScheduler(Callback):
         learning_rate_function (Function): The function about how to change the learning rate during training.
 
     Examples:
-        >>> from _lr_scheduler_callback import LearningRateScheduler
+        >>> from mindspore.train.callback import LearningRateScheduler
         >>> import mindspore.nn as nn
         >>> from mindspore.train import Model
         ...