diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.cc
index 5d84923d873..123c241dd26 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 #include "plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.h"
-#include
 #include
 #include
 #include
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/dynamic_stitch_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/dynamic_stitch_cpu_kernel.cc
index 52fd85334d1..ca292716948 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/dynamic_stitch_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/dynamic_stitch_cpu_kernel.cc
@@ -81,7 +81,8 @@ void DynamicStitchCpuKernelMod::UpdateOutputShapeAndSize(const std::vector<Kerne
                                                           const std::vector<KernelTensor *> &outputs) {
   outputs[kIndex0]->SetShapeVector(result_shape_);
   auto data_dtype = inputs[kIndex1]->dtype_id();
   auto data_dtype_size = GetTypeByte(TypeIdToType(data_dtype));
-  size_t batch_size = std::accumulate(result_shape_.cbegin(), result_shape_.cend(), 1, std::multiplies<int64_t>());
+  size_t batch_size =
+    LongToSize(std::accumulate(result_shape_.cbegin(), result_shape_.cend(), 1, std::multiplies<int64_t>()));
   outputs[kIndex0]->set_size(batch_size * data_dtype_size);
 }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_create.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_create.cc
index bc89d471b3a..ae278d85c9f 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_create.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_create.cc
@@ -44,6 +44,7 @@ bool EnvironCreateCpuKernelMod::Launch(const std::vector<KernelTensor *> &, cons
 
   int64_t env_handle = EnvironMgr::GetInstance().Create();
   auto output = GetDeviceAddress<int64_t>(outputs, kIndex0);
+  MS_EXCEPTION_IF_NULL(output);
   output[kIndex0] = env_handle;
   MS_LOG(DEBUG) << "Create env handle: " << output[kIndex0];
 
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_set.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_set.cc
index f6abcbf0260..89d6581f9f8 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_set.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/environ/environ_cpu_set.cc
@@ -60,7 +60,9 @@ bool EnvironSetCpuKernelMod::Launch(const std::vector<KernelTensor *> &inputs, c
   auto input_key = GetDeviceAddress<int64_t>(inputs, kIndex1);
   auto input_value = GetDeviceAddress(inputs, kIndex2);
   auto output_handle = GetDeviceAddress<int64_t>(outputs, kIndex0);
-
+  MS_EXCEPTION_IF_NULL(input_handle);
+  MS_EXCEPTION_IF_NULL(input_key);
+  MS_EXCEPTION_IF_NULL(output_handle);
   // Get host handle and host key.
   int64_t host_handle = input_handle[kIndex0];
   int64_t host_key = input_key[kIndex0];
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/pad_and_shift_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/pad_and_shift_cpu_kernel.cc
index d74483b7e88..036523d7072 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/pad_and_shift_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/pad_and_shift_cpu_kernel.cc
@@ -80,7 +80,7 @@ void PadAndShiftCpuKernelMod::LaunchKernel(const std::vector<KernelTensor *> &in
 void PadAndShiftCpuKernelMod::UpdateOutputShapeAndSize(const std::vector<KernelTensor *> &inputs,
                                                         const std::vector<KernelTensor *> &outputs) {
   ShapeVector output_shape(input_shape_.begin(), input_shape_.end());
-  output_shape[kIndex0] = output_size_;
+  output_shape[kIndex0] = static_cast<int64_t>(output_size_);
   outputs[kIndex0]->SetShapeVector(output_shape);
   outputs[kIndex0]->set_size(output_size_ * type_size_);
 }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/pyfunc/py_func_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/pyfunc/py_func_cpu_kernel.cc
index 8995fab518a..3f0e320ecc8 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/pyfunc/py_func_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/pyfunc/py_func_cpu_kernel.cc
@@ -257,6 +257,21 @@ bool PyFuncCpuKernelMod::Launch(const std::vector<KernelTensor *> &inputs, const
   return ExecuteKernel(inputs, outputs);
 }
 
+void GetTypeInfo(const PrimitivePtr &primitive, const std::vector<KernelTensor *> &inputs, const std::string &arg_name,
+                 std::vector<TypeId> *types) {
+  if (primitive->HasAttr(arg_name)) {
+    const auto &type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr(arg_name));
+    (void)std::for_each(type_ptrs.begin(), type_ptrs.end(), [&types](auto p) {
+      MS_EXCEPTION_IF_NULL(p);
+      (void)types->emplace_back(p->type_id());
+    });
+  } else {
+    for (size_t i = 0; i < inputs.size(); ++i) {
+      types->emplace_back(inputs[i]->dtype_id());
+    }
+  }
+}
+
 void PyFuncCpuKernelMod::BuildFuncInfo(const PrimitivePtr &primitive, const std::vector<KernelTensor *> &inputs,
                                        const std::vector<KernelTensor *> &outputs) {
   std::vector<TypeId> in_types;
@@ -264,29 +279,8 @@ void PyFuncCpuKernelMod::BuildFuncInfo(const PrimitivePtr &primitive, const std:
   std::vector<TypeId> out_types;
   std::vector<std::vector<int64_t>> in_shapes;
   std::vector<std::vector<int64_t>> out_shapes;
-  if (primitive->HasAttr("in_types")) {
-    const auto &in_type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr("in_types"));
-    (void)std::for_each(in_type_ptrs.begin(), in_type_ptrs.end(), [&in_types](auto p) {
-      MS_EXCEPTION_IF_NULL(p);
-      (void)in_types.emplace_back(p->type_id());
-    });
-  } else {
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      in_types.emplace_back(inputs[i]->dtype_id());
-    }
-  }
-
-  if (primitive->HasAttr("out_types")) {
-    const auto &out_type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr("out_types"));
-    (void)std::for_each(out_type_ptrs.begin(), out_type_ptrs.end(), [&out_types](auto p) {
-      MS_EXCEPTION_IF_NULL(p);
-      (void)out_types.emplace_back(p->type_id());
-    });
-  } else {
-    for (size_t i = 0; i < outputs.size(); ++i) {
-      out_types.emplace_back(outputs[i]->dtype_id());
-    }
-  }
+  GetTypeInfo(primitive, inputs, "in_types", &in_types);
+  GetTypeInfo(primitive, outputs, "out_types", &out_types);
 
   if (primitive->HasAttr("in_shapes")) {
     in_shapes = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr("in_shapes"));
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/sparsefillemptyrows_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/sparsefillemptyrows_cpu_kernel.cc
index 0fad3f890b5..7386e513b25 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/sparsefillemptyrows_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/sparsefillemptyrows_cpu_kernel.cc
@@ -211,16 +211,16 @@ void SparseFillEmptyRowsCpuKernelMod::UpdateOutputShapeAndSize(const std::vector
   outputs[kIndex2]->SetShapeVector(out_empty_row_indicator_shape_);
   outputs[kIndex3]->SetShapeVector(out_reverse_index_shape_);
   size_t out_indice_batch =
-    std::accumulate(out_indice_shape.cbegin(), out_indice_shape.cend(), 1, std::multiplies<int64_t>());
+    LongToSize(std::accumulate(out_indice_shape.cbegin(), out_indice_shape.cend(), 1, std::multiplies<int64_t>()));
   auto out_indice_dtype_size = GetTypeByte(TypeIdToType(output_indices_type_));
   size_t out_values_batch =
-    std::accumulate(out_values_shape.cbegin(), out_values_shape.cend(), 1, std::multiplies<int64_t>());
+    LongToSize(std::accumulate(out_values_shape.cbegin(), out_values_shape.cend(), 1, std::multiplies<int64_t>()));
   auto out_values_dtype_size = GetTypeByte(TypeIdToType(output_values_type_));
-  size_t out_empty_row_indicator_batch = std::accumulate(
-    out_empty_row_indicator_shape_.cbegin(), out_empty_row_indicator_shape_.cend(), 1, std::multiplies<int64_t>());
+  size_t out_empty_row_indicator_batch = LongToSize(std::accumulate(
+    out_empty_row_indicator_shape_.cbegin(), out_empty_row_indicator_shape_.cend(), 1, std::multiplies<int64_t>()));
   auto out_empty_row_indicator_dtype_size = GetTypeByte(TypeIdToType(output_empty_row_indicator_type_));
-  size_t out_reverse_index_batch =
-    std::accumulate(out_reverse_index_shape_.cbegin(), out_reverse_index_shape_.cend(), 1, std::multiplies<int64_t>());
+  size_t out_reverse_index_batch = LongToSize(
+    std::accumulate(out_reverse_index_shape_.cbegin(), out_reverse_index_shape_.cend(), 1, std::multiplies<int64_t>()));
   auto out_reverse_index_dtype_size = GetTypeByte(TypeIdToType(output_reverse_index_type_));
   outputs[kIndex0]->set_size(out_indice_batch * out_indice_dtype_size);
   outputs[kIndex1]->set_size(out_values_batch * out_values_dtype_size);
diff --git a/mindspore/ccsrc/runtime/device/kernel_runtime.cc b/mindspore/ccsrc/runtime/device/kernel_runtime.cc
index 1ce19be99b1..0256afa9cc8 100644
--- a/mindspore/ccsrc/runtime/device/kernel_runtime.cc
+++ b/mindspore/ccsrc/runtime/device/kernel_runtime.cc
@@ -1364,6 +1364,7 @@ void KernelRuntime::GenKernelTensorLaunchArgs(const CNodePtr &cnode, std::vector
 #endif
   for (auto index : clean_output_indexes) {
     auto device_address = AnfAlgo::GetOutputAddr(pre_node, index);
+    MS_EXCEPTION_IF_NULL(device_address);
     const auto &input = device_address->kernel_tensor();
     MS_EXCEPTION_IF_NULL(input);
     if (mem_scheduler != nullptr) {
diff --git a/mindspore/python/mindspore/ops/operations/array_ops.py b/mindspore/python/mindspore/ops/operations/array_ops.py
index 1089a389437..dee29300e1d 100755
--- a/mindspore/python/mindspore/ops/operations/array_ops.py
+++ b/mindspore/python/mindspore/ops/operations/array_ops.py
@@ -14,7 +14,6 @@
 # ============================================================================
 """Operators for array."""
-# pylint: disable=unused-import
 import copy
 import itertools
 import numbers
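Note on the recurring pattern in the hunks above: several call sites replace a raw std::accumulate over a ShapeVector with a LongToSize(...) wrapper before the result is used as a size_t byte count. The sketch below is a minimal, standalone illustration of that accumulate-then-convert idea; LongToSizeChecked and ElementCount are hypothetical helpers written for this example and are not MindSpore's actual LongToSize or kernel code.

// Standalone sketch (illustrative only): accumulate a shape in int64_t, then
// convert to size_t once, mirroring the LongToSize(std::accumulate(...)) calls
// in the hunks above.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <stdexcept>
#include <vector>

using ShapeVector = std::vector<int64_t>;

// Hypothetical helper: reject negative counts before the signed-to-unsigned cast.
size_t LongToSizeChecked(int64_t v) {
  if (v < 0) {
    throw std::runtime_error("negative element count");
  }
  return static_cast<size_t>(v);
}

// Element count of a shape; the accumulation stays in int64_t until the end.
size_t ElementCount(const ShapeVector &shape) {
  int64_t count = std::accumulate(shape.cbegin(), shape.cend(), int64_t{1}, std::multiplies<int64_t>());
  return LongToSizeChecked(count);
}

int main() {
  ShapeVector result_shape{2, 3, 4};
  constexpr size_t data_dtype_size = sizeof(float);
  // Analogous to outputs[kIndex0]->set_size(batch_size * data_dtype_size): 24 elements * 4 bytes = 96.
  std::cout << ElementCount(result_shape) * data_dtype_size << " bytes\n";
  return 0;
}

One detail worth noting: std::accumulate deduces its accumulator type from the initial value, so int64_t{1} keeps the whole product in 64 bits, whereas a plain 1 literal would accumulate in int.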