From f1856105e5d4d87198fc3fc5dcf8ca499564f511 Mon Sep 17 00:00:00 2001
From: wilfChen
Date: Mon, 10 Jan 2022 18:01:33 +0800
Subject: [PATCH] code clean

---
 .../backend/kernel_compiler/gpu/math/matmul_gpu_kernel.h  | 3 ---
 .../ccsrc/backend/kernel_compiler/gpu/trt/trt_kernel.cc   | 6 ++++--
 .../ccsrc/backend/optimizer/gpu/cudnn_inplace_fusion.cc   | 1 +
 .../ccsrc/backend/optimizer/trt_pass/trt_op_converter.cc  | 8 ++++----
 mindspore/ccsrc/backend/session/gpu_inference_session.cc  | 1 +
 mindspore/ccsrc/runtime/device/executor/dynamic_kernel.cc | 2 --
 6 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/matmul_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/matmul_gpu_kernel.h
index 1098138b314..820fa80c960 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/matmul_gpu_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/matmul_gpu_kernel.h
@@ -40,9 +40,6 @@ class MatMulGpuKernel : public GpuKernel {
 
   bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
-    if (is_null_input_) {
-      return true;
-    }
     CHECK_CUBLAS_RET_WITH_ERROR(cublasSetStream(handle_, reinterpret_cast<cudaStream_t>(stream_ptr)),
                                 "cublasSetStream failed");
     VARIABLE_NOT_USED(workspace);
diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/trt/trt_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/trt/trt_kernel.cc
index 59902cc4bbb..7745bd83a96 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/gpu/trt/trt_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/trt/trt_kernel.cc
@@ -15,6 +15,8 @@
  */
 
 #include "backend/kernel_compiler/gpu/trt/trt_kernel.h"
+#include
+#include
 #include "backend/kernel_compiler/gpu/data/dataset_utils.h"
 #include "backend/kernel_compiler/gpu/trt/trt_utils.h"
 #include "backend/kernel_compiler/common_utils.h"
@@ -78,9 +80,9 @@ bool TrtKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<
   MS_EXCEPTION_IF_NULL(context_);
   std::vector<void *> device_buffer;
   std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(device_buffer),
-                 [](const AddressPtr &input) -> void * { return input->addr; });
+                 [](const AddressPtr &input) { return input->addr; });
   std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(device_buffer),
-                 [](const AddressPtr &output) -> void * { return output->addr; });
+                 [](const AddressPtr &output) { return output->addr; });
   return context_->enqueueV2(device_buffer.data(), reinterpret_cast<cudaStream_t>(stream), nullptr);
 }
 }  // namespace kernel
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/cudnn_inplace_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/cudnn_inplace_fusion.cc
index ef6bbfb72ac..b91f41c51aa 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/cudnn_inplace_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/cudnn_inplace_fusion.cc
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_converter.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_converter.cc
index a3189949975..040d25920f4 100644
--- a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_converter.cc
+++ b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_converter.cc
@@ -360,7 +360,7 @@ MS_TRT_CONVERTER_FUNC_REG(ReLU6) {
   dim.nbDims = SizeToInt(x_shape.size());
   std::fill(dim.d, dim.d + dim.nbDims, 1);
 
-  auto AddConst = [&context, &dim](const float &coeff) -> nvinfer1::ITensor * {
+  auto AddConst = [&context, &dim](const float &coeff) {
     std::shared_ptr<tensor::Tensor> weight = context->CreateTempWeight(kNumberTypeFloat32, {1});
     auto value = static_cast<float *>(weight->data_c());
     value[0] = coeff;
@@ -395,7 +395,7 @@ MS_TRT_CONVERTER_FUNC_REG(GeLU) {
   dim.nbDims = SizeToInt(x_shape.size());
   std::fill(dim.d, dim.d + dim.nbDims, 1);
 
-  auto AddConst = [&context, &dim](const float &coeff) -> nvinfer1::ITensor * {
+  auto AddConst = [&context, &dim](const float &coeff) {
     std::shared_ptr<tensor::Tensor> weight = context->CreateTempWeight(kNumberTypeFloat32, {1});
     auto value = static_cast<float *>(weight->data_c());
     value[0] = coeff;
@@ -446,7 +446,7 @@ MS_TRT_CONVERTER_FUNC_REG(HSigmoid) {
   dim.nbDims = SizeToInt(x_shape.size());
   std::fill(dim.d, dim.d + dim.nbDims, 1);
 
-  auto AddConst = [&context, &dim](const float &coeff) -> nvinfer1::ITensor * {
+  auto AddConst = [&context, &dim](const float &coeff) {
     std::shared_ptr<tensor::Tensor> weight = context->CreateTempWeight(kNumberTypeFloat32, {1});
     auto value = static_cast<float *>(weight->data_c());
     value[0] = coeff;
@@ -487,7 +487,7 @@ MS_TRT_CONVERTER_FUNC_REG(HSwish) {
   dim.nbDims = SizeToInt(x_shape.size());
   std::fill(dim.d, dim.d + dim.nbDims, 1);
 
-  auto AddConst = [&context, &dim](const float &coeff) -> nvinfer1::ITensor * {
+  auto AddConst = [&context, &dim](const float &coeff) {
     std::shared_ptr<tensor::Tensor> weight = context->CreateTempWeight(kNumberTypeFloat32, {1});
     auto value = static_cast<float *>(weight->data_c());
     value[0] = coeff;
diff --git a/mindspore/ccsrc/backend/session/gpu_inference_session.cc b/mindspore/ccsrc/backend/session/gpu_inference_session.cc
index 0bfc2a7fb44..331dbdca8a7 100644
--- a/mindspore/ccsrc/backend/session/gpu_inference_session.cc
+++ b/mindspore/ccsrc/backend/session/gpu_inference_session.cc
@@ -15,6 +15,7 @@
  */
 
 #include "backend/session/gpu_inference_session.h"
+#include
 #include "ir/tensor.h"
 #include "ir/anf.h"
 #include "ir/param_info.h"
diff --git a/mindspore/ccsrc/runtime/device/executor/dynamic_kernel.cc b/mindspore/ccsrc/runtime/device/executor/dynamic_kernel.cc
index b8c883bae4b..06686fa7d0b 100644
--- a/mindspore/ccsrc/runtime/device/executor/dynamic_kernel.cc
+++ b/mindspore/ccsrc/runtime/device/executor/dynamic_kernel.cc
@@ -119,7 +119,6 @@ void DynamicKernel::InferShapeForNopNode(AnfNodePtr *input_node) {
   std::stack<AnfNodePtr> nop_road;
   nop_road.push(*input_node);
 
-  /*lint -e716*/
   while (true) {
     auto input_node_with_idx = AnfAlgo::GetPrevNodeOutput(*input_node, 0);
     auto in_node = input_node_with_idx.first;
@@ -131,7 +130,6 @@ void DynamicKernel::InferShapeForNopNode(AnfNodePtr *input_node) {
       break;
     }
   }
-  /*lint +e716*/
 
   while (!nop_road.empty()) {
     auto nop_node = nop_road.top();