!15787 Fix CodeDEX static-analysis issues (const-qualify pointer parameters, rename locals to lower case, use explicit integer type).

From: @liu_xiao_93
Reviewed-by: @zhoufeng54,@jjfeing
Signed-off-by: @jjfeing
This commit is contained in:
mindspore-ci-bot 2021-04-28 17:53:59 +08:00 committed by Gitee
commit d68971a799
3 changed files with 9 additions and 9 deletions

View File

@ -198,7 +198,7 @@ bool TbeKernelJsonCreator::GenTbeSingleKernelJson(const std::shared_ptr<mindspor
}
void GenNoneInputDescJson(const std::shared_ptr<OpIOInfo> &input_ptr, size_t input_i,
std::vector<nlohmann::json> *input_list) {
std::vector<nlohmann::json> *const input_list) {
nlohmann::json input_desc_json;
auto in_name = input_ptr->name();
input_desc_json[kJName] = in_name + std::to_string(input_i);
@ -209,7 +209,7 @@ void GenNoneInputDescJson(const std::shared_ptr<OpIOInfo> &input_ptr, size_t inp
void TbeKernelJsonCreator::GenValidInputDescJson(const std::shared_ptr<AnfNode> &anf_node, size_t real_input_index,
bool value, const std::shared_ptr<OpIOInfo> &input_ptr,
const string &op_input_name, size_t input_i,
std::vector<nlohmann::json> *input_list) {
std::vector<nlohmann::json> *const input_list) {
auto def_format = kOpFormat_NCHW;
auto dtype = GetDeviceInputType(anf_node, real_input_index);
auto format = GetDeviceInputFormat(anf_node, real_input_index);

View File

@ -30,7 +30,7 @@ namespace {
using KernelWithIndex = std::pair<AnfNodePtr, size_t>;
const std::set<std::string> InvalidOps = {kSplitOpName, kSplitVOpName, kConcatOpName};
void GetSplitOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &node, std::vector<AnfNodePtr> *out_nodes) {
void GetSplitOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &node, std::vector<AnfNodePtr> *const out_nodes) {
MS_EXCEPTION_IF_NULL(func_graph);
auto manager = func_graph->manager();
MS_EXCEPTION_IF_NULL(manager);

View File

@ -36,12 +36,12 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
MS_LOG(ERROR) << "MaxPool3DGradGrad only support NCDHW.";
}
MS_LOG(DEBUG) << "ksize of MaxPool3DGradGrad:" << ksize;
int64_t D = ksize[2];
int64_t H = ksize[3];
int64_t W = ksize[4];
int64_t d = ksize[2];
int64_t h = ksize[3];
int64_t w = ksize[4];
// 1 create tensor
std::vector<int64_t> assist_shape = {1, 1, D, H, W}; // shape:NCDHW
std::vector<int64_t> assist_shape = {1, 1, d, h, w}; // shape:NCDHW
TensorTypePtr tensor_type = std::make_shared<TensorType>(kFloat16);
MS_EXCEPTION_IF_NULL(tensor_type);
tensor::DeviceInfo device_info{kOpFormat_NDC1HWC0, tensor_type};
@ -52,14 +52,14 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
auto data_ptr = assist_tensor->data_c();
MS_EXCEPTION_IF_NULL(data_ptr);
std::vector<float16> half_data;
int64_t dims = 1 * 1 * D * H * W;
int64_t dims = 1 * 1 * d * h * w;
int64_t counter = dims;
for (int64_t i = 0; i < dims; i++) {
half_data.emplace_back(float16(static_cast<float>(counter)));
counter--;
}
auto elem_num = dims * kFloat16Len;
int64_t elem_num = dims * kFloat16Len;
auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(assist_tensor->data().nbytes()), half_data.data(), elem_num);
if (ret_code != 0) {
MS_LOG(ERROR) << "Failed to copy data into Tensor.";