From 56e8d4ef9dd942fba934c677466ab5d22d986aaf Mon Sep 17 00:00:00 2001
From: bichaoyang
Date: Mon, 8 Aug 2022 14:42:38 +0800
Subject: [PATCH] merge

---
 .../ccsrc/frontend/parallel/ops_info/bias_add_info.cc      |  2 +-
 .../device/cpu/kernel/no_repeat_ngram_cpu_kernel.cc        | 11 ++++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
index 05f87f75ae4..b7c862603e4 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
@@ -64,7 +64,7 @@ Status BiasAddInfo::InferTensorMap() {
   for (size_t i = 0; i < sub_a_strategy_size; ++i) {
     sub_a_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - i));
   }
-  sub_b_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - static_cast<size_t>(1)));
+  sub_b_tensor_map.push_back(static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size)) - static_cast<int64_t>(1));
 
   inputs_tensor_map_.push_back(sub_a_tensor_map);
   inputs_tensor_map_.push_back(sub_b_tensor_map);
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/no_repeat_ngram_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/no_repeat_ngram_cpu_kernel.cc
index 29bb049e8a4..531af4791dd 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/no_repeat_ngram_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/no_repeat_ngram_cpu_kernel.cc
@@ -93,7 +93,7 @@ void NoRepeatNGramCpuKernelMod::CheckAndInitParams() {
 
 template <typename T>
 bool NoRepeatNGramCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
-                                             const std::vector<kernel::AddressPtr> &workspace,
+                                             const std::vector<kernel::AddressPtr> &,
                                              const std::vector<kernel::AddressPtr> &outputs) {
   CHECK_KERNEL_INPUTS_NUM(inputs.size(), kNoRepeatNGramInputsNum, kernel_name_);
   CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kNoRepeatNGramOutputsNum, kernel_name_);
@@ -113,14 +113,15 @@ bool NoRepeatNGramCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
     int64_t output_index_i = i * output_dim_;
     for (int64_t k = 0; k < state_dim_; k++) {
       int64_t src_index_k = k + i * state_dim_;
-      array_dim[k] = static_cast<T>(state_seq[src_index_k]);
+      array_dim[LongToSize(k)] = static_cast<T>(state_seq[LongToSize(src_index_k)]);
       if (k > (state_dim_ - ngram_size_)) {
-        array_ngram[k + ngram_size_ - state_dim_ - 1] = static_cast<T>(state_seq[src_index_k]);
+        array_ngram[LongToSize(k + ngram_size_ - state_dim_ - 1)] =
+          static_cast<T>(state_seq[LongToSize(src_index_k)]);
       }
     }
     for (int64_t j = 0; j < state_dim_ - ngram_size_ + 1; j++) {
       if (equal(array_ngram.begin(), array_ngram.end(), array_dim.begin() + j)) {
-        int64_t output_index_j = static_cast<int64_t>(array_dim[j + ngram_size_ - 1]);
+        int64_t output_index_j = static_cast<int64_t>(array_dim[LongToSize(j + ngram_size_ - 1)]);
         output[output_index_i + output_index_j] = -(std::numeric_limits<T>::max)();
       }
     }
@@ -137,7 +138,7 @@ std::vector<std::pair<KernelAttr, NoRepeatNGramCpuKernelMod::NoRepeatNGramFunc>
    &NoRepeatNGramCpuKernelMod::LaunchKernel<double>}};
 
 std::vector<KernelAttr> NoRepeatNGramCpuKernelMod::GetOpSupport() {
-  static std::vector<KernelAttr> support_list;
+  std::vector<KernelAttr> support_list;
   (void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
                        [](const std::pair<KernelAttr, NoRepeatNGramFunc> &pair) { return pair.first; });
   return support_list;
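
Why the bias_add_info.cc change matters: LAST_INDEX(sub_a_strategy_size) is evaluated in size_t, so subtracting 1 inside the cast wraps to SIZE_MAX whenever the last index is 0; the value only comes back to -1 through implementation-defined narrowing in the outer cast, and the unsigned wraparound itself is what sanitizers and static analysis flag. Moving the subtraction outside the cast keeps the arithmetic in signed int64_t throughout. A minimal standalone sketch of the difference (not MindSpore code; LAST_INDEX is assumed here to be the usual ((size) - 1) macro):

#include <cstddef>
#include <cstdint>
#include <iostream>

// Assumed definition, mirroring the usual LAST_INDEX macro over a size_t.
#define LAST_INDEX(size) ((size) - 1)

int main() {
  size_t sub_a_strategy_size = 1;  // LAST_INDEX(...) == 0

  // Old form: "0 - 1" is evaluated in size_t and wraps to SIZE_MAX first;
  // the result only becomes -1 again via implementation-defined narrowing
  // in the outer cast, and the wraparound itself trips UBSan-style checks.
  int64_t old_form = static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size) - static_cast<size_t>(1));

  // New form: cast to int64_t first, then subtract in signed arithmetic,
  // so the intermediate value is a well-defined -1 throughout.
  int64_t new_form = static_cast<int64_t>(LAST_INDEX(sub_a_strategy_size)) - static_cast<int64_t>(1);

  std::cout << old_form << " " << new_form << "\n";  // typically "-1 -1"
}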
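The no_repeat_ngram_cpu_kernel.cc hunks replace raw int64_t subscripts with LongToSize(...), making each signed-to-unsigned conversion explicit at the point where a vector is indexed. Below is a hedged sketch of what a LongToSize-style helper does; MindSpore's real helper lives in its conversion utilities and reports failures through its own logging macros rather than a C++ exception:

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Simplified stand-in for MindSpore's LongToSize: a checked cast that
// rejects negative indices instead of letting them wrap to huge unsigned
// subscripts.
inline size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("LongToSize: negative index");
  }
  return static_cast<size_t>(v);
}

int main() {
  std::vector<int> array_dim(8);
  int64_t k = 3;
  array_dim[LongToSize(k)] = 42;      // fine: explicit, checked conversion
  // array_dim[LongToSize(k - 10)];   // would throw instead of wrapping
  return array_dim[3] == 42 ? 0 : 1;
}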
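Dropping static from support_list in GetOpSupport fixes more than style: the function appends into the vector through std::back_inserter on every call, so a function-local static accumulates duplicate KernelAttr entries across calls, and mutating it is a data race if two threads query op support concurrently. A minimal sketch of the duplicate growth, with stand-in types (Attr and func_list are illustrative names, not MindSpore's real ones):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

using Attr = std::string;
static const std::vector<std::pair<Attr, int>> func_list = {{"f16", 0}, {"f32", 1}, {"f64", 2}};

std::vector<Attr> GetOpSupportBuggy() {
  static std::vector<Attr> support_list;  // persists across calls!
  (void)std::transform(func_list.begin(), func_list.end(), std::back_inserter(support_list),
                       [](const std::pair<Attr, int> &p) { return p.first; });
  return support_list;
}

int main() {
  std::cout << GetOpSupportBuggy().size() << "\n";  // 3
  std::cout << GetOpSupportBuggy().size() << "\n";  // 6: entries duplicated
}

With static removed, each call builds a fresh list of exactly one entry per registered kernel variant, which is what callers of GetOpSupport expect.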