Forked from mindspore-Ecosystem/mindspore

Parent: 99317e9308
Commit: c7fb92a16c
@@ -79,7 +79,7 @@ bool SpaceToBatchNDCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressP
   const auto *input = reinterpret_cast<T *>(inputs[0]->addr);
   auto *output = reinterpret_cast<T *>(outputs[0]->addr);
-  int ret = memset_s(output, outputs[0]->size, 0, sizeof(T) * output_size_);
+  int ret = memset_s(output, outputs[0]->size, 0, sizeof(T) * static_cast<size_t>(output_size_));
   if (ret != 0) {
     MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', memset_s error. Error no: " << ret;
   }
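The cast in this hunk lines up with memset_s taking its fill count as an unsigned size_t while output_size_ is a signed 64-bit member, so the byte count is now converted explicitly rather than through an implicit conversion. A minimal standalone sketch of the same pattern, using std::memset and hypothetical names (ClearOutput, buffer_bytes), since only this fragment of the kernel is visible here:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Zero-fill an output buffer holding `output_size` elements of type T.
    // `output_size` stays int64_t, as in the kernel; the count handed to the
    // memset-style call is size_t, so the conversion is written out explicitly.
    template <typename T>
    bool ClearOutput(T *output, size_t buffer_bytes, int64_t output_size) {
      size_t count = sizeof(T) * static_cast<size_t>(output_size);
      if (output == nullptr || count > buffer_bytes) {
        return false;  // mirrors the kernel's error path when memset_s fails
      }
      std::memset(output, 0, count);
      return true;
    }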
@@ -88,8 +88,8 @@ bool SpaceToBatchNDCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressP
     std::vector<int64_t> input_index(input_shape_.size(), 0);
     int64_t cur_pos = pos;
     for (int rev_i = input_shape_.size() - 1; rev_i >= 0; rev_i -= 1) {
-      input_index[rev_i] = cur_pos % input_shape_[rev_i];
-      cur_pos = cur_pos / input_shape_[rev_i];
+      input_index[rev_i] = cur_pos % input_shape_[IntToSize(rev_i)];
+      cur_pos = cur_pos / input_shape_[IntToSize(rev_i)];
     }

     std::vector<int64_t> output_index(input_index);
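IntToSize here appears to be the framework's checked conversion from a signed index to size_t; the loop walks the shape with a signed rev_i, so each vector subscript is now converted explicitly instead of implicitly. A rough, self-contained sketch of the same flat-position decomposition, with a hypothetical ToSizeT helper standing in for IntToSize:

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Hypothetical stand-in for a checked int -> size_t conversion: reject
    // negative indices instead of letting them wrap around on conversion.
    inline size_t ToSizeT(int value) {
      if (value < 0) {
        throw std::out_of_range("negative index");
      }
      return static_cast<size_t>(value);
    }

    // Decompose a flat position into per-dimension indices, least-significant
    // dimension first, as the loop in the hunk above does over input_shape_.
    inline std::vector<int64_t> UnflattenIndex(int64_t pos, const std::vector<int64_t> &shape) {
      std::vector<int64_t> index(shape.size(), 0);
      int64_t cur_pos = pos;
      for (int rev_i = static_cast<int>(shape.size()) - 1; rev_i >= 0; rev_i -= 1) {
        index[ToSizeT(rev_i)] = cur_pos % shape[ToSizeT(rev_i)];
        cur_pos = cur_pos / shape[ToSizeT(rev_i)];
      }
      return index;
    }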
@@ -142,9 +142,9 @@ bool SpaceToBatchNDCpuKernelMod::Init(const BaseOperatorPtr &base_operator, cons
 int SpaceToBatchNDCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                                        const std::vector<KernelTensorPtr> &outputs,
                                        const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
-  if (KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost) == KRET_RESIZE_FAILED) {
+  if (KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost) == static_cast<int>(KRET_RESIZE_FAILED)) {
     MS_LOG(WARNING) << kernel_name_ << " reinit failed.";
-    return KRET_RESIZE_FAILED;
+    return static_cast<int>(KRET_RESIZE_FAILED);
   }
   // get input_shape
   input_shape_ = inputs.at(kIndex0)->GetShapeVector();
@@ -153,15 +153,15 @@ int SpaceToBatchNDCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, con
   input_size_ = 1;
   output_size_ = 1;
   for (size_t i = 0; i < input_shape_.size(); ++i) {
-    input_size_ = input_shape_[i] * input_size_;
+    input_size_ *= input_shape_[i];
   }
   for (size_t i = 0; i < output_shape_.size(); ++i) {
-    output_size_ = output_shape_[i] * output_size_;
+    output_size_ *= output_shape_[i];
   }

   off_set_ = input_shape_.size() - block_size_.size();

-  return KRET_OK;
+  return static_cast<int>(KRET_OK);
 }

 const std::vector<std::pair<KernelAttr, KernelRunFunc>> &SpaceToBatchNDCpuKernelMod::GetFuncList() const {
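Across the two Resize hunks, the method returns a plain int while KRET_OK and KRET_RESIZE_FAILED are enumerators, so the comparison and the return sites now cast the enumerator explicitly; the size products are also rewritten as compound assignments. A compilable sketch of the return-code pattern alone, with a hypothetical KernelRetCode enum and ResizeSketch function standing in for the framework types:

    // Hypothetical stand-in for the framework's kernel return codes.
    enum KernelRetCode : int { KRET_OK = 0, KRET_RESIZE_FAILED = 1 };

    // The resize hook returns int, so enumerators are cast explicitly at the
    // comparison and return sites instead of converting implicitly.
    int ResizeSketch(int base_resize_result) {
      if (base_resize_result == static_cast<int>(KRET_RESIZE_FAILED)) {
        return static_cast<int>(KRET_RESIZE_FAILED);
      }
      return static_cast<int>(KRET_OK);
    }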
@@ -91,7 +91,7 @@ abstract::ShapePtr SpaceToBatchNDInferShape(const PrimitivePtr &primitive,
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
-  auto input_shape_ptr = CheckAndConvertUtils::GetTensorInputShape(prim_name, input_args, kInputIndex0);
+  auto input_shape_ptr = CheckAndConvertUtils::GetTensorInputShape(prim_name, input_args, 0);

   auto paddings_value_ptr = primitive->GetAttr(kPaddings);
   MS_EXCEPTION_IF_NULL(paddings_value_ptr);
@@ -115,7 +115,7 @@ TypePtr SpaceToBatchNDInferType(const PrimitivePtr &prim, const std::vector<Abst
   }
   const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kUInt8, kUInt16,
                                          kUInt32, kUInt64, kFloat16, kFloat32, kFloat64};
-  auto var_type = input_args[kInputIndex0]->BuildType();
+  auto var_type = input_args[0]->BuildType();

   return CheckAndConvertUtils::CheckTensorTypeValid("input type", var_type, valid_types, prim->name());
 }
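For context on the last hunk: the infer-type routine collects the dtypes SpaceToBatchND accepts and validates the first input's type against that set via CheckTensorTypeValid. A toy analogue of that membership check, using plain strings instead of MindSpore's TypePtr objects (all names here are illustrative only):

    #include <set>
    #include <stdexcept>
    #include <string>

    // Toy analogue of validating a tensor dtype against an allowed set.
    inline std::string CheckTypeValid(const std::string &arg_name, const std::string &dtype,
                                      const std::set<std::string> &valid_types) {
      if (valid_types.count(dtype) == 0) {
        throw std::invalid_argument(arg_name + " has unsupported dtype: " + dtype);
      }
      return dtype;
    }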