!42946 fixing static code warnings to r1.9
Merge pull request !42946 from 黎冠新/r1.9
commit 65b9eea27b
@@ -38,8 +38,8 @@ constexpr size_t kWidth = 2;
constexpr size_t kDepth = 3;
constexpr size_t kBatch = 0;
constexpr size_t kImages = 1;
constexpr size_t kImageShapeLen = 4;
constexpr size_t kBoxes = 2;
constexpr size_t kImageShapeLen = 4;
constexpr size_t kCoordY1 = 0;
constexpr size_t kCoordX1 = 1;
constexpr size_t kCoordY2 = 2;
@@ -45,8 +45,7 @@ bool L2LossCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::v
int L2LossCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
- int ret = 0;
- if ((ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost)) != 0) {
+ if (auto ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost); ret != KRET_OK) {
return ret;
}
input_shape_ = inputs[kIndex0]->GetShapeVector();
@@ -58,10 +57,8 @@ template <typename T>
bool L2LossCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kL2LossInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kL2LossOutputsNum, kernel_name_);
- auto input_addr = reinterpret_cast<T *>(inputs[kIndex0]->addr);
- auto result_addr = reinterpret_cast<T *>(outputs[kIndex0]->addr);
+ T *input_addr = GetDeviceAddress<T>(inputs, kIndex0);
+ T *result_addr = GetDeviceAddress<T>(outputs, kIndex0);
*result_addr = static_cast<T>(0);
if (tensor_size_ == 0) {
MS_LOG(WARNING) << kernel_name_ << " input shape contain 0, input_shape: " << input_shape_;
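Note: the L2Loss Resize hunk above replaces an assignment inside the if-condition with a C++17 if-statement that carries an init-clause, which keeps ret scoped to the branch and compares against the named KRET_OK status instead of a bare 0. A minimal, self-contained sketch of the same pattern follows; FakeResize and the KRET_OK constant here are illustrative stand-ins, not MindSpore APIs.

    #include <iostream>

    constexpr int KRET_OK = 0;            // stand-in for the framework's "resize succeeded" status

    int FakeResize() { return KRET_OK; }  // pretend this is the base-class Resize call

    int Run() {
      // Old style: declare first, then assign inside the condition.
      //   int ret = 0;
      //   if ((ret = FakeResize()) != 0) { return ret; }
      // C++17 style: the init-clause keeps ret scoped to this if-statement.
      if (auto ret = FakeResize(); ret != KRET_OK) {
        return ret;
      }
      return KRET_OK;
    }

    int main() { std::cout << Run() << '\n'; }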
@@ -172,14 +172,14 @@ bool CropAndResizeGradBoxesGpuKernelMod::Launch(const std::vector<AddressPtr> &i
bool CropAndResizeGradBoxesGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::CropAndResizeGradBoxes>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto kernel_grad_ptr = std::dynamic_pointer_cast<ops::CropAndResizeGradBoxes>(base_operator);
+ kernel_name_ = kernel_grad_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->method_ = kernel_ptr->get_method();
+ attr_ptr_->method_ = kernel_grad_ptr->get_method();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
Resize(base_operator, inputs, outputs);
@@ -38,7 +38,7 @@ class ArgMaxHelperGpuKernel : public GpuKernelHelperBase {
: GpuKernelHelperBase(kernel_name, device_id) {
axis_ = 0;
bound_ = 0;
- is_null_input_ = false;
+ is_null_argmax_input_ = false;
}

virtual ~ArgMaxHelperGpuKernel() = default;
@@ -57,13 +57,13 @@ class ArgMaxHelperGpuKernel : public GpuKernelHelperBase {
if (out_flag == -1) {
return out_flag;
}
- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_argmax_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_argmax_input_) {
return 0;
}
size_t outer_size = 1;
@@ -123,7 +123,7 @@ class ArgMaxHelperGpuKernel : public GpuKernelHelperBase {
std::shared_ptr<ArgMaxAttr> attr_ptr_;
std::vector<int64_t> input_shape_;
S bound_;
- bool is_null_input_;
+ bool is_null_argmax_input_;
};
} // namespace cukernel
} // namespace mindspore
@@ -66,7 +66,7 @@ class FractionalPoolHelperGpuKernel : public GpuKernelHelperBase {
public:
explicit FractionalPoolHelperGpuKernel(const std::string &kernel_name, const uint32_t &device_id)
: GpuKernelHelperBase(kernel_name, device_id) {
- is_null_input_ = false;
+ is_null_fractional_input_ = false;
}

virtual ~FractionalPoolHelperGpuKernel() = default;
@@ -100,7 +100,7 @@ class FractionalPoolHelperGpuKernel : public GpuKernelHelperBase {
output_size_list_.emplace_back(cur_size * row_pooling_shape_[0]);
output_size_list_.emplace_back(cur_size * col_pooling_shape_[0]);

- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_fractional_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

@@ -190,7 +190,7 @@ class FractionalPoolHelperGpuKernel : public GpuKernelHelperBase {

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_fractional_input_) {
return 0;
}
T *input_ptr = nullptr;
@@ -291,7 +291,7 @@ class FractionalPoolHelperGpuKernel : public GpuKernelHelperBase {
std::vector<int64_t> output_shape_;
std::vector<int64_t> row_pooling_shape_;
std::vector<int64_t> col_pooling_shape_;
- bool is_null_input_;
+ bool is_null_fractional_input_;
};

class FractionalPoolGradAttr : public GpuKernelAttrBase {
@@ -306,7 +306,7 @@ class FractionalPoolGradHelperGpuKernel : public GpuKernelHelperBase {
public:
explicit FractionalPoolGradHelperGpuKernel(const std::string &kernel_name, const uint32_t &device_id)
: GpuKernelHelperBase(kernel_name, device_id) {
- is_null_input_ = false;
+ is_null_fractional_grad_input_ = false;
is_max_pooling_grad_ = (kernel_name_.find("FractionalMaxPoolGrad") != std::string::npos);
}

@@ -375,13 +375,13 @@ class FractionalPoolGradHelperGpuKernel : public GpuKernelHelperBase {
return out_flag;
}

- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_fractional_grad_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_fractional_grad_input_) {
return 0;
}

@@ -513,7 +513,7 @@ class FractionalPoolGradHelperGpuKernel : public GpuKernelHelperBase {
std::vector<int64_t> row_pooling_shape_;
std::vector<int64_t> col_pooling_shape_;
std::vector<int64_t> output_shape_;
- bool is_null_input_;
+ bool is_null_fractional_grad_input_;
bool is_max_pooling_grad_;
};
} // namespace cukernel
@@ -224,7 +224,7 @@ class MaxUnpool3DGradHelperGpuKernel : public GpuKernelHelperBase {
for (int64_t i = dims - 1; i >= 0; i--) {
outer_size *= backprop_output_shape_[i];
}
- CalMaxUnpool3DGrad(grad, indices, backprop_input_shape_, grad_shape_, output_ptr, outer_size, data_format_,
+ CalMaxUnpool3DGrad(grad, indices, backprop_input_shape_, grad_shape_, output_ptr, outer_size, grad_data_format_,
device_id_, reinterpret_cast<cudaStream_t>(cuda_stream));
return 0;
}
@@ -235,13 +235,13 @@ class MaxUnpool3DGradHelperGpuKernel : public GpuKernelHelperBase {

protected:
int CheckKernelParam() override {
- data_format_ = attr_ptr_->data_format;
- if (data_format_ != "NCDHW" && data_format_ != "NDHWC") {
+ grad_data_format_ = attr_ptr_->data_format;
+ if (grad_data_format_ != "NCDHW" && grad_data_format_ != "NDHWC") {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the 'data_format' must be 'NCDHW' or 'NDHWC' ,"
- << " but got " << data_format_;
+ << " but got " << grad_data_format_;
return -1;
}
- data_format_ = attr_ptr_->data_format;
+ grad_data_format_ = attr_ptr_->data_format;
return 0;
}

@@ -251,7 +251,7 @@ class MaxUnpool3DGradHelperGpuKernel : public GpuKernelHelperBase {
std::vector<int64_t> grad_shape_;
std::vector<int64_t> indices_shape_;
std::vector<int64_t> backprop_output_shape_;
- std::string data_format_;
+ std::string grad_data_format_;
bool is_null_input_;
};
} // namespace cukernel
@@ -46,7 +46,7 @@ class ResizeBicubicHelperGpuKernel : public GpuKernelHelperBase {
inputheight_ = 0;
outputwidth_ = 0;
outputheight_ = 0;
- is_null_input_ = false;
+ is_null_resizebicubic_input_ = false;
h_scale_ = 0;
w_scale_ = 0;
}
@@ -61,7 +61,7 @@ class ResizeBicubicHelperGpuKernel : public GpuKernelHelperBase {
constexpr int INPUT_C_ORDER = 3;
ResetResource();
align_corners_ = false;
- is_null_input_ = false;
+ is_null_resizebicubic_input_ = false;
batch_ = 0;
channel_ = 0;
inputheight_ = 0;
@@ -96,13 +96,13 @@ class ResizeBicubicHelperGpuKernel : public GpuKernelHelperBase {
if (out_flag == -1) {
return out_flag;
}
- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_resizebicubic_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_resizebicubic_input_) {
return 0;
}
T *input_ptr = nullptr;
@@ -169,7 +169,7 @@ class ResizeBicubicHelperGpuKernel : public GpuKernelHelperBase {
std::vector<int64_t> input_shape_;
std::vector<int64_t> output_shapesize_;
std::vector<int64_t> input_out_shape_;
- bool is_null_input_;
+ bool is_null_resizebicubic_input_;
int batch_;
int channel_;
int inputwidth_;
@@ -46,7 +46,7 @@ class ResizeBicubicGradHelperGpuKernel : public GpuKernelHelperBase {
input_grad_height_ = 0;
origin_width_ = 0;
origin_height_ = 0;
- is_null_input_ = false;
+ is_null_resizebicubic_grad_input_ = false;
h_scale_ = 0;
w_scale_ = 0;
}
@@ -61,7 +61,7 @@ class ResizeBicubicGradHelperGpuKernel : public GpuKernelHelperBase {
constexpr int INPUT_C_ORDER = 3;
ResetResource();
align_corners_ = false;
- is_null_input_ = false;
+ is_null_resizebicubic_grad_input_ = false;
batch_ = 0;
channel_ = 0;
input_grad_height_ = 0;
@@ -96,13 +96,13 @@ class ResizeBicubicGradHelperGpuKernel : public GpuKernelHelperBase {
if (out_flag == -1) {
return out_flag;
}
- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_resizebicubic_grad_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_resizebicubic_grad_input_) {
return 0;
}
T *input_grad_ptr = nullptr;
@@ -172,7 +172,7 @@ class ResizeBicubicGradHelperGpuKernel : public GpuKernelHelperBase {
std::vector<int64_t> input_grad_shape_;
std::vector<int64_t> origin_shape_;
std::vector<int64_t> output_shape_;
- bool is_null_input_;
+ bool is_null_resizebicubic_grad_input_;
int batch_;
int channel_;
int input_grad_width_;
@@ -37,7 +37,7 @@ class SoftMarginLossGradHelperGpuKernel : public GpuKernelHelperBase {
explicit SoftMarginLossGradHelperGpuKernel(const std::string &kernel_name, const uint32_t &device_id)
: GpuKernelHelperBase(kernel_name, device_id) {
reduction_ = ReductionMode::kMean;
- is_null_input_ = false;
+ is_null_softmarginloss_grad_input_ = false;
input_size_ = 1;
}

@@ -48,9 +48,10 @@ class SoftMarginLossGradHelperGpuKernel : public GpuKernelHelperBase {
constexpr size_t OUTPUT_NUM = 1;
ResetResource();

- int inp_flag = CalShapesSizeInBytes<T>(input_shapes, INPUT_NUM, kernel_name_, "input_shapes", &input_size_list_);
- if (inp_flag == -1) {
- return inp_flag;
+ int grad_inp_flag =
+ CalShapesSizeInBytes<T>(input_shapes, INPUT_NUM, kernel_name_, "input_shapes", &input_size_list_);
+ if (grad_inp_flag == -1) {
+ return grad_inp_flag;
}
if (input_shapes[0] != input_shapes[1]) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', the input shape should be equal to " << input_shapes[0]
@@ -78,18 +79,18 @@ class SoftMarginLossGradHelperGpuKernel : public GpuKernelHelperBase {
input_size_ *= input_shape_[i];
}

- int out_flag =
+ int grad_out_flag =
CalShapesSizeInBytes<T>(output_shapes, OUTPUT_NUM, kernel_name_, "output_shapes", &output_size_list_);
- if (out_flag == -1) {
- return out_flag;
+ if (grad_out_flag == -1) {
+ return grad_out_flag;
}
- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_softmarginloss_grad_input_ = (grad_inp_flag == 1 || grad_out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_softmarginloss_grad_input_) {
return 0;
}

@@ -150,7 +151,7 @@ class SoftMarginLossGradHelperGpuKernel : public GpuKernelHelperBase {
ReductionMode reduction_;
std::shared_ptr<SoftMarginLossGradAttr> attr_ptr_;
std::vector<int64_t> input_shape_;
- bool is_null_input_;
+ bool is_null_softmarginloss_grad_input_;
size_t input_size_;
};
} // namespace cukernel
@@ -37,7 +37,7 @@ class SoftMarginLossHelperGpuKernel : public GpuKernelHelperBase {
explicit SoftMarginLossHelperGpuKernel(const std::string &kernel_name, const uint32_t &device_id)
: GpuKernelHelperBase(kernel_name, device_id) {
reduction_ = ReductionMode::kMean;
- is_null_input_ = false;
+ is_null_softmarginloss_input_ = false;
input_size_ = 1;
}

@@ -69,13 +69,13 @@ class SoftMarginLossHelperGpuKernel : public GpuKernelHelperBase {
if (out_flag == -1) {
return out_flag;
}
- is_null_input_ = (inp_flag == 1 || out_flag == 1);
+ is_null_softmarginloss_input_ = (inp_flag == 1 || out_flag == 1);
return CheckKernelParam();
}

int Process(const std::vector<void *> &input_ptrs, const std::vector<void *> &output_ptrs,
const std::vector<void *> &work_ptrs, void *cuda_stream) override {
- if (is_null_input_) {
+ if (is_null_softmarginloss_input_) {
return 0;
}

@@ -123,7 +123,7 @@ class SoftMarginLossHelperGpuKernel : public GpuKernelHelperBase {
ReductionMode reduction_;
std::shared_ptr<SoftMarginLossAttr> attr_ptr_;
std::vector<int64_t> input_shape_;
- bool is_null_input_;
+ bool is_null_softmarginloss_input_;
size_t input_size_;
};
} // namespace cukernel
@@ -127,17 +127,17 @@ bool Dilation2DBackpropFilterGpuKernelMod::Launch(const std::vector<AddressPtr>
bool Dilation2DBackpropFilterGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::Dilation2DBackpropFilter>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto kernel_dilation2d_backprop_filter_ptr = std::dynamic_pointer_cast<ops::Dilation2DBackpropFilter>(base_operator);
+ kernel_name_ = kernel_dilation2d_backprop_filter_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->stride = kernel_ptr->get_stride();
- attr_ptr_->dilation = kernel_ptr->get_dilation();
- attr_ptr_->pad_mode = kernel_ptr->get_pad_mode();
- attr_ptr_->format = kernel_ptr->get_format();
+ attr_ptr_->stride = kernel_dilation2d_backprop_filter_ptr->get_stride();
+ attr_ptr_->dilation = kernel_dilation2d_backprop_filter_ptr->get_dilation();
+ attr_ptr_->pad_mode = kernel_dilation2d_backprop_filter_ptr->get_pad_mode();
+ attr_ptr_->format = kernel_dilation2d_backprop_filter_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);

@@ -168,9 +168,9 @@ int Dilation2DBackpropFilterGpuKernelMod::Resize(const BaseOperatorPtr &base_ope
if (helper_ptr_->CalMemSize(input_shapes, output_shapes) == -1) {
return KRET_RESIZE_FAILED;
}
- workspace_size_list_ = helper_ptr_->GetWorkSizeList();
input_size_list_ = helper_ptr_->GetInputSizeList();
output_size_list_ = helper_ptr_->GetOutputSizeList();
+ workspace_size_list_ = helper_ptr_->GetWorkSizeList();
return KRET_OK;
}
@@ -127,17 +127,17 @@ bool Dilation2DBackpropInputGpuKernelMod::Launch(const std::vector<AddressPtr> &
bool Dilation2DBackpropInputGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::Dilation2DBackpropInput>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto kernel_dilation2d_backprop_input_ptr = std::dynamic_pointer_cast<ops::Dilation2DBackpropInput>(base_operator);
+ kernel_name_ = kernel_dilation2d_backprop_input_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->stride = kernel_ptr->get_stride();
- attr_ptr_->dilation = kernel_ptr->get_dilation();
- attr_ptr_->pad_mode = kernel_ptr->get_pad_mode();
- attr_ptr_->format = kernel_ptr->get_format();
+ attr_ptr_->stride = kernel_dilation2d_backprop_input_ptr->get_stride();
+ attr_ptr_->dilation = kernel_dilation2d_backprop_input_ptr->get_dilation();
+ attr_ptr_->pad_mode = kernel_dilation2d_backprop_input_ptr->get_pad_mode();
+ attr_ptr_->format = kernel_dilation2d_backprop_input_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
@@ -80,17 +80,17 @@ bool Dilation2DGpuKernelMod::Launch(const std::vector<AddressPtr> &inputs, const

bool Dilation2DGpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::Dilation2D>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto kernel_dilation2d_ptr = std::dynamic_pointer_cast<ops::Dilation2D>(base_operator);
+ kernel_name_ = kernel_dilation2d_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->stride = kernel_ptr->get_stride();
- attr_ptr_->dilation = kernel_ptr->get_dilation();
- attr_ptr_->pad_mode = kernel_ptr->get_pad_mode();
- attr_ptr_->format = kernel_ptr->get_format();
+ attr_ptr_->stride = kernel_dilation2d_ptr->get_stride();
+ attr_ptr_->dilation = kernel_dilation2d_ptr->get_dilation();
+ attr_ptr_->pad_mode = kernel_dilation2d_ptr->get_pad_mode();
+ attr_ptr_->format = kernel_dilation2d_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
@@ -88,14 +88,14 @@ bool MaxUnpool2DGPUKernelMod::Launch(const std::vector<AddressPtr> &inputs, cons

bool MaxUnpool2DGPUKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool2D>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto maxunpool2d_kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool2D>(base_operator);
+ kernel_name_ = maxunpool2d_kernel_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->data_format = kernel_ptr->get_format();
+ attr_ptr_->data_format = maxunpool2d_kernel_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
return true;
@@ -110,15 +110,15 @@ int MaxUnpool2DGPUKernelMod::Resize(const BaseOperatorPtr &base_operator, const
return KRET_UNKNOWN_SHAPE;
}
}
- std::vector<std::vector<int64_t>> input_shapes;
- std::vector<std::vector<int64_t>> output_shapes;
+ std::vector<std::vector<int64_t>> input_maxunpool2d_shapes;
+ std::vector<std::vector<int64_t>> output_maxunpool2d_shapes;
std::vector<int64_t> inp_shape = inputs[0]->GetShapeVector();
std::vector<int64_t> indices_shape = inputs[1]->GetShapeVector();
std::vector<int64_t> out_shape = outputs[0]->GetShapeVector();
- input_shapes.emplace_back(inp_shape);
- input_shapes.emplace_back(indices_shape);
- output_shapes.emplace_back(out_shape);
- if (helper_ptr_->CalMemSize(input_shapes, output_shapes) == -1) {
+ input_maxunpool2d_shapes.emplace_back(inp_shape);
+ input_maxunpool2d_shapes.emplace_back(indices_shape);
+ output_maxunpool2d_shapes.emplace_back(out_shape);
+ if (helper_ptr_->CalMemSize(input_maxunpool2d_shapes, output_maxunpool2d_shapes) == -1) {
return KRET_RESIZE_FAILED;
}
input_size_list_ = helper_ptr_->GetInputSizeList();
@@ -179,14 +179,14 @@ bool MaxUnpool2DGradGPUKernelMod::Launch(const std::vector<AddressPtr> &inputs,

bool MaxUnpool2DGradGPUKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool2DGrad>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto maxunpool2d_grad_kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool2DGrad>(base_operator);
+ kernel_name_ = maxunpool2d_grad_kernel_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->data_format = kernel_ptr->get_format();
+ attr_ptr_->data_format = maxunpool2d_grad_kernel_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
return true;
@@ -88,14 +88,14 @@ bool MaxUnpool3DGPUKernelMod::Launch(const std::vector<AddressPtr> &inputs, cons

bool MaxUnpool3DGPUKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool3D>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto maxunpool3d_kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool3D>(base_operator);
+ kernel_name_ = maxunpool3d_kernel_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->data_format = kernel_ptr->get_format();
+ attr_ptr_->data_format = maxunpool3d_kernel_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
return true;
@@ -110,15 +110,15 @@ int MaxUnpool3DGPUKernelMod::Resize(const BaseOperatorPtr &base_operator, const
return KRET_UNKNOWN_SHAPE;
}
}
- std::vector<std::vector<int64_t>> input_shapes;
- std::vector<std::vector<int64_t>> output_shapes;
+ std::vector<std::vector<int64_t>> input_maxunpool3d_shapes;
+ std::vector<std::vector<int64_t>> output_maxunpool3d_shapes;
std::vector<int64_t> inp_shape = inputs[0]->GetShapeVector();
std::vector<int64_t> indices_shape = inputs[1]->GetShapeVector();
std::vector<int64_t> out_shape = outputs[0]->GetShapeVector();
- input_shapes.emplace_back(inp_shape);
- input_shapes.emplace_back(indices_shape);
- output_shapes.emplace_back(out_shape);
- if (helper_ptr_->CalMemSize(input_shapes, output_shapes) == -1) {
+ input_maxunpool3d_shapes.emplace_back(inp_shape);
+ input_maxunpool3d_shapes.emplace_back(indices_shape);
+ output_maxunpool3d_shapes.emplace_back(out_shape);
+ if (helper_ptr_->CalMemSize(input_maxunpool3d_shapes, output_maxunpool3d_shapes) == -1) {
return KRET_RESIZE_FAILED;
}
input_size_list_ = helper_ptr_->GetInputSizeList();
@@ -179,14 +179,14 @@ bool MaxUnpool3DGradGPUKernelMod::Launch(const std::vector<AddressPtr> &inputs,

bool MaxUnpool3DGradGPUKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool3DGrad>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto maxunpool3d_grad_kernel_ptr = std::dynamic_pointer_cast<ops::MaxUnpool3DGrad>(base_operator);
+ kernel_name_ = maxunpool3d_grad_kernel_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->data_format = kernel_ptr->get_format();
+ attr_ptr_->data_format = maxunpool3d_grad_kernel_ptr->get_format();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
return true;
@@ -53,15 +53,15 @@ bool ResizeBicubicGradGpuKernelMod::Launch(const std::vector<AddressPtr> &inputs
bool ResizeBicubicGradGpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
- auto kernel_ptr = std::dynamic_pointer_cast<ops::ResizeBicubicGrad>(base_operator);
- kernel_name_ = kernel_ptr->name();
+ auto kernel_grad_ptr = std::dynamic_pointer_cast<ops::ResizeBicubicGrad>(base_operator);
+ kernel_name_ = kernel_grad_ptr->name();
auto tensor_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(tensor_attr, GetOpSupport());
if (!is_match) {
return false;
}
- attr_ptr_->align_corners = kernel_ptr->get_align_corners();
- attr_ptr_->half_pixel_centers = kernel_ptr->get_half_pixel_centers();
+ attr_ptr_->align_corners = kernel_grad_ptr->get_align_corners();
+ attr_ptr_->half_pixel_centers = kernel_grad_ptr->get_half_pixel_centers();
helper_ptr_ = std::move(kernel_attr[index].second(kernel_name_, device_id_));
helper_ptr_->SetKernelParam(attr_ptr_);
@@ -39,8 +39,8 @@ abstract::ShapePtr InferShapeAdaptiveAvgPool3D(const PrimitivePtr &primitive,
const auto &output_size_ptr = primitive->GetAttr("output_size");
MS_EXCEPTION_IF_NULL(output_size_ptr);
const auto &output_size = GetValue<std::vector<int64_t>>(output_size_ptr);
- (void)CheckAndConvertUtils::CheckInteger("length of output_size", output_size.size(), kEqual, kOutputSizeLen,
- op_name);
+ (void)CheckAndConvertUtils::CheckInteger("length of output_size", SizeToLong(output_size.size()), kEqual,
+ kOutputSizeLen, op_name);

// Update the output shape by output size and input shape.
auto input_size_iter = x_shape.rbegin();
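Note: the AdaptiveAvgPool3D hunk above wraps output_size.size() in SizeToLong and keeps the (void) cast on the checker call; passing an explicitly widened int64_t and visibly discarding an unused return value are common ways to quiet implicit-conversion and unused-result warnings from static analyzers. A small sketch of the idea follows, using a hypothetical CheckInteger stand-in rather than the MindSpore utility.

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Hypothetical checker: validates a value and returns it.
    int64_t CheckInteger(const std::string &name, int64_t value, int64_t expected) {
      if (value != expected) {
        throw std::invalid_argument(name + " must equal " + std::to_string(expected));
      }
      return value;
    }

    // Mirrors the intent of SizeToLong: make the size_t -> int64_t conversion explicit.
    inline int64_t SizeToLong(size_t v) { return static_cast<int64_t>(v); }

    int main() {
      std::vector<int64_t> output_size = {8, 8, 8};
      // Explicit widening plus (void) to show the return value is intentionally ignored.
      (void)CheckInteger("length of output_size", SizeToLong(output_size.size()), 3);
    }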
@@ -39,19 +39,18 @@ ResizeMethod CropAndResizeGradImage::get_method() const {
}

namespace {
- constexpr size_t ImagekInputNums = 4;
constexpr size_t ImagekGrads = 0;
- constexpr size_t ImagekGradsShapeLen = 4;
+ constexpr int64_t ImagekGradsShapeLen = 4;
constexpr size_t ImagekHeight = 1;
constexpr size_t ImagekWidth = 2;
constexpr size_t ImagekDepth = 3;
constexpr size_t ImagekImagesSize = 3;
- constexpr size_t ImagekImageSizeShapeLen = 1;
+ constexpr int64_t ImagekImageSizeShapeLen = 1;
constexpr size_t ImagekBoxes = 1;
- constexpr size_t ImagekBoxesShapeLen = 2;
- constexpr size_t ImagekCoordinateLen = 4;
+ constexpr int64_t ImagekBoxesShapeLen = 2;
+ constexpr int64_t ImagekCoordinateLen = 4;
constexpr size_t ImagekBoxIndex = 2;
- constexpr size_t ImagekBoxIndShapeLen = 1;
+ constexpr int64_t ImagekBoxIndShapeLen = 1;
constexpr size_t ImagekOutputSizeD = 1;
constexpr int64_t ImagekOutputSizeLen = 4;
constexpr int64_t ImageKMaxshapeDim0 = 16;
@@ -62,24 +61,23 @@ abstract::ShapePtr CropAndResizeGradImageInferShape(const PrimitivePtr &primitiv
auto prim_name = primitive->name();
MS_EXCEPTION_IF_NULL(input_args[ImagekGrads]);
auto input_shape0 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[ImagekGrads]->BuildShape())[kShape];
- CheckAndConvertUtils::CheckInteger("grads rank", SizeToLong(input_shape0.size()), kEqual, ImagekGradsShapeLen,
- prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("grads rank", input_shape0.size(), kEqual, ImagekGradsShapeLen, prim_name);
MS_EXCEPTION_IF_NULL(input_args[ImagekBoxes]);
auto input_shape1 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[ImagekBoxes]->BuildShape())[kShape];
- CheckAndConvertUtils::CheckInteger("boxes rank", SizeToLong(input_shape1.size()), kEqual, ImagekBoxesShapeLen,
+ (void)CheckAndConvertUtils::CheckInteger("boxes rank", SizeToLong(input_shape1.size()), kEqual, ImagekBoxesShapeLen,
prim_name);
- CheckAndConvertUtils::CheckInteger("shape[1] of boxes", SizeToLong(input_shape1[1]), kEqual, ImagekCoordinateLen,
+ (void)CheckAndConvertUtils::CheckInteger("shape[1] of boxes", input_shape1[1], kEqual, ImagekCoordinateLen,
prim_name);
MS_EXCEPTION_IF_NULL(input_args[ImagekBoxIndex]);
auto input_shape2 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[ImagekBoxIndex]->BuildShape())[kShape];
- CheckAndConvertUtils::CheckInteger("box_index rank", SizeToLong(input_shape2.size()), kEqual, ImagekBoxIndShapeLen,
+ (void)CheckAndConvertUtils::CheckInteger("box_index rank", input_shape2.size(), kEqual, ImagekBoxIndShapeLen,
prim_name);
MS_EXCEPTION_IF_NULL(input_args[ImagekImagesSize]);
auto input_shape3 =
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[ImagekImagesSize]->BuildShape())[kShape];
- CheckAndConvertUtils::CheckInteger("image_size rank", SizeToLong(input_shape3.size()), kEqual,
- ImagekImageSizeShapeLen, prim_name);
- CheckAndConvertUtils::CheckInteger("length of image_size", SizeToLong(input_shape3[0]), kEqual, ImagekGradsShapeLen,
+ (void)CheckAndConvertUtils::CheckInteger("image_size rank", input_shape3.size(), kEqual, ImagekImageSizeShapeLen,
prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("length of image_size", input_shape3[0], kEqual, ImagekGradsShapeLen,
prim_name);

if (input_shape0[ImagekHeight] <= 0 || input_shape0[ImagekWidth] <= 0) {
@@ -163,6 +161,7 @@ TypePtr CropAndResizeGradImageInferType(const PrimitivePtr &prim, const std::vec
}
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
+ const int64_t ImagekInputNums = 4;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, ImagekInputNums, prim_name);
const std::set<TypePtr> inputs_types = {kFloat32, kFloat64};
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64};
@@ -41,7 +41,8 @@ abstract::ShapePtr Dilation2DInferShape(const PrimitivePtr &primitive, const std
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 2;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+ primitive->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -53,9 +54,10 @@ abstract::ShapePtr Dilation2DInferShape(const PrimitivePtr &primitive, const std

const int64_t x_shape_size = 4;
const int64_t filter_shape_size = 3;
- CheckAndConvertUtils::CheckInteger("x shape size", x_shape.size(), kEqual, x_shape_size, primitive->name());
- CheckAndConvertUtils::CheckInteger("filter shape size", filter_shape.size(), kEqual, filter_shape_size,
+ (void)CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, x_shape_size,
primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("filter shape size", SizeToLong(filter_shape.size()), kEqual,
+ filter_shape_size, primitive->name());
const uint64_t n_axis = 0;
const uint64_t shapeIndex1 = 1;
const uint64_t shapeIndex2 = 2;
@@ -80,8 +82,8 @@ abstract::ShapePtr Dilation2DInferShape(const PrimitivePtr &primitive, const std
// }
std::vector<int64_t> stride = GetValue<std::vector<int64_t>>(primitive->GetAttr("stride"));
std::vector<int64_t> dilation = GetValue<std::vector<int64_t>>(primitive->GetAttr("dilation"));
- int window_h = (kernel_size[0] - 1) * dilation[h_axis] + 1;
- int window_w = (kernel_size[1] - 1) * dilation[w_axis] + 1;
+ int window_h = static_cast<int>((kernel_size[0] - 1) * dilation[h_axis] + 1);
+ int window_w = static_cast<int>((kernel_size[1] - 1) * dilation[w_axis] + 1);
const int64_t wLengthMaxLimit = 255;
const int64_t wSizeMaxLimit = 512;
if (window_h < 1 || window_h > wLengthMaxLimit || window_w < 1 || window_w > wLengthMaxLimit ||
@@ -127,7 +129,8 @@ abstract::ShapePtr Dilation2DInferShape(const PrimitivePtr &primitive, const std
TypePtr Dilation2DInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(prim);
const int64_t input_num = 2;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, prim->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+ prim->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
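Note: the window_h/window_w change in the Dilation2D hunk above makes the int64_t-to-int narrowing explicit by casting the whole expression. A brief illustration with made-up values:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> kernel_size = {3, 3};
      std::vector<int64_t> dilation = {1, 1, 2, 2};
      const size_t h_axis = 2, w_axis = 3;

      // The arithmetic is done in int64_t; static_cast makes the narrowing to int
      // visible instead of relying on an implicit (and warned-about) conversion.
      int window_h = static_cast<int>((kernel_size[0] - 1) * dilation[h_axis] + 1);
      int window_w = static_cast<int>((kernel_size[1] - 1) * dilation[w_axis] + 1);
      std::cout << window_h << " " << window_w << '\n';  // prints "5 5"
    }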
@@ -52,7 +52,7 @@ abstract::ShapePtr InferShapeAdaptiveAvgPool3DGrad(const PrimitivePtr &primitive
auto value = static_cast<int32_t *>(orig_input_shape_tensor->data_c());
MS_EXCEPTION_IF_NULL(value);
for (int64_t i = 0; i < input_grad_dims; ++i) {
- orig_input_shape_value_vec[i] = value[i] > 0 ? static_cast<int64_t>(value[i]) : 1;
+ orig_input_shape_value_vec[i] = value[i] > 0 ? static_cast<int64_t>(value[i]) : static_cast<int64_t>(1);
}
gen_value_succ = true;
}
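Note: the AdaptiveAvgPool3DGrad hunk above casts both arms of the conditional expression to int64_t so the two operands share one type. A minimal sketch with illustrative values:

    #include <cstdint>
    #include <iostream>

    int main() {
      int32_t value = -2;
      // Mixing types in ?: (an int64_t cast on one arm, a plain int literal on the other)
      // triggers conversion warnings; casting both arms keeps the expression in int64_t.
      int64_t out = value > 0 ? static_cast<int64_t>(value) : static_cast<int64_t>(1);
      std::cout << out << '\n';  // prints 1
    }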
@@ -56,7 +56,7 @@ abstract::ShapePtr Dilation2DBackpropFilterInferShape(const PrimitivePtr &primit
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 3;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, primitive->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -68,11 +68,11 @@ abstract::ShapePtr Dilation2DBackpropFilterInferShape(const PrimitivePtr &primit
const int64_t x_shape_size = 4;
const int64_t filter_shape_size = 3;
const int64_t out_backprop_shape_size = 4;
- CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, x_shape_size,
+ (void)CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, x_shape_size,
primitive->name());
- CheckAndConvertUtils::CheckInteger("filter shape size", SizeToLong(filter_shape.size()), kEqual, filter_shape_size,
- primitive->name());
- CheckAndConvertUtils::CheckInteger("out_backprop shape size", SizeToLong(out_backprop_shape.size()), kEqual,
+ (void)CheckAndConvertUtils::CheckInteger("filter shape size", SizeToLong(filter_shape.size()), kEqual,
+ filter_shape_size, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("out_backprop shape size", SizeToLong(out_backprop_shape.size()), kEqual,
out_backprop_shape_size, primitive->name());
// Get Attributes
std::string data_format = GetValue<std::string>(primitive->GetAttr("format"));
@@ -149,7 +149,8 @@ TypePtr Dilation2DBackpropFilterInferType(const PrimitivePtr &prim, const std::v
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
const int64_t input_num = 3;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, prim->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+ prim->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -56,7 +56,7 @@ abstract::ShapePtr Dilation2DBackpropInputInferShape(const PrimitivePtr &primiti
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 3;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, primitive->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -69,11 +69,11 @@ abstract::ShapePtr Dilation2DBackpropInputInferShape(const PrimitivePtr &primiti
const int64_t x_shape_size = 4;
const int64_t filter_shape_size = 3;
const int64_t out_backprop_shape_size = 4;
- CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, x_shape_size,
+ (void)CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, x_shape_size,
primitive->name());
- CheckAndConvertUtils::CheckInteger("filter shape size", SizeToLong(filter_shape.size()), kEqual, filter_shape_size,
- primitive->name());
- CheckAndConvertUtils::CheckInteger("out_backprop shape size", SizeToLong(out_backprop_shape.size()), kEqual,
+ (void)CheckAndConvertUtils::CheckInteger("filter shape size", SizeToLong(filter_shape.size()), kEqual,
+ filter_shape_size, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("out_backprop shape size", SizeToLong(out_backprop_shape.size()), kEqual,
out_backprop_shape_size, primitive->name());
std::string data_format = GetValue<std::string>(primitive->GetAttr("format"));
std::string pad_mode = GetValue<std::string>(primitive->GetAttr("pad_mode"));
@@ -147,7 +147,8 @@ TypePtr Dilation2DBackpropInputInferType(const PrimitivePtr &prim, const std::ve
MS_EXCEPTION_IF_NULL(prim);
auto prim_name = prim->name();
const int64_t input_num = 3;
- CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, input_num, prim->name());
+ (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_num,
+ prim->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}