forked from mindspore-Ecosystem/mindspore

fix static check error

parent 7f725b93a3
commit 4404866e6e

@@ -33,7 +33,7 @@ using mindspore::schema::PrimitiveType_ActivationGrad;
 namespace mindspore::kernel {
 int ActivationGradCPUKernel::Init() {
-  if (2 != in_tensors_.size()) {
+  if (in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "ActivationGrad should have 2 input tensors";
     return RET_ERROR;
   }
@@ -86,6 +86,7 @@ int ActivationGradCPUKernel::DoActivation(int task_id) {
 }

 int ActivationGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto activationGrad_kernel = reinterpret_cast<ActivationGradCPUKernel *>(cdata);
   auto error_code = activationGrad_kernel->DoActivation(task_id);
   if (error_code != RET_OK) {

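Most hunks in this commit touch the same two spots: the *Run trampoline that ParallelLaunch invokes (which now asserts cdata before the cast) and the Yoda-style comparisons in Init. As background, here is a standalone sketch of the trampoline pattern with placeholder types; it illustrates the shape of the code, not the actual MindSpore Lite classes.

#include <cassert>
#include <cstdio>

// Placeholder kernel: only the forwarding pattern matters for this sketch.
class FakeActivationGradKernel {
 public:
  int DoActivation(int task_id) {
    std::printf("task %d done\n", task_id);
    return 0;  // stands in for RET_OK
  }
};

// C-style callback handed to the thread pool; it only receives an opaque pointer,
// which is why the kernels assert it before casting (the change this commit makes).
int FakeActivationGradRun(void *cdata, int task_id) {
  assert(cdata != nullptr);
  auto *kernel = reinterpret_cast<FakeActivationGradKernel *>(cdata);
  return kernel->DoActivation(task_id);
}

int main() {
  FakeActivationGradKernel kernel;
  // A real thread pool (ParallelLaunch) would call this once per task id.
  return FakeActivationGradRun(&kernel, 0);
}

The cast is unavoidable because the callback signature is int (*)(void *, int), so the null check sits at the top of every trampoline.
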
@@ -40,7 +40,6 @@ class ActivationGradCPUKernel : public LiteKernel {
  private:
   ActivationParameter *param_act_grad_;
 };

 } // namespace mindspore::kernel

 #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ACTIVATION_GRAD_H_

@@ -45,6 +45,10 @@ int AdamCPUKernel::Execute(int task_id) {
   auto eps = reinterpret_cast<float *>(in_tensors_[8]->MutableData())[0];
   auto gradient = reinterpret_cast<float *>(in_tensors_[9]->MutableData());
   size_t elem_num = in_tensors_[0]->ElementsNum();
+  if (fabs(1 - beta1_power) <= 0.0f) {
+    MS_LOG(ERROR) << "divisor cannot be 0";
+    return RET_ERROR;
+  }
   auto update_lr = learning_rate * std::sqrt(1 - beta2_power) / (1 - beta1_power);

   if (adam_param_->use_nesterov_) { // Nadam
@@ -64,6 +68,7 @@ int AdamCPUKernel::Execute(int task_id) {
 }

 int AdamRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Adam_kernel = reinterpret_cast<AdamCPUKernel *>(cdata);
   auto error_code = Adam_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -90,16 +95,19 @@ kernel::LiteKernel *CpuAdamFp32KernelCreator(const std::vector<lite::Tensor *> &
                                              const lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Adam);
   auto *kernel = new (std::nothrow) AdamCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   MS_ASSERT(kernel != nullptr);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new AdamCPUKernel fail!";
     free(opParameter);
     return nullptr;
   }

   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }

   return kernel;
 }

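The first Adam hunk guards the bias-correction divisor before update_lr is computed. A minimal, self-contained illustration of that step follows; the variable names mirror the diff, but the surrounding code and return values are placeholders, not the kernel itself.

#include <cmath>
#include <cstdio>

// Bias-corrected Adam step size with the divisor guard the commit adds before
// the division by (1 - beta1_power).
int ComputeUpdateLr(float learning_rate, float beta1_power, float beta2_power, float *update_lr) {
  if (std::fabs(1.0f - beta1_power) <= 0.0f) {  // beta1_power == 1 would divide by zero
    std::fprintf(stderr, "divisor cannot be 0\n");
    return -1;  // stands in for RET_ERROR
  }
  *update_lr = learning_rate * std::sqrt(1.0f - beta2_power) / (1.0f - beta1_power);
  return 0;  // stands in for RET_OK
}

int main() {
  float lr = 0.0f;
  const float beta1_power = 0.348678f;  // 0.9^10, i.e. beta1^t after 10 steps
  const float beta2_power = 0.990045f;  // 0.999^10
  if (ComputeUpdateLr(1e-3f, beta1_power, beta2_power, &lr) == 0) {
    std::printf("bias-corrected step size: %g\n", lr);
  }
  return 0;
}

In standard Adam, beta1_power and beta2_power are beta1^t and beta2^t, so the guard only trips when beta1 is exactly 1 or the powers have not been updated yet.
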
@@ -29,7 +29,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_ApplyMomentum;

 namespace mindspore::kernel {

 int ApplyMomentumCPUKernel::ReSize() { return RET_OK; }

 int ApplyMomentumCPUKernel::Execute(int task_id) {
@@ -55,6 +54,7 @@ int ApplyMomentumCPUKernel::Execute(int task_id) {
 }

 int ApplyMomentumRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto applyMomentum_kernel = reinterpret_cast<ApplyMomentumCPUKernel *>(cdata);
   auto error_code = applyMomentum_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -89,13 +89,12 @@ kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::Te
   }

   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }

   return kernel;
 }

@@ -42,7 +42,6 @@ int ArithmeticGradCPUKernel::Init() {
       arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradMul2L;
     else if (Type() == PrimitiveType_DivGrad)
       arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradDiv2L;
-
   } else if (dx2->ElementsNum() < dx1->ElementsNum()) {
     if (Type() == PrimitiveType_MulGrad)
       arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradMul1L;
@@ -75,25 +74,28 @@ int ArithmeticGradCPUKernel::Init() {

 void ArithmeticGradCPUKernel::ArithmeticGradAdd(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  if (dx1_size == dy_size)
+  if (dx1_size == dy_size) {
     memcpy(dx1, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx1, arithmeticParameter_->in_shape0_,
                     arithmeticParameter_->ndim_);
-  if (dx2_size == dy_size)
+  }
+  if (dx2_size == dy_size) {
     memcpy(dx2, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx2, arithmeticParameter_->in_shape1_,
                     arithmeticParameter_->ndim_);
+  }
 }

 void ArithmeticGradCPUKernel::ArithmeticGradSub(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  if (dx1_size == dy_size)
+  if (dx1_size == dy_size) {
     memcpy(dx1, dy, dy_size * sizeof(float));
-  else
+  } else {
     ReduceSumByAxes(dy, arithmeticParameter_->out_shape_, dx1, arithmeticParameter_->in_shape0_,
                     arithmeticParameter_->ndim_);
+  }
   if (dx2_size == dy_size) {
     for (int i = 0; i < dx2_size; i++) {
       dx2[i] = -dy[i];
@@ -156,7 +158,9 @@ void ArithmeticGradCPUKernel::ArithmeticGradDiv1L(float *dy, int dy_size, float
                   arithmeticParameter_); // broadcast directly to dx1
   ReduceSumByAxes(tile_data2, arithmeticParameter_->in_shape0_, dx2, arithmeticParameter_->in_shape1_,
                   arithmeticParameter_->ndim_);
-  for (int i = 0; i < dx2_size; i++) dx2[i] = -dx2[i];
+  for (int i = 0; i < dx2_size; i++) {
+    dx2[i] = -dx2[i];
+  }

   // broadcasting x2
   BroadcastDiv(dy, x2_data, tile_data0, tile_data1, dx1, dy_size, arithmeticParameter_); // broadcast directly to dx1
@@ -214,6 +218,7 @@ int ArithmeticGradCPUKernel::Execute(int task_id) {
 }

 int ArithmeticGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Arithmetic_kernel = reinterpret_cast<ArithmeticGradCPUKernel *>(cdata);
   auto error_code = Arithmetic_kernel->Execute(task_id);
   if (error_code != RET_OK) {

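ArithmeticGradAdd/Sub above reduce dy back to each input's shape with ReduceSumByAxes when the forward op broadcast that input. A toy, self-contained version of that reduction is sketched below; it is not the MindSpore Lite ReduceSumByAxes, just the same idea under the assumption that every input dimension is either equal to the output dimension or 1.

#include <cstddef>
#include <cstdio>
#include <vector>

// Accumulate every dy element into the dx element it was broadcast from.
// dy has the broadcast (output) shape, dx has the original input shape.
void ReduceSumToShape(const std::vector<float> &dy, const std::vector<int> &dy_shape,
                      std::vector<float> *dx, const std::vector<int> &dx_shape) {
  size_t ndim = dy_shape.size();
  dx->assign(dx->size(), 0.0f);
  for (size_t flat = 0; flat < dy.size(); ++flat) {
    // Decompose the flat dy index into coordinates, clamp broadcast dims to 0,
    // and rebuild a flat index into dx.
    size_t rem = flat;
    size_t dx_index = 0;
    for (size_t d = 0; d < ndim; ++d) {
      size_t stride = 1;
      for (size_t k = d + 1; k < ndim; ++k) stride *= dy_shape[k];
      size_t coord = rem / stride;
      rem %= stride;
      size_t dx_coord = (dx_shape[d] == 1) ? 0 : coord;
      dx_index = dx_index * dx_shape[d] + dx_coord;
    }
    (*dx)[dx_index] += dy[flat];
  }
}

int main() {
  // dy has shape (2, 3); dx1 was broadcast from shape (1, 3), so its gradient
  // is the column-wise sum of dy.
  std::vector<float> dy = {1, 2, 3, 4, 5, 6};
  std::vector<float> dx1(3, 0.0f);
  ReduceSumToShape(dy, {2, 3}, &dx1, {1, 3});
  std::printf("%g %g %g\n", dx1[0], dx1[1], dx1[2]);  // 5 7 9
  return 0;
}

With dy of shape (2, 3) and an input of shape (1, 3), the gradient is the column-wise sum, which is what the memcpy-versus-reduce branch in the hunk chooses between.
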
@@ -42,6 +42,7 @@ int AssignCPUKernel::Execute(int task_id) {
 }

 int AssignRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto Assign_kernel = reinterpret_cast<AssignCPUKernel *>(cdata);
   auto error_code = Assign_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -68,16 +69,19 @@ kernel::LiteKernel *CpuAssignFp32KernelCreator(const std::vector<lite::Tensor *>
                                                const lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Assign);
   auto *kernel = new (std::nothrow) AssignCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   MS_ASSERT(kernel != nullptr);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new AssignCPUKernel fail!";
     free(opParameter);
     return nullptr;
   }

   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
     return nullptr;
   }

   return kernel;
 }

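The creator hunks (Adam, Assign, and the others below) all repeat one ownership rule: opParameter arrives as a raw malloc'd struct, so it is freed directly if the kernel cannot be allocated, while a kernel that fails Init() is deleted instead. Here is a hedged sketch of that rule with placeholder types, not the real OpParameter/LiteKernel API.

#include <cstdlib>
#include <new>

// Placeholder for the malloc'd C parameter struct the parser hands over.
struct FakeOpParameter {
  int type;
};

class FakeKernel {
 public:
  explicit FakeKernel(FakeOpParameter *param) : param_(param) {}
  ~FakeKernel() { free(param_); }  // in this sketch the kernel owns the parameter once built
  int Init() { return 0; }         // stands in for RET_OK
 private:
  FakeOpParameter *param_;
};

FakeKernel *CreateFakeKernel(FakeOpParameter *op_parameter) {
  auto *kernel = new (std::nothrow) FakeKernel(op_parameter);
  if (kernel == nullptr) {
    free(op_parameter);  // nothing owns the parameter yet, so free it here
    return nullptr;
  }
  if (kernel->Init() != 0) {
    delete kernel;  // the destructor releases the parameter
    return nullptr;
  }
  return kernel;
}

int main() {
  auto *param = static_cast<FakeOpParameter *>(malloc(sizeof(FakeOpParameter)));
  if (param == nullptr) return 1;
  FakeKernel *kernel = CreateFakeKernel(param);
  if (kernel == nullptr) return 1;
  delete kernel;
  return 0;
}

Whether the real LiteKernel frees the parameter in its destructor is outside this sketch; the point is only that the failure paths in the diff neither leak the parameter nor double-free it.
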
@@ -52,7 +52,9 @@ int BiasGradCPUKernel::Execute(int task_id) {

   size_t nhw_size = 1;
   size_t channels = bias_param->in_shape0_[bias_param->ndim_ - 1]; // C in NHWC
-  for (unsigned int i = 0; i < bias_param->ndim_ - 1; i++) nhw_size *= bias_param->in_shape0_[i];
+  for (unsigned int i = 0; i < bias_param->ndim_ - 1; i++) {
+    nhw_size *= bias_param->in_shape0_[i];
+  }

   size_t total_size = channels * nhw_size;
   for (size_t c = 0; c < channels; ++c) {
@@ -98,7 +100,7 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::Tensor
   }

   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

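The BiasGrad hunk collapses all N*H*W positions onto the channel dimension of an NHWC tensor. A toy version of that per-channel reduction, in plain C++ rather than the kernel's own code:

#include <cstdio>
#include <vector>

// dy is laid out as nhw_size rows of `channels` values; the bias gradient is
// the per-channel sum over all rows.
std::vector<float> BiasGrad(const std::vector<float> &dy, size_t nhw_size, size_t channels) {
  std::vector<float> db(channels, 0.0f);
  for (size_t i = 0; i < nhw_size; ++i) {
    for (size_t c = 0; c < channels; ++c) {
      db[c] += dy[i * channels + c];
    }
  }
  return db;
}

int main() {
  // Two NHW positions, three channels.
  std::vector<float> dy = {1, 2, 3, 10, 20, 30};
  auto db = BiasGrad(dy, 2, 3);
  std::printf("%g %g %g\n", db[0], db[1], db[2]);  // 11 22 33
  return 0;
}
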
@@ -28,7 +28,6 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-// using mindspore::lite::REG_OP;
 using mindspore::schema::PrimitiveType_BNGrad;

 namespace mindspore::kernel {
@@ -84,6 +83,7 @@ int BNGradCPUKernel::Execute(int task_id) {
 }

 int BNGradRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto bn_kernel = reinterpret_cast<BNGradCPUKernel *>(cdata);
   if (task_id == 0) {
     auto error_code = bn_kernel->Execute(task_id);
@@ -117,7 +117,7 @@ kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::Tensor *>
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

@@ -27,11 +27,11 @@ using mindspore::lite::RET_OK;

 namespace mindspore::kernel {
 int ConvolutionTrainCPUKernel::Init() {
-  if (2 > in_tensors_.size()) {
+  if (in_tensors_.size() < 2) {
     MS_LOG(ERROR) << "Convolution should have at least two inputs";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Convolution should have one output";
     return RET_ERROR;
   }
@@ -105,6 +105,7 @@ int ConvolutionTrainCPUKernel::Execute(int task_id) {
 }

 int ConvolutionTrainRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto conv_kernel = reinterpret_cast<ConvolutionTrainCPUKernel *>(cdata);
   auto error_code = conv_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -138,7 +139,7 @@ kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor
   }

   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -146,5 +147,4 @@ kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor
   }
   return kernel;
 }
-
 } // namespace mindspore::kernel

@@ -111,6 +111,7 @@ int ConvolutionGradFilterCPUKernel::Execute(int task_id) {
 }

 int ConvolutionGradFilterRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernel *>(cdata);
   auto error_code = convfilter_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -145,7 +146,7 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::T
   }

   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

@@ -43,7 +43,6 @@ class ConvolutionGradFilterCPUKernel : public LiteKernel {
   const int chunk = C12NUM;
 #endif
 };

 } // namespace mindspore::kernel

 #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_

@@ -122,6 +122,7 @@ int ConvolutionGradInputCPUKernel::Execute(int task_id) {
 }

 int ConvolutionGradInputRun(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernel *>(cdata);
   auto error_code = convinput_kernel->Execute(task_id);
   if (error_code != RET_OK) {
@@ -157,7 +158,7 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Te
   }

   auto ret = kernel->Init();
-  if (0 != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -168,5 +169,4 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Te

 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DGradInput, CpuConvGradInputFp32KernelCreator)
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GroupConv2DGradInput, CpuConvGradInputFp32KernelCreator)

 } // namespace mindspore::kernel

@@ -145,7 +145,7 @@ kernel::LiteKernel *CpuDeConvGradFilterFp32KernelCreator(const std::vector<lite:
   }

   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

@@ -38,7 +38,6 @@ class DeConvolutionGradFilterCPUKernel : public LiteKernel {
   size_t ws_size = 0;
   const int chunk = 1;
 };

 } // namespace mindspore::kernel

 #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_DECONVOLUTION_GRAD_FILTER_H_

@@ -115,6 +115,7 @@ kernel::LiteKernel *CpuDropoutFp32KernelCreator(const std::vector<lite::Tensor *
   auto *kernel = new (std::nothrow) DropoutCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Dropout new kernel failed.";
     free(opParameter);
     return nullptr;
   }
   auto ret = kernel->Init();

@@ -102,6 +102,7 @@ kernel::LiteKernel *CpuDropoutGradFp32KernelCreator(const std::vector<lite::Tens
   auto *kernel = new (std::nothrow) DropoutGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "DropoutGrad new kernel failed.";
     free(opParameter);
     return nullptr;
   }
   auto ret = kernel->Init();

@@ -32,6 +32,7 @@ namespace {
 int NegGradRun(void *cdata, int thread_id) {
+  MS_ASSERT(cdata != nullptr);
   auto kernel = reinterpret_cast<NegGradCPUKernel *>(cdata);
   MS_ASSERT(kernel != nullptr);
   return kernel->DoNegGrad(thread_id);
 }
 } // namespace

@@ -77,6 +77,7 @@ int PoolingGradCPUKernel::Execute(int task_id) {
 }

 int PoolingGradImpl(void *cdata, int task_id) {
+  MS_ASSERT(cdata != nullptr);
   auto pooling = reinterpret_cast<PoolingGradCPUKernel *>(cdata);
   auto error_code = pooling->Execute(task_id);
   if (error_code != RET_OK) {
@@ -92,7 +93,9 @@ int PoolingGradCPUKernel::Run() {
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   int size =
     pooling_param->input_w_ * pooling_param->input_h_ * pooling_param->input_channel_ * pooling_param->output_batch_;
-  for (int i = 0; i < size; i++) output_ptr[i] = 0.0;
+  for (int i = 0; i < size; i++) {
+    output_ptr[i] = 0.0;
+  }

   int error_code = ParallelLaunch(this->context_->thread_pool_, PoolingGradImpl, this, 1);
   if (error_code != RET_OK) {

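Since the pooling-gradient kernel accumulates into the output buffer, Run() clears it before ParallelLaunch fires PoolingGradImpl. The braced loop above is equivalent to a std::fill_n; the snippet below is only a sketch of that zero-initialization, not the kernel code.

#include <algorithm>
#include <vector>

int main() {
  // Stand-in for the output tensor's MutableData() buffer.
  std::vector<float> output(4 * 4 * 3 * 1, -1.0f);
  // Equivalent to: for (int i = 0; i < size; i++) { output_ptr[i] = 0.0; }
  std::fill_n(output.data(), output.size(), 0.0f);
  return 0;
}
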
@@ -28,11 +28,11 @@ using mindspore::schema::PrimitiveType_PowerGrad;

 namespace mindspore::kernel {
 int PowerGradCPUKernel::Init() {
-  if (2 != in_tensors_.size()) {
+  if (in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "Power Grad Filter should have 2 inputs";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Power Grad Filter should have one output";
     return RET_ERROR;
   }

@@ -77,7 +77,9 @@ int SgdCPUKernel::Init() {
     // Only for test with uninitialized Data
     size_t elem_num = in_tensors_[0]->ElementsNum();
     auto accumulate = reinterpret_cast<float *>(in_tensors_[3]->MutableData());
-    for (size_t i = 0; i < elem_num; i++) accumulate[i] = 0.0;
+    for (size_t i = 0; i < elem_num; i++) {
+      accumulate[i] = 0.0;
+    }

   if (sgd_param_->dampening_ < 0.0f) {
     MS_LOG(ERROR) << "dampening should be at least 0.0";
@@ -105,7 +107,7 @@ kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector<lite::Tensor *> &i
   }

   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

@@ -61,7 +61,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   float *out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-  float *grads = NULL;
+  float *grads = nullptr;
   if (IsTrain() && out_tensors_.size() > 1) {
     grads = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
   }
@@ -104,7 +104,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
   param_->number_of_classes_ = dims[1];
   param_->batch_size_ = dims[0];
   for (unsigned int i = 0; i < dims.size(); i++) param_->input_shape_[i] = dims[i];
-  if (2 != this->in_tensors_.size()) {
+  if (this->in_tensors_.size() != 2) {
     MS_LOG(ERROR) << "softmax entropy loss should have two inputs";
     return RET_ERROR;
   }
@@ -139,7 +139,7 @@ kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector<li
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;

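For orientation, the grads output that the NULL-to-nullptr line guards is the usual softmax-cross-entropy gradient, dL/dlogits = softmax(logits) - onehot(labels). The sketch below is the standard formula for a single sample, not the MindSpore Lite kernel.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Returns the cross-entropy loss; when grads != nullptr it also writes
// softmax(logits) - onehot into it (the training-mode case in the diff).
float SoftmaxCrossEntropy(const std::vector<float> &logits, const std::vector<float> &onehot,
                          std::vector<float> *grads) {
  float max_logit = logits[0];
  for (float v : logits) max_logit = std::max(max_logit, v);
  float sum = 0.0f;
  std::vector<float> prob(logits.size());
  for (size_t i = 0; i < logits.size(); ++i) {
    prob[i] = std::exp(logits[i] - max_logit);
    sum += prob[i];
  }
  float loss = 0.0f;
  for (size_t i = 0; i < logits.size(); ++i) {
    prob[i] /= sum;
    loss -= onehot[i] * std::log(prob[i]);
    if (grads != nullptr) (*grads)[i] = prob[i] - onehot[i];
  }
  return loss;
}

int main() {
  std::vector<float> logits = {2.0f, 0.5f, -1.0f};
  std::vector<float> onehot = {1.0f, 0.0f, 0.0f};
  std::vector<float> grads(3, 0.0f);
  std::printf("loss = %g, grad[0] = %g\n", SoftmaxCrossEntropy(logits, onehot, &grads), grads[0]);
  return 0;
}
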
@@ -43,6 +43,7 @@ int SoftmaxGradCPUKernel::Init() {
   auto axis = param->axis_;
   if ((axis < -1) || (axis > param->n_dim_)) {
     MS_LOG(ERROR) << "SoftmaxGrad axis is invalid!";
     return RET_ERROR;
   } else if (axis == -1) {
     axis = param->axis_ = (in_dims - 1);
   }
@@ -108,5 +109,4 @@ kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector<lite::Tens
   }
   return kernel;
 }
-
 } // namespace mindspore::kernel

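The SoftmaxGrad hunk normalizes axis = -1 to the last dimension and rejects anything outside the accepted range. A small sketch of that normalization with a hypothetical helper name (not the kernel's API):

#include <cstdio>

// -1 means "last dimension"; values below -1 or above n_dim are rejected,
// mirroring the range check in the hunk above.
int NormalizeSoftmaxAxis(int axis, int n_dim, int *out_axis) {
  if (axis < -1 || axis > n_dim) {
    return -1;  // invalid axis, RET_ERROR in the kernel
  }
  *out_axis = (axis == -1) ? n_dim - 1 : axis;
  return 0;
}

int main() {
  int axis = 0;
  if (NormalizeSoftmaxAxis(-1, 4, &axis) == 0) {
    std::printf("axis -1 on a 4-D tensor becomes %d\n", axis);  // 3
  }
  return 0;
}
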
@@ -85,7 +85,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data_c());
   auto labels = reinterpret_cast<int *>(in_tensors_.at(1)->data_c());
   float *out = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
-  float *grads = NULL;
+  float *grads = nullptr;
   if (IsTrain() && out_tensors_.size() > 1) {
     grads = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
   }
@@ -164,7 +164,7 @@ kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator(
     return nullptr;
   }
   auto ret = kernel->Init();
-  if (RET_OK != ret) {
+  if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     delete kernel;
@@ -172,5 +172,4 @@ kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator(
   }
   return kernel;
 }
-
 } // namespace mindspore::kernel

@@ -30,11 +30,11 @@ using mindspore::schema::PrimitiveType_TupleGetItem;
 namespace mindspore::kernel {

 int TupleGetItemCPUKernel::Init() {
-  if (1 != in_tensors_.size()) {
+  if (in_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Tuple Grad Filter should have one input";
     return RET_ERROR;
   }
-  if (1 != out_tensors_.size()) {
+  if (out_tensors_.size() != 1) {
     MS_LOG(ERROR) << "Tuple Grad Filter should have one output";
     return RET_ERROR;
   }
@@ -48,7 +48,6 @@ int TupleGetItemCPUKernel::Execute(int task_id) {
   auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());

   memcpy(out, in, in_tensors_.at(0)->Size());

   return RET_OK;
 }