forked from mindspore-Ecosystem/mindspore
!5767 fix codex report problems
Merge pull request !5767 from zhaozhenlong/lite/op/clean_codex
Commit: 9b4f7e4b18
@@ -91,13 +91,13 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input
 int y_bottom = y_bottoms[h];
 int y_top = y_tops[h];
 float y_bottom_weight = y_bottom_weights[h];
-float y_top_weight = 1.0f - y_bottom_weight;
+const float y_top_weight = 1.0f - y_bottom_weight;

 for (w = 0; w < new_width; w++) {
 int x_left = x_lefts[w];
 int x_right = x_rights[w];
 float x_left_weight = x_left_weights[w];
-float x_right_weight = 1.0f - x_left_weight;
+const float x_right_weight = 1.0f - x_left_weight;
 float top_left_weight = y_top_weight * x_left_weight;
 float top_right_weight = y_top_weight * x_right_weight;
 float bottom_left_weight = y_bottom_weight * x_left_weight;
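For context, a minimal sketch of how these bilinear weights combine into one output pixel; interpolate_pixel and its test values are illustrative only, not repository code.

    #include <cstdio>

    // Blend one output pixel from its four neighbours. y_bottom_weight and
    // x_left_weight are the precomputed fractional offsets; the complementary
    // weights are derived exactly as in the hunk above, so they can be const.
    static float interpolate_pixel(float top_left, float top_right, float bottom_left, float bottom_right,
                                   float y_bottom_weight, float x_left_weight) {
      const float y_top_weight = 1.0f - y_bottom_weight;
      const float x_right_weight = 1.0f - x_left_weight;
      return top_left * y_top_weight * x_left_weight + top_right * y_top_weight * x_right_weight +
             bottom_left * y_bottom_weight * x_left_weight + bottom_right * y_bottom_weight * x_right_weight;
    }

    int main() {
      printf("%f\n", interpolate_pixel(0.f, 1.f, 2.f, 3.f, 0.5f, 0.5f));  // midway between all four -> 1.5
      return 0;
    }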
@@ -41,6 +41,7 @@ int ROIPooling(float *in_ptr, float *out_ptr, float *roi, int tid, ROIPoolingPar
 for (int i = roi_st; i < roi_end; ++i) {
 int roi_batch_ind = (int)roi[roi_ind_st];  // batch_index
 if (roi_batch_ind >= batch_size) {
+free(max_c);
 return NNACL_ERRCODE_INDEX_OUT_OF_RANGE;
 }
 int roi_start_h = (int)roundf(roi[roi_ind_st + 1] * scale);  // top-left x1
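The added free(max_c) closes a leak on the existing error path. A condensed sketch of that pattern; process and the error constants are invented names, only the structure mirrors the hunk.

    #include <cstdlib>

    // Any early error return taken after a heap allocation must release that
    // allocation first, otherwise the buffer leaks on the error path.
    constexpr int kOk = 0;
    constexpr int kAllocError = 1;
    constexpr int kIndexError = 2;

    int process(const int *indices, int count, int batch_size) {
      float *scratch = static_cast<float *>(malloc(sizeof(float) * 16));
      if (scratch == nullptr) {
        return kAllocError;
      }
      for (int i = 0; i < count; ++i) {
        if (indices[i] >= batch_size) {
          free(scratch);  // mirrors the added free(max_c): release before bailing out
          return kIndexError;
        }
        scratch[i % 16] = static_cast<float>(indices[i]);  // stand-in for the pooling work
      }
      free(scratch);
      return kOk;
    }

    int main() {
      const int idx[] = {0, 1, 5};
      return process(idx, 3, 4) == kIndexError ? 0 : 1;  // 5 >= 4 triggers the error path
    }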
@@ -15,17 +15,21 @@
 */

 #include "nnacl/int8/leaky_relu_int8.h"
+#include "nnacl/errorcode.h"

-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
 if (quant_prelu_parm == NULL) {
-return;
+return NNACL_NULL_PTR;
 }
 float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
 int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
 const float output_inverse_scale = 1.f / output_scale;
 int output_dim = quant_prelu_parm->input_dim_;

-QuantArg *input_quant = NULL;
+QuantArg *input_quant = malloc(sizeof(QuantArg)*output_dim);
+if (input_quant == NULL) {
+return NNACL_NULL_PTR;
+}
 for (int i = 0; i < output_dim; i++) {
 input_quant[i].scale_ = quant_prelu_parm->quant_arg.in_args_.scale_;
 input_quant[i].zp_ = quant_prelu_parm->quant_arg.in_args_.zp_;
@@ -56,4 +60,6 @@ void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant
 }
 }
 }
+free(input_quant);
+return NNACL_OK;
 }
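Taken together, these two hunks turn DoLeakReluInt8 from a void function into one that reports an int status. A condensed, stand-alone sketch of that pattern; DoWork, Params and the constants are placeholders that only mirror NNACL_OK / NNACL_NULL_PTR.

    #include <cstdlib>

    constexpr int kNnaclOk = 0;
    constexpr int kNnaclNullPtr = 1;

    struct Params {
      int dim;
    };

    int DoWork(const Params *params) {
      if (params == nullptr) {
        return kNnaclNullPtr;  // was a bare `return;` when the function was void
      }
      int *scratch = static_cast<int *>(malloc(sizeof(int) * params->dim));
      if (scratch == nullptr) {
        return kNnaclNullPtr;  // the newly checked allocation
      }
      for (int i = 0; i < params->dim; ++i) {
        scratch[i] = i;  // stand-in for the per-channel quant setup
      }
      free(scratch);     // the newly added cleanup on the success path
      return kNnaclOk;
    }

    int main() {
      Params p{4};
      return (DoWork(&p) == kNnaclOk && DoWork(nullptr) == kNnaclNullPtr) ? 0 : 1;
    }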
@@ -23,7 +23,7 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
 #ifdef __cplusplus
 }
 #endif
@@ -101,8 +101,14 @@ int ResizeBilinearInt8WithFloatWeight(const int8_t *input_data, int8_t *output_d
 int32_t new_height = output_shape[1];
 int32_t new_width = output_shape[2];
 float height_scale, width_scale;
-ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
-ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+int ret = ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
+if (ret != NNACL_OK) {
+return ret;
+}
+ret = ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+if (ret != NNACL_OK) {
+return ret;
+}

 int n, h, w, c;
 for (n = 0; n < in_n; n++) {
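A minimal sketch of the caller-side pattern introduced here, with compute_scale standing in for ComputeScaleFloat: capture each status and propagate the first failure instead of discarding it.

    constexpr int kOk = 0;
    constexpr int kDivisorZero = 1;

    int compute_scale(int in_value, int out_value, float *scale) {  // stand-in for ComputeScaleFloat
      if (out_value == 0) {
        return kDivisorZero;
      }
      *scale = static_cast<float>(in_value) / out_value;
      return kOk;
    }

    int resize_prepare(int in_h, int in_w, int out_h, int out_w, float *h_scale, float *w_scale) {
      int ret = compute_scale(in_h, out_h, h_scale);
      if (ret != kOk) {
        return ret;  // stop at the first failure instead of ignoring it
      }
      ret = compute_scale(in_w, out_w, w_scale);
      if (ret != kOk) {
        return ret;
      }
      return kOk;
    }

    int main() {
      float hs = 0.f, ws = 0.f;
      return resize_prepare(480, 640, 0, 320, &hs, &ws) == kDivisorZero ? 0 : 1;  // zero output height is rejected
    }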
@@ -189,11 +195,15 @@ void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int3
 *scaled_high_weight = *scaled_pos - (1 << 10) * (*low);
 }

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+if (out_value == 0) {
+return NNACL_ERRCODE_DIVISOR_ZERO;
+}
 *scale = (float)in_value / out_value;
 if (align_corners && out_value > 1) {
 *scale = (float)(in_value - 1) / (out_value - 1);
 }
+return NNACL_OK;
 }

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
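A quick worked example of the guarded computation: resizing a 10-pixel axis to 4 pixels gives scale = 10 / 4 = 2.5, while align_corners pins the end points and gives scale = (10 - 1) / (4 - 1) = 3.0; a requested output size of 0 now returns NNACL_ERRCODE_DIVISOR_ZERO instead of dividing by zero.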
@@ -40,7 +40,7 @@ void ComputeScale(const int32_t in_value, const int32_t out_value, const bool al
 void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int32_t size, int32_t *scaled_pos,
 int32_t *low, int32_t *scaled_low_weight, int32_t *high, int32_t *scaled_high_weight);

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
 int32_t *low, float *low_weight, int32_t *high, float *high_weight);
@@ -352,6 +352,7 @@ int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor)
 size_t channels = static_cast<size_t>(input_tensor->Batch());
 if (input_tensor->GetQuantParams().size() != channels) {
 MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
+free(dequant_data);
 return RET_ERROR;
 }
 size_t per_channel_size = input_tensor->DataSize() / channels;
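For context, the check above ensures there is one quant parameter per channel before the filter is dequantized, and the added free(dequant_data) releases the buffer when that precondition fails. An illustrative sketch assuming the standard affine scheme real = scale * (q - zero_point); QuantParam and dequant_per_channel are simplified stand-ins, not the lite::tensor::Tensor API.

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    struct QuantParam {
      float scale;
      int zero_point;
    };

    // One quant parameter per channel is consumed by the per-channel loop, which
    // is why a size/channel mismatch has to abort the restore.
    void dequant_per_channel(const int8_t *quant_data, const std::vector<QuantParam> &params,
                             std::size_t per_channel_size, float *dequant_data) {
      for (std::size_t c = 0; c < params.size(); ++c) {
        for (std::size_t i = 0; i < per_channel_size; ++i) {
          const std::size_t idx = c * per_channel_size + i;
          dequant_data[idx] = params[c].scale * (quant_data[idx] - params[c].zero_point);
        }
      }
    }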
@@ -20,6 +20,7 @@
 #include "src/runtime/runtime_api.h"
 #include "src/kernel_registry.h"
 #include "include/errorcode.h"
+#include "nnacl/errorcode.h"

 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
@@ -105,7 +106,11 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
 auto out_tensor = out_tensors_.at(kOutputIndex);
 int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
 int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
-DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+if (ret != NNACL_OK) {
+MS_LOG(ERROR) << "DoLeakReluInt8 failed";
+return RET_ERROR;
+}
 return RET_OK;
 }

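A minimal sketch of the kernel-boundary pattern used here: the nnacl routine now reports a status, and the kernel maps any failure to the framework's error return instead of silently continuing. run_op and do_execute are placeholder names, not the real APIs.

    #include <cstdint>
    #include <cstdio>

    constexpr int kNnaclOk = 0;  // nnacl-side status
    constexpr int kRetOk = 0;    // framework-side status
    constexpr int kRetError = 1;

    // Stand-in for DoLeakReluInt8: reports a status instead of returning void.
    int run_op(const int8_t *input, int8_t *output) {
      if (input == nullptr || output == nullptr) {
        return 1;  // plays the role of NNACL_NULL_PTR
      }
      *output = *input > 0 ? *input : static_cast<int8_t>(*input / 8);  // toy leaky-relu-like op
      return kNnaclOk;
    }

    // Stand-in for LeakyReluInt8CPUKernel::DoExecute.
    int do_execute(const int8_t *input, int8_t *output) {
      const int ret = run_op(input, output);
      if (ret != kNnaclOk) {
        fprintf(stderr, "run_op failed, status %d\n", ret);  // plays the role of MS_LOG(ERROR)
        return kRetError;
      }
      return kRetOk;
    }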
@@ -500,12 +500,11 @@ int DistributeTask(int thread_pool_id, Task *task, int task_num) {
 } while (!k_success_flag);
 }
 // master thread
-task->func(task->content, size - 1);
 if (task->func == NULL) {
 LOG_ERROR("task->func is nullptr");
 return RET_TP_ERROR;
 }
-
+task->func(task->content, size - 1);
 // wait
 WaitAllThread(thread_pool_id);
 return RET_TP_OK;
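The reordering matters because a null check performed only after the call can no longer prevent the crash. A small sketch of the check-before-use pattern; Task and dispatch are simplified stand-ins for the thread-pool types.

    #include <cstdio>

    using TaskFunc = int (*)(void *content, int thread_id);

    struct Task {
      TaskFunc func;
      void *content;
    };

    // Validate the function pointer before invoking it; checking it afterwards,
    // as the old code did, cannot stop a null dereference.
    int dispatch(Task *task, int thread_id) {
      if (task == nullptr || task->func == nullptr) {
        fprintf(stderr, "task->func is nullptr\n");
        return -1;  // refuse to run instead of dereferencing a null pointer
      }
      return task->func(task->content, thread_id);  // only reached with a valid pointer
    }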
@@ -547,11 +546,11 @@ void ThreadRun(Thread *thread) {
 while (thread_pool->is_alive) {
 while (thread->activate) {
 if (PopTaskFromQueue(thread, &task)) {
-task->func(task->content, thread_id);
 if (task->func == NULL) {
 LOG_ERROR("task->func is nullptr");
 return;
 }
+task->func(task->content, thread_id);
 atomic_fetch_sub_explicit(&thread->task_size, 1, memory_order_relaxed);
 // atomic_store_explicit(&thread->task_size, thread->task_size - 1, memory_order_relaxed);
 spin_count = 0;
@@ -54,6 +54,8 @@ int MSNetWork::ReleaseNets(void) {
 return 0;
 }

+const int MSNetWork::RET_CATEGORY_SUM = 601;
+
 const char *MSNetWork::labels_name_map[MSNetWork::RET_CATEGORY_SUM] = {
 {"Tortoise"}, {"Container"}, {"Magpie"}, {"Seaturtle"}, {"Football"}, {"Ambulance"}, {"Ladder"},
 {"Toothbrush"}, {"Syringe"}, {"Sink"}, {"Toy"}, {"Organ(MusicalInstrument) "}, {"Cassettedeck"},
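This pairs with the header change further down: the constant keeps only a declaration inside the class and receives its value in a single translation unit. A minimal illustration with an invented Catalog class, not the real MSNetWork.

    // --- header (Catalog.h) ---
    class Catalog {
     public:
      static const int kCategorySum;  // declaration only, as in the new MSNetWork.h
      static const char *names[];     // array of unknown bound, completed by the definition
    };

    // --- source (Catalog.cpp) ---
    const int Catalog::kCategorySum = 3;  // the single out-of-class definition carries the value
    const char *Catalog::names[] = {"Tortoise", "Container", "Magpie"};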
@@ -32,7 +32,6 @@
 #include <memory>
 #include <utility>
-

 struct ImgDims {
 int channel = 0;
 int width = 0;
@@ -43,8 +42,6 @@ struct ImgDims {
 std::shared_ptr<mindspore::session::LiteSession> sess = nullptr;
 };*/
-
-

 class MSNetWork {
 public:
 MSNetWork();
@@ -55,10 +52,10 @@ class MSNetWork {
 int ReleaseNets(void);

 private:
 mindspore::session::LiteSession *session;
 mindspore::lite::Model *model;
-static const int RET_CATEGORY_SUM = 601;
+static const int RET_CATEGORY_SUM;
 static const char *labels_name_map[RET_CATEGORY_SUM];
 };

 #endif
@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {

 imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);

-int offsetX = 16;
-int offsetY = 16;
-int cropWidth = 224;
-int cropHeight = 224;
+const int offsetX = 16;
+const int offsetY = 16;
+const int cropWidth = 224;
+const int cropHeight = 224;

 // Standardization processing.
 float meanR = 0.485;
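For context, a sketch of how const crop parameters like these would drive a fixed 224x224 crop with OpenCV; crop224 is illustrative and does not reproduce the app's full resize and normalization pipeline.

    #include <opencv2/core.hpp>

    // Crop the centre 224x224 region out of a 256x256 image: (256 - 224) / 2 = 16.
    cv::Mat crop224(const cv::Mat &img256) {
      const int offsetX = 16;
      const int offsetY = 16;
      const int cropWidth = 224;
      const int cropHeight = 224;
      return img256(cv::Rect(offsetX, offsetY, cropWidth, cropHeight)).clone();  // cv::Rect is (x, y, w, h)
    }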