forked from mindspore-Ecosystem/mindspore
!23903 [MSLITE][DEVELOP] clean code check warnings
Merge pull request !23903 from yangruoqi713/codex
commit 211ce1bf1e
@@ -19,9 +19,9 @@
 #include <string.h>
 #include "nnacl/errorcode.h"
 
-int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset, int num_unit,
+int DoSplit(const void *in_data, void **out_data, const int *input_shape, int offset, int num_unit,
             const SplitParameter *split_param, int data_size) {
-  int8_t *int8_in = (int8_t *)in_data;
+  const int8_t *int8_in = (int8_t *)in_data;
 
   const int num_split = split_param->num_split_;
   const int *split_sizes = split_param->split_sizes_;
@@ -37,7 +37,7 @@ int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset,
 
   split_which = offset % num_split;
   split_times = offset / num_split;
-  int8_t *src = int8_in + split_times * stride_per_split * data_size;
+  const int8_t *src = int8_in + split_times * stride_per_split * data_size;
 
   for (int i = 0; i < split_which; i++) {
     src += split_sizes[i] * in_stride * data_size;
@@ -23,7 +23,7 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset, int num_unit,
+int DoSplit(const void *in_data, void **out_data, const int *input_shape, int offset, int num_unit,
             const SplitParameter *split_param, int data_size);
 #ifdef __cplusplus
 }
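The three hunks above are a pure const-correctness cleanup: DoSplit only reads from in_data, so the parameter and the local aliases derived from it can be const-qualified, which is what the code-check warnings flagged. The sketch below only illustrates the pattern and is not the nnacl implementation; SplitEven, chunk_bytes and the even-split layout are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Copy num_out equally sized chunks out of in_data. The input is never
// written, so every pointer that aliases it is const-qualified; the outputs
// are written through out_data, so that side stays non-const.
static void SplitEven(const void *in_data, void **out_data, int num_out, int chunk_bytes) {
  const int8_t *src = (const int8_t *)in_data;  // read-only view of the input
  for (int i = 0; i < num_out; ++i) {
    memcpy(out_data[i], src + i * chunk_bytes, (size_t)chunk_bytes);
  }
}

int main(void) {
  int8_t in[6] = {1, 2, 3, 4, 5, 6};
  int8_t a[3], b[3];
  void *outs[2] = {a, b};
  SplitEven(in, outs, 2, 3);
  printf("%d %d %d | %d %d %d\n", a[0], a[1], a[2], b[0], b[1], b[2]);
  return 0;
}

Only the read side gains the qualifier: out_data remains a plain void ** because the function writes through each output pointer, which is why the header declaration in the hunk above mirrors the .c change exactly.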
@@ -33,13 +33,12 @@ using mindspore::schema::PrimitiveType_Unsqueeze;
 
 namespace mindspore::kernel {
 int ReshapeBaseCPUKernel::Run() {
-  auto in_tensor = in_tensors().front();
-  auto out_tensor = out_tensors().front();
-
   /*
    * in_tensor : CPU-allocator ; out_tensor : GPU-allocator
    * out_tensor data_c can not change
    * */
+  auto in_tensor = in_tensors().front();
+  auto out_tensor = out_tensors().front();
   if (in_tensor->allocator() == nullptr || in_tensor->allocator() != out_tensor->allocator() ||
       op_parameter_->is_train_session_) {
     CHECK_NULL_RETURN(out_tensor->data());
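This last hunk is code hygiene rather than a behaviour change: the two tensor handles are now declared after the comment that explains their allocator constraint and immediately before their first use. For context, the sketch below paraphrases the decision the surrounding kernel code makes; Allocator, MockTensor and ReshapeRun are mock stand-ins for this example, not the MindSpore Lite tensor API.

#include <string.h>
#include <memory>

struct Allocator {};  // stand-in for a device-specific allocator

struct MockTensor {
  std::shared_ptr<Allocator> allocator;  // which allocator placed the buffer
  void *data = nullptr;
  size_t size = 0;
};

// Returns 0 on success, -1 when a copy is required but the output buffer is
// missing (mirroring the CHECK_NULL_RETURN(out_tensor->data()) guard above).
int ReshapeRun(MockTensor *in, MockTensor *out, bool is_train_session) {
  // Different allocators (e.g. CPU-allocated input, GPU-allocated output) or a
  // training session mean the output must keep its own storage, so the kernel
  // copies the bytes instead of handing the input pointer over.
  if (in->allocator == nullptr || in->allocator != out->allocator || is_train_session) {
    if (out->data == nullptr) return -1;
    memcpy(out->data, in->data, in->size);
    return 0;
  }
  // Same allocator and not training: reshape is free to reuse the input buffer.
  out->data = in->data;
  return 0;
}

int main() {
  auto cpu = std::make_shared<Allocator>();
  float src[4] = {1, 2, 3, 4};
  float dst[4] = {0, 0, 0, 0};
  MockTensor in{cpu, src, sizeof(src)};
  MockTensor out{cpu, dst, sizeof(dst)};
  return ReshapeRun(&in, &out, /*is_train_session=*/true);
}

Moving the declarations below the comment does not alter this logic; it simply keeps the explanation, the handles, and the check that uses them together, which is the kind of readability issue the code-check pass reports.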