forked from mindspore-Ecosystem/mindspore
!21221 [MSLITE][DEVELOP] remove malloc for model GetInputs API, clean code check
Merge pull request !21221 from yangruoqi713/master
commit 3062e03ba1
@@ -260,7 +260,6 @@ std::vector<MSTensor> ModelImpl::GetInputs() {
   }
   res.resize(inputs.size());
   for (size_t i = 0; i < inputs.size(); i++) {
-    inputs[i]->MutableData();  // prepare data
     auto impl = std::shared_ptr<MSTensor::Impl>(new (std::nothrow) MSTensor::Impl(inputs[i]));
     if (impl == nullptr || impl->lite_tensor() == nullptr) {
       MS_LOG(ERROR) << "Create tensor failed.";
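The hunk above drops the eager MutableData() call, which is what the commit title's "remove malloc for model GetInputs API" refers to: MutableData() allocates the tensor's buffer on first use, so calling it inside GetInputs() forced an allocation for every input tensor even when the caller never wrote to it. A minimal caller-side sketch of the resulting lazy pattern (Tensor and FillInput are illustrative stand-ins, not MindSpore source):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Tensor {
      explicit Tensor(std::size_t n) : byte_size(n) {}
      std::size_t byte_size;
      std::vector<char> buf;   // stays empty until first access
      void *MutableData() {    // allocate lazily, mirroring MSTensor::MutableData()
        if (buf.empty()) buf.resize(byte_size);  // the malloc now happens here, on demand
        return buf.data();
      }
    };

    // The buffer is allocated only when input data is actually written.
    void FillInput(Tensor &t, const void *src, std::size_t n) {
      std::memcpy(t.MutableData(), src, n);
    }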
@@ -78,7 +78,7 @@ int StackBaseCPUKernel::Init() {
 }
 
 int StackBaseCPUKernel::Execute(int task_id) {
-  auto output_data = reinterpret_cast<char *>(out_tensors_.at(0)->data_c());
+  auto output_data = reinterpret_cast<void *>(out_tensors_.at(0)->data_c());
   if (output_data == nullptr) {
     return RET_NULL_PTR;
   }
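Keeping output_data as void* here looks like a code-check cleanup: data_c() already returns void*, a null check needs no particular pointee type, and ISO C++ does not define arithmetic on void*, so the char* cast is deferred to the one place that actually does byte arithmetic (next hunk). A minimal sketch of the idiom:

    #include <cstddef>

    // Cast to char* only at the point where a byte offset is applied;
    // everywhere else the buffer stays an opaque void*.
    void *AdvanceBytes(void *base, std::size_t offset_bytes) {
      return reinterpret_cast<char *>(base) + offset_bytes;
    }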
@@ -86,7 +86,7 @@ int StackBaseCPUKernel::Execute(int task_id) {
   auto start = task_id * step;
   auto end = MSMIN(start + step, outer_size_);
   auto input_num = in_tensors_.size();
-  auto output = output_data + input_num * start * copy_size_;
+  auto output = reinterpret_cast<char *>(output_data) + input_num * start * copy_size_;
   Stack(all_inputs_, reinterpret_cast<void *>(output), input_num, copy_size_, start, end);
   return RET_OK;
 }
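The offset expression encodes the stack layout: for each outer index, the kernel writes input_num chunks of copy_size_ bytes back to back, so the task covering outer indices [start, end) begins at byte input_num * start * copy_size_. A worked example with assumed sizes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t input_num = 4;    // tensors being stacked (assumed)
      const std::size_t copy_size_ = 64;  // bytes per tensor per outer index (assumed)
      const std::size_t step = 8;         // outer indices per task (assumed)
      for (int task_id = 0; task_id < 3; ++task_id) {
        const std::size_t start = task_id * step;
        std::printf("task %d starts at byte offset %zu\n", task_id,
                    input_num * start * copy_size_);
      }
      return 0;
    }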
@@ -106,7 +106,7 @@ int StackBaseCPUKernel::Run() {
     return RET_ERROR;
   }
   for (size_t j = 0; j < inputs_num; ++j) {
-    auto input_data = reinterpret_cast<char *>(in_tensors_.at(j)->data_c());
+    auto input_data = reinterpret_cast<void *>(in_tensors_.at(j)->data_c());
     if (input_data == nullptr) {
       return RET_NULL_PTR;
     }
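Run() applies the same void* treatment to each input while collecting the raw pointers that Execute() later consumes. A hedged sketch of that gather-and-validate pattern (TensorLike and the error codes are illustrative stand-ins, not the kernel's real types):

    #include <vector>

    struct TensorLike {
      void *buf = nullptr;
      void *data_c() { return buf; }  // untyped buffer accessor, like lite::Tensor
    };
    constexpr int kOk = 0;
    constexpr int kNullPtrErr = -1;

    int GatherInputs(std::vector<TensorLike *> &ins, std::vector<void *> *all_inputs) {
      all_inputs->clear();
      for (auto *t : ins) {
        void *p = t->data_c();
        if (p == nullptr) return kNullPtrErr;  // fail fast before launching parallel work
        all_inputs->push_back(p);
      }
      return kOk;
    }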
@@ -66,7 +66,7 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(bias_data_, 0, UP_ROUND(output_channel, C8NUM) * sizeof(float16_t));
-  if (in_tensors_.size() == 3) {
+  if (in_tensors_.size() == kInputSize2) {
     if (in_tensors_.at(kBiasIndex)->data_type() != kNumberTypeFloat16) {
       MS_LOG(ERROR) << "DeConv fp16 only support fp16 weight";
       return RET_ERROR;
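Replacing the literal 3 with kInputSize2 is a magic-number cleanup; since the old condition compared against 3, the constant must equal 3 (input, weight, and bias tensors). A sketch of the presumed definitions, with values inferred from this diff rather than quoted from the headers:

    #include <cstddef>

    constexpr std::size_t kInputSize2 = 3;  // kernel with bias: input, weight, bias
    constexpr std::size_t kBiasIndex = 2;   // the bias tensor is the third input

Named constants make the intent visible at the call site and give static code checkers a single definition to verify.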