[MSLITE][DEVELOP] code review for lite: base op directory, include directory
parent b78efaf644
commit c655a7bd8b
@@ -139,7 +139,6 @@ class MS_API Kernel {
   /// \param[in] value define the kernel's attribute value.
   void SetAttr(const std::string &key, const std::string &value) { attrs_[key] = value; }

  protected:
   std::string name_;
   const mindspore::Context *context_ = nullptr;
   std::vector<mindspore::MSTensor> inputs_;

@@ -21,7 +21,7 @@ void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, c
   int32_t block_size = param->block_size_;
   int32_t in_shape_dim2 = in_shape[2];
   int32_t in_shape_dim1 = in_shape[1];
-  size_t copy_size = block_size * param->out_stride_dim2_ * param->data_type_size_;
+  size_t copy_size = (size_t)block_size * param->out_stride_dim2_ * param->data_type_size_;
   for (int i = 0; i < in_shape[0]; ++i) {
     int in_offset_n = i * param->in_stride_dim0_;
     int out_offset_n = i * param->out_stride_dim0_;

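The cast matters because all three operands are 32-bit: without it the product is evaluated in int and only then widened to size_t, so large shapes can overflow before the widening. A standalone sketch (not MindSpore code; assumes an LP64 target where size_t is 64-bit):

// Sketch of the overflow the (size_t) cast prevents.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  int32_t block_size = 70000;
  int32_t stride = 70000;
  int32_t elem_size = 4;
  // All operands int32: the multiply happens in 32 bits and overflows
  // (undefined behavior) before the assignment widens it:
  // size_t bad = block_size * stride * elem_size;
  // Widening the first operand makes the whole chain 64-bit:
  size_t good = (size_t)block_size * stride * elem_size;
  printf("%zu\n", good);  // 19600000000
  return 0;
}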
@@ -23,7 +23,7 @@ extern "C" {
 #endif

 static inline void ApproximateZerosLike(void *output, int data_size) {
-  memset(output, 0.0, data_size);
+  (void)memset(output, 0, data_size);
   return;
 }

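Two fixes in one line: memset takes an int fill value, so the double literal 0.0 only worked through an implicit conversion and hinted at a floating-point fill that memset cannot perform; the (void) cast additionally documents that the return value is deliberately ignored. A minimal sketch of the corrected idiom (ZeroFill is an illustrative helper, not MindSpore API):

// memset fills bytes with an int converted to unsigned char:
//   void *memset(void *s, int c, size_t n);
// For float buffers, a 0 byte fill only coincides with 0.0f by luck of
// the IEEE-754 bit pattern; there is no "fill with 0.5f" via memset.
#include <cstring>

void ZeroFill(float *buf, std::size_t count) {
  // (void) makes the ignored return value explicit for static checkers.
  (void)std::memset(buf, 0, count * sizeof(float));
}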
@@ -46,7 +46,7 @@ typedef struct NpuDeviceInfo {

 /// \brief AscendDeviceInfo defined for Ascend's configuration information.
 typedef struct AscendDeviceInfo {
-  uint32_t device_id_;
+  uint32_t device_id_ = 0;
   std::string batch_size_;
   std::string image_size_;
 } AscendDeviceInfo;

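Giving device_id_ a default member initializer means an AscendDeviceInfo created without an explicit initializer no longer carries an indeterminate id. A small sketch with a hypothetical stand-in type:

// Minimal sketch: default member initializers remove the
// "forgot to zero it" failure mode.
#include <cstdint>
#include <string>

struct DeviceInfoSketch {     // hypothetical stand-in, not the real type
  uint32_t device_id_ = 0;    // always 0 unless set explicitly
  std::string batch_size_;    // std::string default-constructs anyway
};

DeviceInfoSketch g_info;      // g_info.device_id_ is 0, not garbage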
@@ -116,7 +116,7 @@ class MS_API KernelReg {
   /// \param[in] creator Define a function pointer to create a kernel.
   KernelReg(const std::string &arch, const std::string &provider, DataType data_type, int op_type,
             const CreateKernel creator) {
-    RegisterKernel::RegKernel(arch, provider, data_type, op_type, creator);
+    (void)RegisterKernel::RegKernel(arch, provider, data_type, op_type, creator);
   }

   /// \brief Method to register customized op.

@@ -128,7 +128,7 @@ class MS_API KernelReg {
   /// \param[in] creator Define a function pointer to create a kernel.
   KernelReg(const std::string &arch, const std::string &provider, DataType data_type, const std::string &op_type,
             const CreateKernel creator) {
-    RegisterKernel::RegCustomKernel(arch, provider, data_type, op_type, creator);
+    (void)RegisterKernel::RegCustomKernel(arch, provider, data_type, op_type, creator);
   }
 };

@@ -83,7 +83,7 @@ class MS_API KernelInterfaceReg {
   /// \param[in] op_type Define the ordinary op type.
   /// \param[in] creator Define the KernelInterface create function.
   KernelInterfaceReg(const std::string &provider, int op_type, const KernelInterfaceCreator creator) {
-    RegisterKernelInterface::Reg(provider, op_type, creator);
+    (void)RegisterKernelInterface::Reg(provider, op_type, creator);
   }

   /// \brief Constructor of KernelInterfaceReg to register custom op.

@@ -92,7 +92,7 @@ class MS_API KernelInterfaceReg {
   /// \param[in] op_type Define the concrete type of a custom op.
   /// \param[in] creator Define the KernelInterface create function.
   KernelInterfaceReg(const std::string &provider, const std::string &op_type, const KernelInterfaceCreator creator) {
-    RegisterKernelInterface::CustomReg(provider, op_type, creator);
+    (void)RegisterKernelInterface::CustomReg(provider, op_type, creator);
   }

   virtual ~KernelInterfaceReg() = default;

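All four constructors now discard the registration status explicitly: a constructor has no way to return it, so the (void) cast is the honest spelling. These classes follow the usual static-registrar idiom, sketched below with illustrative names (Registry, Registrar, and Creator are not the MindSpore types):

#include <functional>
#include <map>
#include <string>
#include <utility>

using Creator = std::function<void *()>;

struct Registry {
  static int Reg(const std::string &key, Creator c) {
    Table()[key] = std::move(c);
    return 0;  // status code a constructor could not propagate
  }
  static std::map<std::string, Creator> &Table() {
    static std::map<std::string, Creator> table;
    return table;
  }
};

struct Registrar {
  Registrar(const std::string &key, Creator c) {
    // Mirrors the diff above: the status is explicitly discarded.
    (void)Registry::Reg(key, std::move(c));
  }
};

// A file-scope instance registers its kernel before main() runs.
static Registrar g_my_kernel("MyKernel", [] { return static_cast<void *>(nullptr); });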
@@ -17,17 +17,6 @@
 #ifndef MINDSPORE_LITE_SRC_CXX_API_CALLBACK_CALLBACK_IMPL_H_
 #define MINDSPORE_LITE_SRC_CXX_API_CALLBACK_CALLBACK_IMPL_H_

 #include <functional>
 #include <map>
 #include <string>
 #include <vector>
 #include <memory>
 #include <utility>
 #include <unordered_map>
 #include "include/api/model.h"
 #include "include/api/context.h"
 #include "include/api/cell.h"
 #include "include/lite_session.h"
 #include "include/train/train_loop_callback.h"

 namespace mindspore {

@@ -18,6 +18,7 @@
 #include <memory>
 #include "include/api/types.h"
 #include "include/api/data_type.h"
 #include "include/lite_types.h"
 #include "src/runtime/inner_allocator.h"
 #include "src/common/log_adapter.h"
+#include "src/delegate/tensorrt/distribution/distribution_base.h"
@@ -109,6 +110,10 @@ void Context::SetThreadAffinity(int mode) {
     MS_LOG(ERROR) << "Invalid context.";
     return;
   }
+  if (mode < lite::NO_BIND || mode > lite::MID_CPU) {
+    MS_LOG(WARNING) << "Invalid thread affinity mode: " << mode << ", change to NO_BIND mode.";
+    mode = lite::NO_BIND;
+  }
   data_->affinity_mode_ = mode;
   return;
 }

@@ -36,7 +36,7 @@ Status ContextUtils::AddCpuDevice(const std::shared_ptr<Allocator> &allocator, i
     MS_LOG(ERROR) << "Invalid affinity mode, only supports 0:no affinities, 1:big cores first, 2:little cores first.";
     return kLiteInputParamInvalid;
   }
-  lite::DeviceInfo device_info = {0};
+  lite::DeviceInfo device_info;
   device_info.cpu_device_info_ = {enable_fp16, static_cast<lite::CpuBindMode>(affinity_mode)};
   inner_context->device_list_.push_back({lite::DT_CPU, device_info, provider, provider_device, allocator});
   return kSuccess;

@@ -46,7 +46,7 @@ Status ContextUtils::AddGpuDevice(bool enable_fp16, uint32_t device_id, int rank
                                   bool enable_gl_texture, void *gl_context, void *gl_display,
                                   const std::string &provider, const std::string &provider_device,
                                   const std::shared_ptr<Allocator> &allocator, lite::InnerContext *inner_context) {
-  lite::DeviceInfo device_info = {0};
+  lite::DeviceInfo device_info;
   device_info.gpu_device_info_ = {enable_fp16, device_id, rank_id, group_size,
                                   enable_gl_texture, gl_context, gl_display};
   inner_context->device_list_.push_back({lite::DT_GPU, device_info, provider, provider_device, allocator});

@@ -54,14 +54,14 @@ Status ContextUtils::AddGpuDevice(bool enable_fp16, uint32_t device_id, int rank
 }

 Status ContextUtils::AddNpuDevice(int frequency, lite::InnerContext *inner_context) {
-  lite::DeviceInfo device_info = {0};
+  lite::DeviceInfo device_info;
   device_info.npu_device_info_ = {frequency};
   inner_context->device_list_.push_back({lite::DT_NPU, device_info});
   return kSuccess;
 }

 Status ContextUtils::AddAscendDevice(lite::InnerContext *inner_context, DeviceInfoContext *device) {
-  lite::DeviceInfo device_info = {0};
+  lite::DeviceInfo device_info;
   auto ascend_context = device->Cast<AscendDeviceInfo>();
   device_info.ascend_device_info_ = {ascend_context->GetDeviceID(), ascend_context->GetDynamicBatchSize(),
                                      ascend_context->GetDynamicImageSize()};

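Dropping `= {0}` in all four helpers is more than style: brace-initializing an aggregate with {0} initializes through its first member only and stops compiling once any member has a constructor, while plain default construction runs each member's own initializer. A sketch with hypothetical stand-in types:

// Minimal sketch (illustrative types): why "= {0}" was dropped.
struct CpuInfoSketch { bool enable_fp16_ = false; int bind_mode_ = 0; };
struct NpuInfoSketch { int frequency_ = 3; };

struct DeviceInfoSketch {
  CpuInfoSketch cpu_device_info_;
  NpuInfoSketch npu_device_info_;
};

void Demo() {
  // "DeviceInfoSketch a = {0};" would try to initialize the first member
  // from 0 and fails to compile because CpuInfoSketch is not a scalar.
  DeviceInfoSketch a;          // each member runs its own default initializer
  a.npu_device_info_ = {3};
}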
@@ -36,7 +36,7 @@ class Graph::GraphData {

   std::shared_ptr<lite::Model> lite_model() { return lite_model_; }

-  bool IsTrainModel() { return true; }
+  bool IsTrainModel() const { return true; }

  private:
   std::shared_ptr<lite::Model> lite_model_ = nullptr;

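Marking the accessor const (here, and IsRepack() further down in this commit) lets it be called through const references and pointers. A minimal illustration with a stand-in type:

// A const-qualified accessor is callable on const objects; the
// unqualified version is not.
struct GraphDataSketch {                 // illustrative type
  bool IsTrainModel() const { return true; }
};

bool Check(const GraphDataSketch &g) {
  return g.IsTrainModel();  // compiles only because the method is const
}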
@@ -17,18 +17,7 @@
 #ifndef MINDSPORE_LITE_SRC_CXX_API_METRICS_METRICS_ADAPTER_H_
 #define MINDSPORE_LITE_SRC_CXX_API_METRICS_METRICS_ADAPTER_H_

 #include <functional>
 #include <map>
 #include <string>
 #include <vector>
 #include <memory>
 #include <utility>
 #include <unordered_map>
 #include "include/api/model.h"
 #include "include/api/context.h"
 #include "include/api/cell.h"
 #include "include/api/metrics/metrics.h"
 #include "include/lite_session.h"
 #include "include/train/metrics.h"

 namespace mindspore {

@@ -34,7 +34,7 @@ Key::Key(const char *dec_key, size_t key_len) {
     return;
   }

-  memcpy(key, dec_key, key_len);
+  (void)memcpy(key, dec_key, key_len);
   len = key_len;
 }

@@ -69,7 +69,7 @@ class MSTensor::Impl {
 #endif

   virtual const std::string &Name() const {
-    static std::string empty = "";
+    static const std::string empty = "";
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return empty;

@@ -111,7 +111,7 @@ class MSTensor::Impl {
   }

   virtual const std::vector<int64_t> &Shape() const {
-    static std::vector<int64_t> empty;
+    static std::vector<int64_t> empty{};
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return empty;

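Both methods return a const reference, so even the error path must hand back an object that outlives the call; a function-local static fallback does that without introducing a global. Sketch of the pattern (TensorSketch and NameOf are illustrative, not MindSpore code):

#include <string>

struct TensorSketch { const std::string *name = nullptr; };

const std::string &NameOf(const TensorSketch *t) {
  static const std::string empty;  // lives for the whole program
  if (t == nullptr || t->name == nullptr) {
    return empty;                  // safe: never a dangling reference
  }
  return *t->name;
}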
@@ -15,8 +15,8 @@
  */

 #include "include/api/types.h"
-#include <string.h>
-#include <limits.h>
+#include <cstring>
+#include <limits>
 #include <numeric>
 #include "include/api/status.h"
 #include "include/api/dual_abi_helper.h"

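Swapping <string.h>/<limits.h> for C++ headers is the usual cleanup; note that <limits> is not <climits>: it provides std::numeric_limits rather than the INT_MAX-style macros. For instance:

#include <cstring>  // std::memcpy, std::strlen (replaces <string.h>)
#include <limits>   // std::numeric_limits (replaces the <limits.h> macros)

static_assert(std::numeric_limits<int>::digits >= 31, "sanity check");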
@@ -17,8 +17,8 @@
 #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CALL_H_

 #include <vector>
-#include "src/runtime/kernel/arm/base/carry_data.h"
 #include "src/tensor.h"
+#include "src/inner_kernel.h"
 #ifndef CONTROLFLOW_TENSORLIST_CLIP
 #include "src/tensorlist.h"
 #endif

@@ -1,139 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/runtime/kernel/arm/base/carry_data.h"
-#include "include/errorcode.h"
-
-using mindspore::lite::RET_ERROR;
-using mindspore::lite::RET_NOT_SUPPORT;
-using mindspore::lite::RET_OK;
-
-namespace mindspore::kernel {
-int CarryDataKernel::MoveData(const std::vector<lite::Tensor *>::iterator &dst_begin,
-                              const std::vector<lite::Tensor *>::iterator &dst_end,
-                              const std::vector<lite::Tensor *>::iterator &src_begin,
-                              const std::vector<lite::Tensor *>::iterator &src_limit) {
-  for (auto dst_iter = dst_begin, src_iter = src_begin; dst_iter != dst_end; dst_iter++, src_iter++) {
-    if (src_iter == src_limit) {
-      MS_LOG(ERROR) << "out of range of input tensor";
-      return RET_ERROR;
-    }
-    auto *dst_tensor = *dst_iter;
-    auto *src_tensor = *src_iter;
-    if (dst_tensor == nullptr || src_tensor == nullptr) {
-      MS_LOG(ERROR) << "input tensor or output tensor of merge is nullptr";
-      return RET_ERROR;
-    }
-    lite::STATUS ret = RET_OK;
-    if (src_tensor->IsConst() || src_tensor->IsGraphInput()) {
-      MS_LOG(DEBUG) << "Carry const data and graph inputs.";
-      dst_tensor->set_data(src_tensor->data());
-      dst_tensor->set_own_data(false);
-    } else {
-      if (src_tensor->data_type() == kObjectTypeTensorType && dst_tensor->data_type() == kObjectTypeTensorType) {
-#ifndef CONTROLFLOW_TENSORLIST_CLIP
-        MS_LOG(DEBUG) << "Carry MoveTensorListData";
-        ret = MoveTensorListData(reinterpret_cast<lite::TensorList *>(dst_tensor),
-                                 reinterpret_cast<lite::TensorList *>(src_tensor));
-#else
-        MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
-        return RET_NOT_SUPPORT;
-#endif
-      } else {
-        MS_LOG(DEBUG) << "Carry MoveTensorData";
-        ret = MoveTensorData(dst_tensor, src_tensor);
-      }
-    }
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "Move data failed : " << ret;
-      return ret;
-    }
-  }
-  return RET_OK;
-}
-
-int CarryDataKernel::MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_tensor) {
-  if (dst_tensor->data_type() != src_tensor->data_type() || dst_tensor->format() != src_tensor->format() ||
-      !(dst_tensor->shape() == src_tensor->shape() || (dst_tensor->shape().empty() && src_tensor->shape().empty()))) {
-    MS_LOG(ERROR) << "input tensor and output tensor is incompatible.";
-    MS_LOG(ERROR) << "input tensor data_type: " << src_tensor->data_type() << " vs "
-                  << "output tensor data_type: " << dst_tensor->data_type()
-                  << "input tensor format: " << src_tensor->format() << " vs "
-                  << "output tensor format: " << dst_tensor->format() << " input tensor shape: " << src_tensor->shape()
-                  << " vs "
-                  << "output tensor shape: " << dst_tensor->shape();
-    return RET_ERROR;
-  }
-  if (src_tensor->allocator() == nullptr) {
-    MS_LOG(ERROR) << "src_tensor allocator is nullptr.";
-    return RET_ERROR;
-  }
-
-  CHECK_NULL_RETURN(src_tensor->data());
-  CHECK_NULL_RETURN(dst_tensor->data());
-  // need replace with increase data ref count
-  MS_CHECK_FALSE(src_tensor->Size() == 0, RET_ERROR);
-  memcpy(dst_tensor->data(), src_tensor->data(), src_tensor->Size());
-  return RET_OK;
-}
-
-#ifndef CONTROLFLOW_TENSORLIST_CLIP
-int CarryDataKernel::MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist) {
-  // shape may change, because tensors.size() can be change in RunGraph
-  if (dst_tensorlist->data_type() != src_tensorlist->data_type() ||
-      dst_tensorlist->format() != src_tensorlist->format()) {
-    MS_LOG(ERROR) << "input tensorlist and output tensorlist data_type or format is incompatible";
-    MS_LOG(ERROR) << "input tensor data_type: " << src_tensorlist->data_type() << " vs "
-                  << "output tensor data_type: " << dst_tensorlist->data_type()
-                  << "input tensor format: " << src_tensorlist->format() << " vs "
-                  << "output tensor format: " << dst_tensorlist->format();
-    return RET_ERROR;
-  }
-  // when tensorlist malloc is done. this need to check element_shape compatibility
-  dst_tensorlist->set_element_shape(src_tensorlist->element_shape());
-
-  auto update_data_type = kTypeUnknown;
-  auto dst_tensor_data_type = dst_tensorlist->tensors_data_type();
-  auto src_tensor_data_type = src_tensorlist->tensors_data_type();
-  if (dst_tensor_data_type != src_tensor_data_type) {
-    if (src_tensor_data_type != kTypeUnknown && dst_tensor_data_type != kTypeUnknown) {
-      MS_LOG(ERROR) << "input tensorlist and output tensorlist is incompatible";
-      return RET_ERROR;
-    }
-    update_data_type = dst_tensor_data_type != kTypeUnknown ? dst_tensor_data_type : src_tensor_data_type;
-  }
-  if (update_data_type != kTypeUnknown) {
-    src_tensorlist->set_tensors_data_type(update_data_type);
-    dst_tensorlist->set_tensors_data_type(update_data_type);
-  }
-  size_t src_tensorlist_tensors_size = src_tensorlist->tensors().size();
-  for (size_t i = 0; i < src_tensorlist_tensors_size; ++i) {
-    auto &src_tensor = src_tensorlist->tensors()[i];
-    auto &dst_tensor = dst_tensorlist->tensors()[i];
-
-    if (src_tensor->allocator() != nullptr) {
-      src_tensor->allocator()->IncRefCount(src_tensor->data(), dst_tensor->ref_count());
-    }
-    dst_tensor->set_own_data(src_tensor->own_data());
-    if (src_tensor->data() != nullptr) {
-      dst_tensor->set_data(src_tensor->data());
-    }
-    dst_tensor->set_shape(src_tensor->shape());
-  }
-  return RET_OK;
-}
-#endif
-}  // namespace mindspore::kernel

@@ -1,46 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CARRY_DATA_H_
-#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CARRY_DATA_H_
-
-#include <vector>
-#include "src/inner_kernel.h"
-#include "src/tensor.h"
-#ifndef CONTROLFLOW_TENSORLIST_CLIP
-#include "src/tensorlist.h"
-#endif
-
-namespace mindspore::kernel {
-class CarryDataKernel : public InnerKernel {
- public:
-  CarryDataKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
-      : InnerKernel(parameter, inputs, outputs, ctx) {}
-  ~CarryDataKernel() override = default;
-
- protected:
-  int MoveData(const std::vector<lite::Tensor *>::iterator &dst_begin,
-               const std::vector<lite::Tensor *>::iterator &dst_end,
-               const std::vector<lite::Tensor *>::iterator &src_begin,
-               const std::vector<lite::Tensor *>::iterator &src_limit);
-  int MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_tensor);
-#ifndef CONTROLFLOW_TENSORLIST_CLIP
-  int MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist);
-#endif
-};
-}  // namespace mindspore::kernel
-
-#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CARRY_DATA_H_

@@ -74,7 +74,7 @@ class ConvolutionBaseCPUKernel : public InnerKernel {

   virtual int MallocWeightBiasData() { return RET_OK; }
   virtual void PackWeight() {}
-  bool IsRepack() { return is_repack_; }
+  bool IsRepack() const { return is_repack_; }
   std::unordered_map<uintptr_t, void *> addr_map;
   void *packed_weight_ = nullptr;
   void *bias_data_ = nullptr;

@@ -55,7 +55,7 @@ int GroupConvolutionBaseCPUKernel::ReSize() {
   return RET_OK;
 }

-void GroupConvolutionBaseCPUKernel::FreeSubKernel() {
+GroupConvolutionBaseCPUKernel::~GroupConvolutionBaseCPUKernel() {
   for (auto &sub_conv : group_convs_) {
     // free sub conv input tensors / output tensors manually
     auto sub_in_tensors = sub_conv->in_tensors();

@@ -102,7 +102,6 @@ int GroupConvolutionBaseCPUKernel::PreProcess() {
       sub_kernel_in_tensor->set_shape(in_shape);
       ret = sub_kernel_in_tensor->MallocData();
       if (ret != RET_OK) {
-        FreeSubKernel();
         MS_LOG(ERROR) << "sub kernel in tensor malloc data failed.";
         return ret;
       }

@@ -116,7 +115,6 @@ int GroupConvolutionBaseCPUKernel::PreProcess() {
         tensor->set_shape(out_shape);
         ret = tensor->MallocData();
         if (ret != RET_OK) {
-          FreeSubKernel();
           MS_LOG(ERROR) << "sub kernel out tensor malloc data failed.";
           return ret;
         }

@@ -134,7 +132,6 @@ int GroupConvolutionBaseCPUKernel::PreProcess() {
     CHECK_NULL_RETURN(output);
     auto ret = output->MallocData();
     if (ret != RET_OK) {
-      FreeSubKernel();
       MS_LOG(ERROR) << "group conv out tensor malloc data failed.";
       return ret;
     }

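Folding FreeSubKernel() into the destructor removes the need to call it on every early-error return in PreProcess(): the destructor runs on all exit paths once the kernel is destroyed. A minimal sketch of the refactor (illustrative names, not the real classes):

#include <vector>

struct SubKernelSketch { /* ... */ };

class GroupConvSketch {
 public:
  ~GroupConvSketch() {
    for (auto *k : subs_) delete k;  // cleanup runs on every exit path
  }
  int PreProcess() {
    if (!AllocBuffers()) {
      return -1;  // no explicit FreeSubKernel() needed before returning
    }
    return 0;
  }

 private:
  bool AllocBuffers() { return true; }
  std::vector<SubKernelSketch *> subs_;
};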
@@ -35,7 +35,7 @@ class GroupConvolutionBaseCPUKernel : public ConvolutionBaseCPUKernel {
         group_conv_creator_(group_conv_creator),
         group_num_(group_num) {}  // opParameter(in channel, out channel) in this kernel has been split to groups, if
                                   // you want to get real params, multiply in channel / out channel with group num
-  ~GroupConvolutionBaseCPUKernel() override { FreeSubKernel(); }
+  ~GroupConvolutionBaseCPUKernel() override;

   int Prepare() override;
   int ReSize() override;

@@ -81,7 +81,8 @@ lite::Tensor *CreateConstTensor(const lite::Tensor *tensor, const std::vector<in
     MS_LOG(ERROR) << "Tensor data size should not be 0.";
     return nullptr;
   }
-  uint8_t *new_tensor_data = reinterpret_cast<uint8_t *>(tensor->data()) + index * new_tensor->Size();
+  void *new_tensor_data =
+      reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(tensor->data()) + index * new_tensor->Size());
   memcpy(new_tensor->data(), reinterpret_cast<void *>(new_tensor_data), new_tensor->Size());
   return new_tensor;
 }

|
|||
return lite::RET_OK;
|
||||
}
|
||||
|
||||
int GroupConvCreator::NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output) {
|
||||
int GroupConvCreator::NewOutputTensor(std::vector<lite::Tensor *> *tensors, const lite::Tensor *output) const {
|
||||
auto out_tensor = CreateVarTensor({output_shape_, output->format(), data_type_, output->category(), false}, infered_);
|
||||
if (out_tensor == nullptr) {
|
||||
return lite::RET_ERROR;
|
||||
|
|
|
@@ -35,12 +35,11 @@ struct TensorInfo {
 class GroupConvCreator {
  public:
   GroupConvCreator(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs, OpParameter *op_parameter,
-                   const lite::InnerContext *ctx, bool is_quant, TypeId data_type)
+                   bool is_quant, TypeId data_type)
       : origin_inputs_(std::move(inputs)),
         origin_outputs_(std::move(outputs)),
         is_quant_(is_quant),
-        data_type_(data_type),
-        ctx_(ctx) {
+        data_type_(data_type) {
     auto shape = origin_outputs_.front()->shape();
     infered_ = std::find(shape.begin(), shape.end(), -1) == shape.end();
     conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter);

@@ -48,10 +47,8 @@ class GroupConvCreator {

   ~GroupConvCreator() = default;

  public:
   void SetShapeOfTensors();
   int CreateConvs(std::vector<kernel::InnerKernel *> *group_convs);
   std::vector<kernel::InnerKernel *> *get_group_conv() { return &group_convs_; }
   void CopyQuantParam(const std::vector<lite::Tensor *> *tensors);
   int GetSingleConvParam(ConvParameter *conv_param, std::vector<lite::Tensor *> *new_inputs,
                          std::vector<lite::Tensor *> *new_outputs, int group_id);

@@ -64,7 +61,7 @@ class GroupConvCreator {
   void FreeGroupConvs();
   int NewInputTensor(std::vector<lite::Tensor *> *tensors);
   int NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id);
-  int NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output);
+  int NewOutputTensor(std::vector<lite::Tensor *> *tensors, const lite::Tensor *output) const;

  private:
   std::vector<lite::Tensor *> origin_inputs_;

@@ -78,7 +75,6 @@ class GroupConvCreator {
   bool infered_ = false;
   bool is_quant_ = false;
   TypeId data_type_;
-  const lite::InnerContext *ctx_ = nullptr;
 };

 ConvParameter *CreateNewConvParameter(const ConvParameter *parameter);

@@ -87,7 +87,6 @@ int OneHotCPUKernel::ReSize() {
 }

 int RunOneHot(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto onehot_kernel = reinterpret_cast<OneHotCPUKernel *>(cdata);
   if (onehot_kernel == nullptr) {
     MS_LOG(ERROR) << "cast OneHotCPUKernel failed";

@@ -41,7 +41,6 @@ class OneHotCPUKernel : public InnerKernel {
   int InitOnOffValueForThreeInputs();
   int InitOnOffValueForFourInputs();

  private:
   int thread_num_ = 1;
   int axis_ = 0;
   int outer_size_ = 0;

@@ -33,8 +33,6 @@ class PartialFusionKernel : public InnerKernel {
   int Run() override;
   void set_subgraph_kernels(const std::vector<LiteKernel *> &subgraph_kernels) { subgraph_kernels_ = subgraph_kernels; }
   std::vector<LiteKernel *> subgraph_kernels() const { return subgraph_kernels_; }

  private:
   // One partial corresponds to a subgraph at offline stage, after graph schedule, a subgraph may be split into many
   // graphs, so use a vector.
   std::vector<LiteKernel *> subgraph_kernels_{};

@@ -30,7 +30,7 @@ class PoolingBaseCPUKernel : public InnerKernel {
  public:
   PoolingBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                        const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx)
-      : InnerKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {
+      : InnerKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {
     pooling_param_ = reinterpret_cast<PoolingParameter *>(op_parameter_);
   }
   ~PoolingBaseCPUKernel() = default;

@@ -42,7 +42,6 @@ class PoolingBaseCPUKernel : public InnerKernel {
   void FreeQuantParam();

  protected:
-  const InnerContext *ctx_;
   int thread_count_;
   PoolingParameter *pooling_param_ = nullptr;
   QuantArg **pooling_quant_arg_ = nullptr;

@@ -159,12 +159,11 @@ int PriorBoxCPUKernel::GeneratePriorBox() {

 int PriorBoxCPUKernel::PriorBoxImpl(int task_id) {
   auto src = output_.data();
-  MS_CHECK_TRUE_RET(src != nullptr, RET_NULL_PTR);
+  CHECK_NULL_RETURN(src);
   auto output = out_tensors_.at(0);
-  MS_CHECK_TRUE_RET(output != nullptr, RET_NULL_PTR);
+  CHECK_NULL_RETURN(output);
   auto output_data = reinterpret_cast<float *>(output->data());
-  MS_CHECK_TRUE_RET(output_data != nullptr, RET_NULL_PTR);
+  CHECK_NULL_RETURN(output_data);
   auto ret = PriorBox(src, output_data, output_.size(), task_id, thread_count_);
   return ret;
 }

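CHECK_NULL_RETURN states the intent (null check, logged error, fixed return code) more directly than the generic predicate macro. The exact MindSpore definitions are not shown in this diff; plausible shapes, for orientation only:

// Illustrative macro shapes, not the exact MindSpore definitions.
#define MS_CHECK_TRUE_RET(value, errcode) \
  do {                                    \
    if (!(value)) {                       \
      return (errcode);                   \
    }                                     \
  } while (0)

#define CHECK_NULL_RETURN(ptr)                 \
  do {                                         \
    if ((ptr) == nullptr) {                    \
      MS_LOG(ERROR) << #ptr << " is nullptr."; \
      return RET_NULL_PTR;                     \
    }                                          \
  } while (0)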
@@ -106,7 +106,7 @@ int ReduceBaseCPUKernel::Prepare() {
   MS_CHECK_FALSE_MSG((axes_tensor->data_type() != kNumberTypeInt && axes_tensor->data_type() != kNumberTypeInt32),
                      RET_ERROR, "The data type of axes tensor should be int32");
   num_axes_ = axes_tensor->ElementsNum();
-  if (num_axes_ <= 0 && num_axes_ > MAX_SHAPE_SIZE) {
+  if (num_axes_ <= 0 || num_axes_ > MAX_SHAPE_SIZE) {
     MS_LOG(ERROR) << "input axes invalid.";
     return RET_ERROR;
   }

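The old condition could never fire: no value is simultaneously <= 0 and greater than MAX_SHAPE_SIZE, so the `&&` guard was dead code and invalid axis counts slipped through. A quick compile-time check of both predicates (kMaxShapeSize stands in for MAX_SHAPE_SIZE):

// Compile-time demonstration of the && vs || fix.
constexpr int kMaxShapeSize = 8;  // illustrative value
static_assert(!(0 <= 0 && 0 > kMaxShapeSize), "the && guard never fires, even for 0 axes");
static_assert(0 <= 0 || 0 > kMaxShapeSize, "the || guard rejects 0 axes");
static_assert(9 <= 0 || 9 > kMaxShapeSize, "the || guard rejects oversized counts");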
@@ -19,7 +19,6 @@

 #include <vector>
 #include "src/inner_kernel.h"
-
 #include "nnacl/reduce_parameter.h"

 namespace mindspore::kernel {

@@ -20,7 +20,6 @@
 #include "src/inner_kernel.h"
 #include "include/context.h"
 #include "include/errorcode.h"
-#include "src/runtime/kernel/arm/base/carry_data.h"

 using mindspore::lite::InnerContext;
 namespace mindspore::kernel {

@@ -22,6 +22,7 @@

 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NOT_SUPPORT;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Select;

@@ -34,6 +35,121 @@ int SelectCPUKernel::Prepare() { return RET_OK; }

 int SelectCPUKernel::ReSize() { return RET_OK; }

+int MoveTensorData(lite::Tensor *dst_tensor, const lite::Tensor *src_tensor) {
+  if (dst_tensor->data_type() != src_tensor->data_type() || dst_tensor->format() != src_tensor->format() ||
+      !(dst_tensor->shape() == src_tensor->shape() || (dst_tensor->shape().empty() && src_tensor->shape().empty()))) {
+    MS_LOG(ERROR) << "input tensor and output tensor is incompatible.";
+    MS_LOG(ERROR) << "input tensor data_type: " << src_tensor->data_type() << " vs "
+                  << "output tensor data_type: " << dst_tensor->data_type()
+                  << "input tensor format: " << src_tensor->format() << " vs "
+                  << "output tensor format: " << dst_tensor->format() << " input tensor shape: " << src_tensor->shape()
+                  << " vs "
+                  << "output tensor shape: " << dst_tensor->shape();
+    return RET_ERROR;
+  }
+  if (src_tensor->allocator() == nullptr) {
+    MS_LOG(ERROR) << "src_tensor allocator is nullptr.";
+    return RET_ERROR;
+  }
+
+  CHECK_NULL_RETURN(src_tensor->data());
+  CHECK_NULL_RETURN(dst_tensor->data());
+  // need replace with increase data ref count
+  MS_CHECK_FALSE(src_tensor->Size() == 0, RET_ERROR);
+  (void)memcpy(dst_tensor->data(), src_tensor->data(), src_tensor->Size());
+  return RET_OK;
+}
+
+#ifndef CONTROLFLOW_TENSORLIST_CLIP
+int MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist) {
+  // shape may change, because tensors.size() can be change in RunGraph
+  if (dst_tensorlist->data_type() != src_tensorlist->data_type() ||
+      dst_tensorlist->format() != src_tensorlist->format()) {
+    MS_LOG(ERROR) << "input tensorlist and output tensorlist data_type or format is incompatible";
+    MS_LOG(ERROR) << "input tensor data_type: " << src_tensorlist->data_type() << " vs "
+                  << "output tensor data_type: " << dst_tensorlist->data_type()
+                  << "input tensor format: " << src_tensorlist->format() << " vs "
+                  << "output tensor format: " << dst_tensorlist->format();
+    return RET_ERROR;
+  }
+  // when tensorlist malloc is done. this need to check element_shape compatibility
+  dst_tensorlist->set_element_shape(src_tensorlist->element_shape());
+
+  auto update_data_type = kTypeUnknown;
+  auto dst_tensor_data_type = dst_tensorlist->tensors_data_type();
+  auto src_tensor_data_type = src_tensorlist->tensors_data_type();
+  if (dst_tensor_data_type != src_tensor_data_type) {
+    if (src_tensor_data_type != kTypeUnknown && dst_tensor_data_type != kTypeUnknown) {
+      MS_LOG(ERROR) << "input tensorlist and output tensorlist is incompatible";
+      return RET_ERROR;
+    }
+    update_data_type = dst_tensor_data_type != kTypeUnknown ? dst_tensor_data_type : src_tensor_data_type;
+  }
+  if (update_data_type != kTypeUnknown) {
+    src_tensorlist->set_tensors_data_type(update_data_type);
+    dst_tensorlist->set_tensors_data_type(update_data_type);
+  }
+  size_t src_tensorlist_tensors_size = src_tensorlist->tensors().size();
+  for (size_t i = 0; i < src_tensorlist_tensors_size; ++i) {
+    auto &src_tensor = src_tensorlist->tensors()[i];
+    auto &dst_tensor = dst_tensorlist->tensors()[i];
+
+    if (src_tensor->allocator() != nullptr) {
+      src_tensor->allocator()->IncRefCount(src_tensor->data(), dst_tensor->ref_count());
+    }
+    dst_tensor->set_own_data(src_tensor->own_data());
+    if (src_tensor->data() != nullptr) {
+      dst_tensor->set_data(src_tensor->data());
+    }
+    dst_tensor->set_shape(src_tensor->shape());
+  }
+  return RET_OK;
+}
+#endif
+
+int MoveData(const std::vector<lite::Tensor *>::iterator &dst_begin,
+             const std::vector<lite::Tensor *>::iterator &dst_end,
+             const std::vector<lite::Tensor *>::iterator &src_begin,
+             const std::vector<lite::Tensor *>::iterator &src_limit) {
+  for (auto dst_iter = dst_begin, src_iter = src_begin; dst_iter != dst_end; dst_iter++, src_iter++) {
+    if (src_iter == src_limit) {
+      MS_LOG(ERROR) << "out of range of input tensor";
+      return RET_ERROR;
+    }
+    auto *dst_tensor = *dst_iter;
+    auto *src_tensor = *src_iter;
+    if (dst_tensor == nullptr || src_tensor == nullptr) {
+      MS_LOG(ERROR) << "input tensor or output tensor of merge is nullptr";
+      return RET_ERROR;
+    }
+    lite::STATUS ret = RET_OK;
+    if (src_tensor->IsConst() || src_tensor->IsGraphInput()) {
+      MS_LOG(DEBUG) << "Carry const data and graph inputs.";
+      dst_tensor->set_data(src_tensor->data());
+      dst_tensor->set_own_data(false);
+    } else {
+      if (src_tensor->data_type() == kObjectTypeTensorType && dst_tensor->data_type() == kObjectTypeTensorType) {
+#ifndef CONTROLFLOW_TENSORLIST_CLIP
+        MS_LOG(DEBUG) << "Carry MoveTensorListData";
+        ret = MoveTensorListData(reinterpret_cast<lite::TensorList *>(dst_tensor),
+                                 reinterpret_cast<lite::TensorList *>(src_tensor));
+#else
+        MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
+        return RET_NOT_SUPPORT;
+#endif
+      } else {
+        MS_LOG(DEBUG) << "Carry MoveTensorData";
+        ret = MoveTensorData(dst_tensor, src_tensor);
+      }
+    }
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Move data failed : " << ret;
+      return ret;
+    }
+  }
+  return RET_OK;
+}
+
 // inputs: bool*1 true-data*n false-data*n
 // output: data*n
 int SelectCPUKernel::Run() {

@@ -17,18 +17,17 @@
 #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SELECT_H_

 #include <vector>
-#include "src/runtime/kernel/arm/base/carry_data.h"
 #include "src/inner_kernel.h"
 #ifndef CONTROLFLOW_TENSORLIST_CLIP
 #include "src/tensorlist.h"
 #endif

 namespace mindspore::kernel {
-class SelectCPUKernel : public CarryDataKernel {
+class SelectCPUKernel : public InnerKernel {
  public:
   SelectCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
-      : CarryDataKernel(parameter, inputs, outputs, ctx) {}
+      : InnerKernel(parameter, inputs, outputs, ctx) {}
   ~SelectCPUKernel() override = default;
   int Prepare() override;
   int ReSize() override;

@@ -33,8 +33,6 @@ class SliceCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-
- public:
   int SliceParallelRun(int thread_id);

  protected:

@@ -26,7 +26,7 @@ class SoftmaxBaseCPUKernel : public InnerKernel {
  public:
   SoftmaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
-      : InnerKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {
+      : InnerKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {
     softmax_param_ = reinterpret_cast<SoftmaxParameter *>(op_parameter_);
   }
   ~SoftmaxBaseCPUKernel() = default;

@@ -36,7 +36,6 @@ class SoftmaxBaseCPUKernel : public InnerKernel {
   int Run() override { return 0; }

  protected:
-  const lite::InnerContext *ctx_;
   int thread_count_;
   SoftmaxParameter *softmax_param_;
 };

@@ -41,8 +41,6 @@ class SplitBaseCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-
- public:
   int Split(int task_id);
   static int CheckAndInitSplitParam(const lite::Tensor &in_tensor, SplitParameter *param);

@@ -25,9 +25,9 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_SplitWithOverlap;
-#define MIN_NUM_SPLIT 2

 namespace mindspore::kernel {
+const int MIN_NUM_SPLIT = 2;

 int SplitWithOverlapBaseCPUKernel::CalculateSplitedShapes(const std::vector<int> &shape) {
   int total_block_count = 0;

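Replacing the #define with a typed constant scopes MIN_NUM_SPLIT to the kernel namespace, gives it a real type, and keeps it visible to debuggers; the macro would also silently collide with any other MIN_NUM_SPLIT in the translation unit. For example (EnoughSplits is a hypothetical helper):

namespace mindspore::kernel {
const int MIN_NUM_SPLIT = 2;  // typed, scoped, debugger-visible

inline bool EnoughSplits(int split_num) { return split_num >= MIN_NUM_SPLIT; }
}  // namespace mindspore::kernel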
@@ -41,7 +41,6 @@ class SplitWithOverlapBaseCPUKernel : public InnerKernel {
  private:
   int CalculateSplitedShapes(const std::vector<int> &shape);

- private:
   // range: [start, end)
   std::vector<int> start_indices_;
   std::vector<int> end_indices_;

@@ -68,7 +68,6 @@ int StackBaseCPUKernel::ReSize() {
     copy_size_ = GetCopyNum(input0_shape, axis_, input0_shape.size()) * data_type_size_;
     outer_size_ = GetOuterSize(input0_shape, axis_);
   }
   MS_CHECK_GT(copy_size_, 0, RET_ERROR);
   return RET_OK;
 }

@@ -83,7 +82,7 @@ int StackBaseCPUKernel::Prepare() {
   return ReSize();
 }

-int StackBaseCPUKernel::Execute(int task_id) {
+int StackBaseCPUKernel::StackExecute(int task_id) {
   auto output_data = reinterpret_cast<void *>(out_tensors_.at(0)->data());
   if (output_data == nullptr) {
     return RET_NULL_PTR;

@@ -103,7 +102,7 @@ int StackBaseCPUKernel::Execute(int task_id) {
 static int StackRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto stack = reinterpret_cast<StackBaseCPUKernel *>(cdata);
-  if (stack->Execute(task_id) != RET_OK) {
+  if (stack->StackExecute(task_id) != RET_OK) {
     return RET_ERROR;
   }
   return RET_OK;

@@ -34,7 +34,7 @@ class StackBaseCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int StackExecute(int task_id);

  protected:
   StackParameter *stack_param_ = nullptr;

@@ -202,7 +202,7 @@ kernel::InnerKernel *CpuGroupConvFp16KernelCreator(const std::vector<lite::Tenso
                                                    const std::vector<lite::Tensor *> &outputs,
                                                    OpParameter *op_parameter, const InnerContext *ctx) {
   auto *group_conv_creator =
-      new (std::nothrow) GroupConvCreator(inputs, outputs, op_parameter, ctx, false, kNumberTypeFloat16);
+      new (std::nothrow) GroupConvCreator(inputs, outputs, op_parameter, false, kNumberTypeFloat16);
   if (group_conv_creator == nullptr) {
     MS_LOG(ERROR) << "new GroupConvCreator fail";
     free(op_parameter);

@@ -276,7 +276,7 @@ kernel::InnerKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *
 kernel::InnerKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                    const std::vector<lite::Tensor *> &outputs,
                                                    OpParameter *op_parameter, const lite::InnerContext *ctx) {
-  auto *group_conv_creator = new GroupConvCreator(inputs, outputs, op_parameter, ctx, false, kNumberTypeFloat32);
+  auto *group_conv_creator = new GroupConvCreator(inputs, outputs, op_parameter, false, kNumberTypeFloat32);
   auto group_kernel = new (std::nothrow) GroupConvolutionFp32CPUKernel(
       op_parameter, inputs, outputs, ctx, group_conv_creator, reinterpret_cast<ConvParameter *>(op_parameter)->group_);
   if (group_kernel == nullptr) {

@@ -108,7 +108,7 @@ kernel::InnerKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tenso
                   << conv_param->input_channel_;
     return nullptr;
   }
-  auto *group_conv_creator = new GroupConvCreator(inputs, outputs, op_parameter, ctx, true, kNumberTypeInt8);
+  auto *group_conv_creator = new GroupConvCreator(inputs, outputs, op_parameter, true, kNumberTypeInt8);
   return new (std::nothrow)
       GroupConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, group_conv_creator, group);
 }