!21797 [MS][LITE]Rename control to controlflow

Merge pull request !21797 from gongdaguo/package_clip8
i-robot authored on 2021-08-16 07:22:51 +00:00, committed by Gitee
commit a34d737858
24 changed files with 72 additions and 70 deletions
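In short, this PR renames the CMake option MSLITE_CONTROL_TENSORLIST to MSLITE_CONTROLFLOW_TENSORLIST and the matching compile definition ENABLE_CONTROL_TENSORLIST to ENABLE_CONTROLFLOW_TENSORLIST, and also corrects the misspelled unsuppor_custom_kernel_register_log and unsuppor_delegate_log constants. A minimal sketch of the guard pattern the renamed macro drives follows; the function name and messages are illustrative and not taken from the files below:

#include <cstdio>

// Illustrative only: building with MSLITE_CONTROLFLOW_TENSORLIST=on makes CMake call
// add_compile_definitions(ENABLE_CONTROLFLOW_TENSORLIST), so the first branch is compiled;
// otherwise the stub reports that the feature was compiled out, mirroring the
// unsupport_controlflow_tensorlist_log fallback used throughout this diff.
int RunTensorListOp() {
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
  std::puts("controlflow/tensorlist kernels are available");
  return 0;   // analogous to RET_OK
#else
  std::puts("rebuild with MSLITE_CONTROLFLOW_TENSORLIST=on to enable this op");
  return -1;  // analogous to RET_NOT_SUPPORT
#endif
}

int main() { return RunTensorListOp() == 0 ? 0 : 1; }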

View File

@ -45,7 +45,7 @@ if(MSLITE_STRING_KERNEL)
${KERNEL_SRC_INFER_STRING}
)
endif()
if(MSLITE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
file(GLOB KERNEL_SRC_INFER_CONTROL_TENSORLIST
${NNACL_DIR}/infer/control/*.c
)

View File

@ -18,7 +18,7 @@
#include <string.h>
#include "nnacl/infer/infer_register.h"
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, const vvector *tensor_shape) {
// This function creates a new tensors_ array.
// You must set the shape (param2: tensor_shape) and data_type_ (tensors_data_type_ = param1: dtype) of each tensor in
@ -418,7 +418,7 @@ bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
if (inputs[i] == NULL) {
return false;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (inputs[i]->data_type_ == kObjectTypeTensorType) {
TensorListC *input_tensor_list = (TensorListC *)inputs[i];
if (input_tensor_list->shape_value_ == -1) {
@ -431,7 +431,7 @@ bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
return false;
}
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
}
#endif
}

View File

@ -138,7 +138,7 @@ typedef struct vvector {
size_t size_; // number of shapes
} vvector;
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
typedef struct TensorListC {
bool is_ready_;
int data_type_;
@ -160,7 +160,7 @@ typedef struct VectorC {
size_t per_malloc_size_;
} VectorC;
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, const vvector *tensor_shape);
int TensorListMergeShape(int *element_shape, size_t *element_shape_size, const int *tmp, size_t tmp_size);
bool TensorListIsFullyDefined(const int *shape, size_t shape_size);

View File

@ -34,7 +34,7 @@ int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
TensorC *output = outputs[i];
SetDataTypeFormat(output, input);
if (input->data_type_ == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
TensorListC *input_tensorlist = (TensorListC *)(input);
TensorListC *output_tensorlist = (TensorListC *)(output);
output_tensorlist->element_shape_size_ = input_tensorlist->element_shape_size_;

View File

@ -35,7 +35,7 @@ option(MSLITE_ENABLE_RUNTIME_PASS "enable runtime pass" on)
option(MSLITE_COMPILE_NNIE "compile NNIE" off)
option(MSLITE_ENABLE_HIGH_PERFORMANCE "enable high performance" on)
option(MSLITE_STRING_KERNEL "enable string kernel" on)
option(MSLITE_CONTROL_TENSORLIST "enable control and tensorlist" on)
option(MSLITE_CONTROLFLOW_TENSORLIST "enable control and tensorlist" on)
option(MSLITE_AUTO_PARALLEL "enable automatic parallelism" on)
option(MSLITE_WEIGHT_DECODE "enable weight decode" on)
option(MSLITE_CUSTOM_KERNEL_REGISTRY "enable extend kernel registry" on)
@ -96,8 +96,8 @@ endif()
if(DEFINED ENV{MSLITE_STRING_KERNEL})
set(MSLITE_STRING_KERNEL $ENV{MSLITE_STRING_KERNEL})
endif()
if(DEFINED ENV{MSLITE_CONTROL_TENSORLIST})
set(MSLITE_CONTROL_TENSORLIST $ENV{MSLITE_CONTROL_TENSORLIST})
if(DEFINED ENV{MSLITE_CONTROLFLOW_TENSORLIST})
set(MSLITE_CONTROLFLOW_TENSORLIST $ENV{MSLITE_CONTROLFLOW_TENSORLIST})
endif()
if(DEFINED ENV{MSLITE_AUTO_PARALLEL})
set(MSLITE_AUTO_PARALLEL $ENV{MSLITE_AUTO_PARALLEL})
@ -120,8 +120,8 @@ set(ENABLE_MINDRT $ENV{MSLITE_MINDRT_USE})
if(MSLITE_STRING_KERNEL)
add_compile_definitions(ENABLE_STRING_KERNEL)
endif()
if(MSLITE_CONTROL_TENSORLIST)
add_compile_definitions(ENABLE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
add_compile_definitions(ENABLE_CONTROLFLOW_TENSORLIST)
endif()
if(MSLITE_AUTO_PARALLEL)
add_compile_definitions(ENABLE_AUTO_PARALLEL)
@ -215,7 +215,7 @@ message(STATUS "\tMSLITE_ENABLE_TESTCASES = \t${MSLITE_ENABLE_TESTCASES}")
message(STATUS "\tMSLITE_ENABLE_HIGH_PERFORMANCE = \t${MSLITE_ENABLE_HIGH_PERFORMANCE}")
message(STATUS "\tMSLITE_ENABLE_RUNTIME_PASS = \t${MSLITE_ENABLE_RUNTIME_PASS}")
message(STATUS "\tMSLITE_STRING_KERNEL = \t${MSLITE_STRING_KERNEL}")
message(STATUS "\tMSLITE_CONTROL_TENSORLIST = \t${MSLITE_CONTROL_TENSORLIST}")
message(STATUS "\tMSLITE_CONTROLFLOW_TENSORLIST = \t${MSLITE_CONTROLFLOW_TENSORLIST}")
message(STATUS "\tMSLITE_AUTO_PARALLEL = \t${MSLITE_AUTO_PARALLEL}")
message(STATUS "\tMSLITE_WEIGHT_DECODE = \t${MSLITE_WEIGHT_DECODE}")
message(STATUS "\tMSLITE_CUSTOM_KERNEL_REGISTRY = \t${MSLITE_CUSTOM_KERNEL_REGISTRY}")

View File

@ -90,7 +90,7 @@ set(LITE_SRC
${CMAKE_CURRENT_SOURCE_DIR}/cpu_info.cc
)
if(MSLITE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
set(LITE_SRC
${LITE_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/tensorlist.cc

View File

@ -20,20 +20,20 @@ namespace mindspore {
const char *const unsupport_string_tensor_log =
"This mindspore-lite library does not support string tensors. Set environment variable MSLITE_STRING_KERNEL to on to "
"recompile it.";
const char *const unsupport_control_tensorlist_log =
"This mindspore-lite library does not support control and tensorlist op. Set environment variable "
"MSLITE_CONTROL_TENSORLIST to on to recompile it.";
const char *const unsupport_controlflow_tensorlist_log =
"This mindspore-lite library does not support controlflow and tensorlist op. Set environment variable "
"MSLITE_CONTROLFLOW_TENSORLIST to on to recompile it.";
const char *const unsupport_auto_parallel_log =
"The mindspore-lite library does not support auto parallel. Set environment variable MSLITE_AUTO_PARALLEL to on to "
"recompile it.";
const char *const unsupport_weight_decode_log =
"The mindspore-lite library does not support weight decode. Set environment variable MSLITE_WEIGHT_DECODE to on to "
"recompile it.";
const char *const unsuppor_custom_kernel_register_log =
const char *const unsupport_custom_kernel_register_log =
"The mindspore-lite library does not support custom kernel register. Set environment variable "
"MSLITE_CUSTOM_KERNEL_REGISTRY to on to "
"recompile it.";
const char *const unsuppor_delegate_log =
const char *const unsupport_delegate_log =
"The mindspore-lite library does not support delegate. Set environment variable "
"MSLITE_DELEGATE_USE to on to "
"recompile it.";

View File

@ -44,7 +44,7 @@ void FreeAllTensorC(std::vector<TensorC *> *tensors_in) {
if (i == nullptr) {
continue;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (i->data_type_ == kObjectTypeTensorType) {
TensorListC *tensorListC = reinterpret_cast<TensorListC *>(i);
FreeTensorListC(tensorListC);
@ -53,7 +53,7 @@ void FreeAllTensorC(std::vector<TensorC *> *tensors_in) {
#endif
free(i);
i = nullptr;
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
}
#endif
}
@ -84,7 +84,7 @@ void TensorC2Tensor(const TensorC *src, Tensor *dst) {
dst->set_shape(std::vector<int>(src->shape_, src->shape_ + src->shape_size_));
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
void FreeTensorListC(TensorListC *tensorlist_c) {
MS_ASSERT(tensorlist_c != nullptr);
if (tensorlist_c->tensors_ != nullptr) {
@ -169,7 +169,7 @@ int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lit
if (parameter->type_ == mindspore::schema::PrimitiveType_TensorListFromTensor ||
parameter->type_ == mindspore::schema::PrimitiveType_TensorListReserve ||
parameter->type_ == mindspore::schema::PrimitiveType_TensorListSetItem) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
// TensorListC ->TensorC
auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC)));
if (tensor_list_c == nullptr) {
@ -179,7 +179,7 @@ int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lit
out_tensor_c->push_back(reinterpret_cast<TensorC *const>(tensor_list_c));
return RET_OK;
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_ERROR;
#endif
} else {
@ -193,7 +193,7 @@ int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite
int ret = RET_OK;
for (auto input : inputs) {
if (input->data_type() == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
// Tensor ->TensorList -> TensorListC -> TensorC
auto *tensor_list = reinterpret_cast<TensorList *>(input);
auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC)));
@ -210,7 +210,7 @@ int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite
}
in_tensor_c->push_back(reinterpret_cast<TensorC *>(tensor_list_c));
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_NOT_SUPPORT;
#endif
} else {

View File

@ -31,7 +31,7 @@ int OutputTensor2TensorC(const std::vector<lite::Tensor *> &tensors_in, std::vec
void FreeAllTensorC(std::vector<TensorC *> *tensors_in);
int Tensor2TensorC(const Tensor *src, TensorC *dst);
void TensorC2Tensor(const TensorC *src, Tensor *dst);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
void FreeTensorListC(TensorListC *tensorListC);
int TensorList2TensorListC(TensorList *src, TensorListC *dst);
int TensorListC2TensorList(const TensorListC *src, TensorList *dst);

View File

@ -38,8 +38,10 @@ using mindspore::kernel::kCPU;
using mindspore::kernel::KERNEL_ARCH;
using mindspore::kernel::KernelCreator;
using mindspore::kernel::KernelKey;
#ifdef ENABLE_CUSTOM_KERNEL_REGISTRY
using mindspore::lite::registry::CreateKernel;
using mindspore::lite::registry::KernelDesc;
#endif
namespace mindspore::lite {
#ifdef ENABLE_CUSTOM_KERNEL_REGISTRY

View File

@ -196,7 +196,7 @@ void LiteKernelUtil::InitTensorInitRefCount(const std::vector<kernel::LiteKernel
int LiteKernelUtil::SetInput(const LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs) { return -1; }
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
bool LiteKernelUtil::IsSwitchCall(kernel::LiteKernel *kernel) {
#ifdef ENABLE_DELEGATE_USE
if (kernel->desc().delegate != nullptr) {

View File

@ -37,7 +37,7 @@ class LiteKernelUtil {
static int SetInput(const LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
static bool IsSwitchCall(kernel::LiteKernel *kernel);
#endif

View File

@ -108,7 +108,7 @@ void LiteOpActor::IsolateInputData(std::vector<std::shared_ptr<LiteOpActor>> *ac
if (old_tensor->data_type() == kNumberTypeFloat16 || old_tensor->data_type() == kNumberTypeFloat32) {
old_tensor->set_data_type(kernel_->desc().data_type);
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (old_tensor->data_type() == kObjectTypeTensorType) {
auto old_tensorlist = reinterpret_cast<TensorList *>(old_tensor);
if (old_tensorlist->tensors_data_type() == kNumberTypeFloat16 ||
@ -199,7 +199,7 @@ int LiteOpActor::CompileArrowThroughOutputKernels() {
return RET_OK;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int LiteOpActor::CompileArrowThroughPartialCall() {
#ifdef ENABLE_DELEGATE_USE
if (kernel_->desc().delegate != nullptr) {
@ -245,7 +245,7 @@ int LiteOpActor::CompileArrowThroughPartialCall() {
int LiteOpActor::CompileArrow() {
int ret;
output_data_arrows_.clear();
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
ret = CompileArrowThroughPartialCall();
if (ret != RET_OK) {
output_data_arrows_.clear();
@ -288,7 +288,7 @@ void LiteOpActor::MoveInputData(Tensor *dst_tensor, Tensor *src_tensor) {
return;
}
MS_ASSERT(src_tensor->allocator() != nullptr);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (src_tensor->data_type() == kObjectTypeTensorType) {
MoveTensorListInputData(reinterpret_cast<TensorList *>(dst_tensor), reinterpret_cast<TensorList *>(src_tensor));
} else {
@ -307,7 +307,7 @@ void LiteOpActor::SetInputData(Tensor *dst_tensor, Tensor *src_tensor) {
int LiteOpActor::CastInputData(Tensor *dst, Tensor *src) {
int ret = RET_OK;
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (src->data_type() != kObjectTypeTensorType) {
ret = CastTensorInputData(dst, src);
} else {
@ -325,7 +325,7 @@ bool LiteOpActor::NeedCastData(Tensor *dst_tensor, Tensor *src_tensor) {
dst_tensor->data_type() != src_tensor->data_type()) {
return true;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (dst_tensor->data_type() == kObjectTypeTensorType && src_tensor->data_type() == kObjectTypeTensorType &&
reinterpret_cast<TensorList *>(dst_tensor)->tensors_data_type() !=
reinterpret_cast<TensorList *>(src_tensor)->tensors_data_type()) {
@ -362,7 +362,7 @@ int LiteOpActor::CastTensorInputData(Tensor *dst, Tensor *src) {
return RET_ERROR;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
void LiteOpActor::MoveTensorListInputData(TensorList *dst_tensorlist, TensorList *src_tensorlist) {
MS_ASSERT(src_tensorlist != nullptr);
MS_ASSERT(dst_tensorlist != nullptr);
@ -680,7 +680,7 @@ void LiteOpActor::SetInputShape() {
MS_LOG(DEBUG) << "this->kernel_->name(): " << this->kernel_->name();
if (input_tensor->data_type() == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
auto input_tensorlist = reinterpret_cast<TensorList *>(input_tensor);
auto input_data_tensorlist = reinterpret_cast<TensorList *>(inputs_data_[i]);
input_tensorlist->FreeTensorListData();
@ -764,7 +764,7 @@ std::vector<std::shared_ptr<LiteOpActor>> CreateOpActor(const std::vector<kernel
for (auto &kernel : kernels) {
/* make subgraph name (actor name) unique */
kernel->set_name(kernel->name() + "_" + to_string(actor_count++));
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if ((kernel::LiteKernelUtil::IsSwitchCall(kernel))) {
auto switch_actor = std::make_shared<LiteSwitchOpActor>(kernel);
if (switch_actor == nullptr) {
@ -786,7 +786,7 @@ std::vector<std::shared_ptr<LiteOpActor>> CreateOpActor(const std::vector<kernel
actor->set_thread_pool(thread_pool);
subgraph_name_AID_map[kernel] = actor->GetAID();
actors.push_back(actor);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
}
#endif
}

View File

@ -101,7 +101,7 @@ class LiteOpActor : public OpActor<lite::Tensor> {
int CastInputData(Tensor *dst_tensor, Tensor *src_tensor);
bool NeedCastData(Tensor *dst_tensor, Tensor *src_tensor);
int CastTensorInputData(Tensor *dst_tensor, Tensor *src_tensor);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
void MoveTensorListInputData(TensorList *dst_tensor, TensorList *src_tensor);
int CastTensorListInputData(TensorList *dst_tensor, TensorList *src_tensor);
#endif
@ -114,7 +114,7 @@ class LiteOpActor : public OpActor<lite::Tensor> {
#endif
};
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
class LiteSwitchOpActor : public LiteOpActor {
public:
explicit LiteSwitchOpActor(kernel::LiteKernel *kernel) : LiteOpActor(kernel) {}

View File

@ -126,14 +126,14 @@ int LiteSession::ConvertTensorsData(const lite::Model *model, size_t tensor_inde
MS_ASSERT(dst_tensor != nullptr);
if (src_tensor->data() != nullptr && src_tensor->data()->size() > 0) {
if (dst_tensor->data_type() == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
auto tensor_list = reinterpret_cast<TensorList *>(dst_tensor);
if (tensor_list->Decode(reinterpret_cast<const int *>(src_tensor->data()->data())) != RET_OK) {
MS_LOG(ERROR) << "Decode tensorlist data failed";
return RET_ERROR;
}
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_NOT_SUPPORT;
#endif
} else {
@ -167,7 +167,7 @@ lite::Tensor *LiteSession::ConvertTensor(const schema::Tensor &src_tensor) {
}
lite::Tensor *dst_tensor = nullptr;
if (TypeId(src_tensor.dataType()) == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
dst_tensor = new (std::nothrow) TensorList(shape, std::vector<int>(), src_category);
// set tensor list datatype
auto tensor_list = reinterpret_cast<TensorList *>(dst_tensor);
@ -176,7 +176,7 @@ lite::Tensor *LiteSession::ConvertTensor(const schema::Tensor &src_tensor) {
tensor_list->set_tensors_data_type(tensor_data_type);
}
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
#endif
} else {
dst_tensor = new (std::nothrow)

View File

@ -13,7 +13,7 @@ if(MSLITE_STRING_KERNEL)
${OPS_SRC_STRING}
)
endif()
if(MSLITE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
file(GLOB OPS_SRC_CONTROL_TENSORLIST
${CMAKE_CURRENT_SOURCE_DIR}/populate/control/*.cc
)
@ -34,7 +34,7 @@ if(ENABLE_V0)
${OPS_SRC_STRING_V0}
)
endif()
if(MSLITE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
file(GLOB OPS_SRC_CONTROL_TENSORLIST_V0
${CMAKE_CURRENT_SOURCE_DIR}/populate/v0/control/*.cc
)

View File

@ -28,7 +28,7 @@ int RegisterKernel::RegCustomKernel(const std::string &arch, const std::string &
#ifdef ENABLE_CUSTOM_KERNEL_REGISTRY
return lite::RegistryKernelImpl::GetInstance()->RegCustomKernel(arch, provider, data_type, type, creator);
#else
MS_LOG(ERROR) << unsuppor_custom_kernel_register_log;
MS_LOG(ERROR) << unsupport_custom_kernel_register_log;
return lite::RET_NOT_SUPPORT;
#endif
}
@ -38,7 +38,7 @@ int RegisterKernel::RegKernel(const std::string &arch, const std::string &provid
#ifdef ENABLE_CUSTOM_KERNEL_REGISTRY
return lite::RegistryKernelImpl::GetInstance()->RegKernel(arch, provider, data_type, op_type, creator);
#else
MS_LOG(ERROR) << unsuppor_custom_kernel_register_log;
MS_LOG(ERROR) << unsupport_custom_kernel_register_log;
return lite::RET_NOT_SUPPORT;
#endif
}

View File

@ -74,9 +74,9 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
MS_LOG(ERROR) << "No input!";
return RET_ERROR;
}
#ifndef ENABLE_CONTROL_TENSORLIST
#ifndef ENABLE_CONTROLFLOW_TENSORLIST
if (parameter->type_ == schema::PrimitiveType_Switch) {
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_ERROR;
}
#endif
@ -110,7 +110,7 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
if (out_tensors.at(i) == nullptr) {
continue;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (reinterpret_cast<TensorListC *>(out_tensors.at(i))->data_type_ == TypeIdC::kObjectTypeTensorType) {
auto *tensor_list_c = reinterpret_cast<TensorListC *>(out_tensors.at(i));
auto *tensor_list = reinterpret_cast<TensorList *>(outputs.at(i));
@ -124,7 +124,7 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
} else {
#endif
TensorC2Tensor(out_tensors.at(i), outputs.at(i));
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
}
#endif
if (ret == NNACL_INFER_INVALID) {

View File

@ -14,7 +14,7 @@ if(MSLITE_STRING_KERNEL)
${KERNEL_STRING_SRC}
)
endif()
if(MSLITE_CONTROL_TENSORLIST)
if(MSLITE_CONTROLFLOW_TENSORLIST)
file(GLOB KERNEL_CONTROL_TENSORLIST
${CMAKE_CURRENT_SOURCE_DIR}/control/*.cc
)

View File

@ -45,12 +45,12 @@ int CarryDataKernel::MoveData(const std::vector<lite::Tensor *>::iterator &dst_b
MS_LOG(ERROR) << "Carry const data and graph inputs.";
} else {
if (src_tensor->data_type() == kObjectTypeTensorType && dst_tensor->data_type() == kObjectTypeTensorType) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
MS_LOG(ERROR) << "Carry MoveTensorListData";
ret = MoveTensorListData(reinterpret_cast<lite::TensorList *>(dst_tensor),
reinterpret_cast<lite::TensorList *>(src_tensor));
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_NOT_SUPPORT;
#endif
} else {
@ -87,7 +87,7 @@ int CarryDataKernel::MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_
memcpy(dst_tensor->data(), src_tensor->data(), src_tensor->Size());
return RET_OK;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int CarryDataKernel::MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist) {
// shape may change, because tensors.size() can change in RunGraph
if (dst_tensorlist->data_type() != src_tensorlist->data_type() ||

View File

@ -35,7 +35,7 @@ class CarryDataKernel : public InnerKernel {
const std::vector<lite::Tensor *>::iterator &src_begin,
const std::vector<lite::Tensor *>::iterator &src_limit);
int MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_tensor);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist);
#endif
};

View File

@ -133,7 +133,7 @@ int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) {
return ret;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
SetSubgraphForPartialNode();
#endif
@ -145,7 +145,7 @@ int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) {
return ret;
}
#else
MS_LOG(ERROR) << unsuppor_delegate_log;
MS_LOG(ERROR) << unsupport_delegate_log;
return RET_ERROR;
#endif
}
@ -157,7 +157,7 @@ int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) {
#endif
FindAllInoutKernels(*dst_kernels);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (IsControlFlowParttern(*dst_kernels)) {
ret = ConstructControlFlowMainGraph(dst_kernels);
if (ret != RET_OK) {
@ -174,7 +174,7 @@ int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) {
MS_LOG(ERROR) << "ConstructSubGraphs failed.";
return ret;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
}
#endif
@ -461,7 +461,7 @@ int Scheduler::InferCallShape(const lite::Model::Node *node) {
if (partial_input) {
return InferPartialShape(partial_input);
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
auto switch_input = NodeInputIsSwitch(node);
if (switch_input) {
return InferSwitchShape(switch_input);
@ -1149,7 +1149,7 @@ int Scheduler::ScheduleSubGraphToKernels(size_t subgraph_index, std::vector<kern
if (IsPartialNode(primitive)) {
if (IsControlFlowPattern(*node)) {
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
kernel = ScheduleNodeToKernel(node, prefer_data_type);
auto partial_subgraph_index = GetPartialGraphIndex(primitive);
if (SubGraphHasScheduled(partial_subgraph_index)) {
@ -1161,7 +1161,7 @@ int Scheduler::ScheduleSubGraphToKernels(size_t subgraph_index, std::vector<kern
subgraphs_to_schedule_.push_back(partial_subgraph_index);
}
#else
MS_LOG(ERROR) << unsupport_control_tensorlist_log;
MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
return RET_ERROR;
#endif
} else {
@ -1303,7 +1303,7 @@ TypeId Scheduler::GetFirstFp32Fp16OrInt8Type(const std::vector<Tensor *> &in_ten
if (dtype == kObjectTypeString) {
return kNumberTypeFloat32;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
if (dtype == kObjectTypeTensorType) {
auto tensor_list = reinterpret_cast<TensorList *>(tensor);
auto tensor_list_dtype = tensor_list->tensors_data_type();
@ -1388,7 +1388,7 @@ kernel::SubGraphType Scheduler::PartialSubGraphType(const std::vector<kernel::Li
return kernel::kCpuFP32SubGraph;
}
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int Scheduler::InferSwitchShape(const lite::Model::Node *switch_node) {
MS_ASSERT(src_model_ != nullptr);
MS_ASSERT(switch_node != nullptr);

View File

@ -106,7 +106,7 @@ class Scheduler {
bool IsControlFlowPattern(const lite::Model::Node &partial_node);
int SubGraphPreferDataType(const int &subgraph_index, TypeId *prefer_data_type);
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
int InferSwitchShape(const Model::Node *node);
Model::Node *NodeInputIsSwitch(const Model::Node *node);
bool SubGraphHasScheduled(const int &index);
@ -133,7 +133,7 @@ class Scheduler {
std::shared_ptr<Delegate> delegate_ = nullptr;
std::deque<int> subgraphs_to_schedule_{};
std::unordered_map<size_t, kernel::LiteKernel *> subgraph_index_subgraph_kernel_map_{};
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
std::set<int> scheduled_subgraph_index_{};
std::unordered_map<kernel::LiteKernel *, size_t> partial_kernel_subgraph_index_map_{};
std::set<lite::Model::Node *> partial_cnode_inferred_{};

View File

@ -24,7 +24,7 @@
#include "src/common/log_adapter.h"
#include "schema/model_generated.h"
#include "src/tensor.h"
#ifdef ENABLE_CONTROL_TENSORLIST
#ifdef ENABLE_CONTROLFLOW_TENSORLIST
namespace mindspore::lite {
/**
 * Tensorlist is a vector-like container in which each element is a tensor object.