!49661 lite vs2017 compile error.
Merge pull request !49661 from wangpingan/windows
commit e1238e9467
@@ -444,6 +444,11 @@ endif()
 if(MSVC)
     set(MSLITE_ENABLE_CONVERTER off)
+    if(MSLITE_ENABLE_RUNTIME_GLOG)
+        add_definitions(-DNOMINMAX)
+        add_definitions(-DNOGDI)
+        set(MSLITE_DEPS_DIRENT on)
+    endif()
 endif()
 
 if(MSLITE_GPU_BACKEND STREQUAL cuda)
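
Note (reviewer reading, not stated in the commit): generic C++ code and glog clash with the min/max function-style macros and the GDI ERROR symbol pulled in by <windows.h>; NOMINMAX and NOGDI suppress them. MSVC also ships no <dirent.h>, which is why the dirent dependency is switched on here. A minimal Windows-only illustration of the macro clash, with hypothetical names:

// Hypothetical stand-alone snippet, not project code (Windows-only).
#define NOMINMAX  // without this, <windows.h> defines min/max as macros
#define NOGDI     // keeps GDI names such as ERROR away from glog's LOG(ERROR)
#include <windows.h>
#include <algorithm>
#include <iostream>

int main() {
  // With the min/max macros active, this call would fail to preprocess.
  std::cout << std::max(1, 2) << std::endl;
  return 0;
}
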
@@ -70,4 +70,8 @@ endif()
 if(MSLITE_DEPS_OPENSSL)
     include(${TOP_DIR}/cmake/external_libs/openssl.cmake)
 endif()
+
+if(MSLITE_DEPS_DIRENT)
+    include(${TOP_DIR}/cmake/external_libs/dirent.cmake)
+endif()
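
The new dirent dependency exists because MSVC provides no native <dirent.h>; the port pulled in via dirent.cmake exposes the usual POSIX-style API. A minimal sketch of that API (assumed interface, not MindSpore code):

#include <dirent.h>  // supplied by the dirent port on MSVC, native elsewhere
#include <cstdio>

int main() {
  DIR *dir = opendir(".");
  if (dir == nullptr) return 1;
  // Iterate directory entries the same way on Windows and POSIX platforms.
  for (dirent *entry = readdir(dir); entry != nullptr; entry = readdir(dir)) {
    std::printf("%s\n", entry->d_name);
  }
  closedir(dir);
  return 0;
}
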
@@ -429,6 +429,12 @@ if(NOT MSLITE_ENABLE_COREML)
         ${CMAKE_CURRENT_SOURCE_DIR}/litert/delegate/coreml/stub/coreml_delegate_stub.cc)
 endif()
 
+if(MSVC)
+    set(LITE_SRC ${LITE_SRC}
+        ${EXPRESSION_SRC}
+        ${CMAKE_CURRENT_SOURCE_DIR}/common/storage.cc)
+endif()
+
 add_subdirectory(litert/kernel/cpu)
 
 add_library(lite_src_mid OBJECT ${LITE_SRC})
@@ -36,7 +36,7 @@ int Param::Fill(Mode mode) {
       constexpr float scale = 0.01;
       std::normal_distribution<float> n{0, 1};
       std::generate_n(data.begin(), size_, [&]() { return n(engine); });
-      (void)std::transform(data.begin(), data.end(), data.begin(), [](float x) { return x * scale; });
+      (void)std::transform(data.begin(), data.end(), data.begin(), [=](float x) { return x * scale; });
       break;
     }
     case UNIFORM: {
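
The only change here is the lambda capture. `scale` is a constexpr local, so a capture-less lambda is legal in standard C++ (the read is not an odr-use), but VS2017 is known to reject it; switching to the default copy capture [=] sidesteps that. A minimal sketch of the pattern (hypothetical function, not the project's Param::Fill):

#include <algorithm>
#include <vector>

void ScaleInPlace(std::vector<float> *data) {
  constexpr float scale = 0.01f;
  // [=] instead of [] so VS2017 accepts the use of the constexpr local `scale`.
  (void)std::transform(data->begin(), data->end(), data->begin(),
                       [=](float x) { return x * scale; });
}
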
@@ -87,13 +87,13 @@ NetImpl::NetImpl(std::shared_ptr<Net> p) { pnet_ = p; }
 
 NetImpl::NetImpl(Graph *g) { pnet_ = g->net_data_->net(); }
 
-std::vector<lite::EXPR *> MS_API NetImpl::construct(const std::vector<lite::EXPR *> &inputs) {
+std::vector<lite::EXPR *> NetImpl::construct(const std::vector<lite::EXPR *> &inputs) {
   auto in = Expr::convert(inputs);
   auto out = pnet_->construct(in);
   return Expr::convert(out);
 }
 
-Net *MS_API NetImpl::Connect(std::shared_ptr<Net> net, lite::Net *lnet) {
+Net *NetImpl::Connect(std::shared_ptr<Net> net, lite::Net *lnet) {
   auto impl = GetImpl(net.get());
   if (impl == nullptr) {
     MS_LOG(ERROR) << "missing implementation";
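
Dropping MS_API from these out-of-class definitions looks like the usual MSVC export-macro fix: the export attribute belongs on the declaration (or on the exported class) in the header, and MSVC is stricter than GCC/Clang about repeating __declspec(dllexport) on definitions. A generic sketch of the pattern with hypothetical names (MY_API is an illustrative macro, not MindSpore's actual MS_API definition):

#ifdef _MSC_VER
#define MY_API __declspec(dllexport)
#else
#define MY_API __attribute__((visibility("default")))
#endif

class MY_API Widget {
 public:
  int Value() const;  // the export attribute lives on the class/declaration
};

// Out-of-class definition without the macro, mirroring the change above.
int Widget::Value() const { return 42; }
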
@@ -100,7 +100,7 @@ bool SearchSubGraph::CheckIsParallelSubGraph(const std::vector<Subgraph> &subgra
 
   // 1. check head_node's input is SplitOverlap node
   for (const auto &input : head_node->input_indices_) {
-    if (tensors_.at(input).type_ == CONST) {
+    if (tensors_.at(input).type_ == CONSTANT) {
       continue;
     }
     auto input_node_index = tensors_.at(input).out_nodes_.front();
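
The CONST to CONSTANT rename in this and the following hunks (and in the enum declaration at the end of the diff) is most likely a macro-collision fix: the Windows SDK headers define CONST as a macro for const, so an enumerator named CONST no longer parses once <windows.h> is in the include chain. A Windows-only illustration with a hypothetical enum:

#include <windows.h>  // brings in `#define CONST const`

// An enumerator literally named CONST would be macro-expanded to the keyword
// `const` here and fail to compile; CONSTANT stays clear of the macro.
enum TensorType { NORMAL, CONSTANT, INPUT };

int main() { return CONSTANT; }
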
@@ -112,7 +112,7 @@ bool SearchSubGraph::CheckIsParallelSubGraph(const std::vector<Subgraph> &subgra
 
   // 2. check end_node's output is concat node
   for (const auto &output : end_node->output_indices_) {
-    if (tensors_.at(output).type_ == CONST) {
+    if (tensors_.at(output).type_ == CONSTANT) {
       continue;
     }
     auto output_node_index = tensors_.at(output).in_nodes_.front();
@@ -276,7 +276,7 @@ void SearchSubGraph::ConvertSubGraphToModel(std::vector<Subgraph> *sub_graphs) {
     LiteGraph::Node *head_node = model_->graph_.all_nodes_[head_index];
     std::vector<uint32_t> inputs = head_node->input_indices_;
     for (auto input : inputs) {
-      if (tensors_[input].type_ == CONST) {
+      if (tensors_[input].type_ == CONSTANT) {
        continue;
      }
      if (std::find(new_sub_graph->input_indices_.begin(), new_sub_graph->input_indices_.end(), input) !=
@@ -359,8 +359,9 @@ void SearchSubGraph::SearchMultyInNodes(std::vector<uint32_t> *multy_in_nodes) {
     if (IsPartialNode(node->primitive_, model_->GetSchemaVersion())) {
       continue;
     }
-    int input_count = std::count_if(node->input_indices_.begin(), node->input_indices_.end(),
-                                    [&](uint32_t in_tensor_index) { return tensors_[in_tensor_index].type_ != CONST; });
+    int input_count =
+      std::count_if(node->input_indices_.begin(), node->input_indices_.end(),
+                    [&](uint32_t in_tensor_index) { return tensors_[in_tensor_index].type_ != CONSTANT; });
     if (input_count > 1) {
       multy_in_nodes->push_back(node_index);
     }
@@ -373,7 +374,7 @@ void SearchSubGraph::RemoveConstNode(std::vector<uint32_t> *nodes) {
   while (!stop_search) {
     stop_search = true;
     for (size_t i = 0; i < nodes->size(); i++) {
-      if (tensors_[nodes->at(i)].type_ == CONST) {
+      if (tensors_[nodes->at(i)].type_ == CONSTANT) {
         VectorErase(nodes, nodes->at(i));
         stop_search = false;
         break;
@@ -596,7 +597,7 @@ void SearchSubGraph::InitMiddleSubgraph(const std::vector<uint32_t> *multy_in_no
     LiteGraph::Node *node = node_list_[node_index];
     for (uint32_t input_tensor_index : node->input_indices_) {
       Tensor *tensor = &tensors_[input_tensor_index];
-      if (tensor->type_ == CONST || tensor->type_ == INPUT) continue;
+      if (tensor->type_ == CONSTANT || tensor->type_ == INPUT) continue;
 
       std::vector<uint32_t> input_nodes = tensor->out_nodes_;
       if (input_nodes.empty()) continue;
@@ -662,7 +663,7 @@ void SearchSubGraph::InitSearchTensor() {
     }
     auto category = TensorCategory(*src_tensor);
     if (category == mindspore::lite::Category::CONST_TENSOR || category == mindspore::lite::Category::CONST_SCALAR) {
-      tensors_[i].type_ = CONST;
+      tensors_[i].type_ = CONSTANT;
     }
   }
   std::vector<uint32_t> graph_input = model_->graph_.sub_graphs_[0]->input_indices_;
@@ -867,7 +868,7 @@ void SearchSubGraph::SubGraphSplitByOffLineParallel() {
     std::vector<Subgraph> node_subs;
     for (uint32_t input_tensor_index : node->input_indices_) {
       Tensor *tensor = &tensors_[input_tensor_index];
-      if (tensor->type_ == CONST) continue;
+      if (tensor->type_ == CONSTANT) continue;
       std::vector<uint32_t> input_nodes = tensor->out_nodes_;
       Subgraph sub;
       sub.ends_.push_back(input_nodes[0]);
@@ -971,7 +972,7 @@ void SearchSubGraph::InsertParallelNode(uint32_t index, Subgraph *subgraph) {
 
     /* remove const node */
     for (int i = static_cast<int>(input.size()) - 1; i >= 0; i--) {
-      if (tensors_[input[i]].type_ == CONST) {
+      if (tensors_[input[i]].type_ == CONSTANT) {
         VectorErase(&input, input[i]);
       }
     }
@@ -40,7 +40,7 @@ constexpr int kMinSubgraphCost = 50;
 constexpr double kDefaultGpu = 0.5;
 class SearchSubGraph {
  public:
-  enum TensorType { NORMAL, CONST, INPUT };
+  enum TensorType { NORMAL, CONSTANT, INPUT };
 
   struct Tensor {
     std::vector<uint32_t> in_nodes_; /* used current tensor as input */