forked from mindspore-Ecosystem/mindspore

fix thread pool when mindrt off

parent 81260a2319
commit ac9b344c8a
@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -192,7 +192,7 @@ int ThreadPool::CreateThreads(size_t thread_num, const std::vector<int> &core_li
 
 int ThreadPool::ParallelLaunch(const Func &func, Content content, int task_num) const {
   // if single thread, run master thread
-  if (thread_num() <= 1 || task_num <= 1) {
+  if (task_num <= 1) {
     for (int i = 0; i < task_num; ++i) {
       int ret = func(content, i, 0, 1);
       if (ret != 0) {
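Note on the thread-pool hunk above: with `thread_num() <= 1` dropped from the condition, a pool built with a single thread (the mindrt-off configuration this commit targets) no longer runs every multi-task launch serially on the caller's thread; only `task_num <= 1` takes the serial shortcut. A minimal compilable sketch of the fixed control flow, where `Func`'s trailing float pair and the `RunTasksOnPool` helper are illustrative assumptions, not MindSpore API:

```cpp
// Sketch only, not the MindSpore source.
#include <functional>

using Content = void *;
using Func = std::function<int(Content, int, float, float)>;

// Stand-in for the pool's real dispatch path (hypothetical helper).
static int RunTasksOnPool(const Func &func, Content content, int task_num) {
  for (int i = 0; i < task_num; ++i) {
    int ret = func(content, i, 0, 1);
    if (ret != 0) return ret;
  }
  return 0;
}

int ParallelLaunchSketch(const Func &func, Content content, int task_num) {
  if (task_num <= 1) {  // was: thread_num() <= 1 || task_num <= 1
    for (int i = 0; i < task_num; ++i) {
      int ret = func(content, i, 0, 1);  // run on the calling thread
      if (ret != 0) return ret;
    }
    return 0;
  }
  // A one-thread pool now reaches the dispatch path instead of falling
  // back silently to the caller's thread.
  return RunTasksOnPool(func, content, task_num);
}
```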
@@ -221,7 +221,7 @@ int LiteSession::ConvertTensorsData(const lite::LiteModel *model, size_t tensor_
   auto shape_info = dst_tensor->shape();
   if (shape_info.end() !=
       std::find_if(shape_info.begin(), shape_info.end(), [](const int shape) { return shape <= 0; })) {
-    MS_LOG(ERROR) << "Invalid shape size." << src_tensor->handler()->name()->c_str();
+    MS_LOG(ERROR) << "Invalid shape size, tensor name: " << src_tensor->handler()->name();
     return RET_ERROR;
   }
 
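The log-message fix sits next to a compact shape guard worth spelling out: the conversion fails if any dimension is non-positive. A standalone sketch of just that predicate, using a plain `std::vector<int>` instead of the MindSpore tensor types:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// True when every dimension is strictly positive, mirroring the
// std::find_if check in the hunk above.
bool IsValidShape(const std::vector<int> &shape_info) {
  return shape_info.end() ==
         std::find_if(shape_info.begin(), shape_info.end(),
                      [](const int shape) { return shape <= 0; });
}

int main() {
  std::cout << IsValidShape({2, 3, 4}) << "\n";   // 1: all dims positive
  std::cout << IsValidShape({2, -1, 4}) << "\n";  // 0: non-positive dim rejected
  return 0;
}
```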
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -32,6 +32,7 @@ int SkipGramCPUKernel::Prepare() {
   CHECK_LESS_RETURN(out_tensors_.size(), 1);
   CHECK_NULL_RETURN(in_tensors_[0]);
   CHECK_NULL_RETURN(out_tensors_[0]);
+  MS_CHECK_TRUE_RET(in_tensors_[0]->data_type() == kObjectTypeString, RET_ERROR);
   if (!InferShapeDone()) {
     return RET_OK;
   }
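The added `MS_CHECK_TRUE_RET` line rejects non-string inputs before shape inference runs. A hedged plain-C++ rendering, assuming the macro follows its usual MindSpore Lite reading of returning the given code when the condition is false; `TensorStub` and the enum values are stand-ins, not the real types:

```cpp
enum RetCode { RET_OK = 0, RET_ERROR = 1 };
enum class TypeId { kObjectTypeString, kNumberTypeFloat32 };  // values illustrative

struct TensorStub {  // stand-in for the real tensor type
  TypeId data_type_value;
  TypeId data_type() const { return data_type_value; }
};

// Skip-gram only consumes string tensors; bail out before inference.
int PrepareGuardSketch(const TensorStub &input) {
  if (input.data_type() != TypeId::kObjectTypeString) {
    return RET_ERROR;  // the MS_CHECK_TRUE_RET(..., RET_ERROR) path
  }
  return RET_OK;
}
```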
@@ -92,14 +93,15 @@ int SkipGramCPUKernel::Run() {
     } else {
       if (index > 0 && ((skip_gram_parameter_->include_all_ngrams && index <= skip_gram_parameter_->ngram_size) ||
                         (!skip_gram_parameter_->include_all_ngrams && index == skip_gram_parameter_->ngram_size))) {
-        std::vector<StringPack> gram(2 * index - 1);
+        const int twice = 2;
+        std::vector<StringPack> gram(twice * index - 1);
         char blank[1] = {' '};
         StringPack blank_str = {1, blank};
-        for (int i = 0; i < 2 * index - 2; i += 2) {
-          gram.at(i) = words.at(stack.at(i / 2));
+        for (int i = 0; i < twice * index - twice; i += twice) {
+          gram.at(i) = words.at(stack.at(i / twice));
           gram.at(i + 1) = blank_str;
         }
-        gram.at(2 * index - 2) = words.at(stack.at(index - 1));
+        gram.at(twice * index - twice) = words.at(stack.at(index - 1));
         result.push_back(gram);
       }
       index--;
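For context on the loop being refactored: `gram` holds `2 * index - 1` entries, interleaving the selected words (even slots) with single-blank separators (odd slots) and ending on a word, so no trailing blank. A minimal sketch with `std::string` standing in for `StringPack`:

```cpp
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> BuildGram(const std::vector<std::string> &words,
                                   const std::vector<int> &stack, int index) {
  const int twice = 2;  // named constant, mirroring the refactor above
  std::vector<std::string> gram(twice * index - 1);
  const std::string blank_str = " ";
  for (int i = 0; i < twice * index - twice; i += twice) {
    gram.at(i) = words.at(stack.at(i / twice));  // word slots: even indices
    gram.at(i + 1) = blank_str;                  // separator slots: odd indices
  }
  gram.at(twice * index - twice) = words.at(stack.at(index - 1));  // final word
  return gram;
}

int main() {
  std::vector<std::string> words = {"the", "quick", "brown", "fox"};
  std::vector<int> stack = {0, 2, 3};  // picks "the", "brown", "fox"
  for (const auto &piece : BuildGram(words, stack, 3)) std::cout << piece;
  std::cout << "\n";  // prints: the brown fox
  return 0;
}
```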