!15702 add parallel kernel split

From: @zoloft
Reviewed-by: 
Signed-off-by:
This commit is contained in:
mindspore-ci-bot 2021-04-30 10:12:56 +08:00 committed by Gitee
commit a2e9aadfbf
7 changed files with 162 additions and 0 deletions

View File

@ -279,6 +279,7 @@ if(ENABLE_CONVERTER)
${LITE_DIR}/tools/optimizer/graph/unify_format_pass.cc
${LITE_DIR}/tools/optimizer/graph/node_infershape.cc
${LITE_DIR}/tools/optimizer/graph/transpose_strategy.cc
${LITE_DIR}/tools/optimizer/parallel/split_strategy.cc
${LITE_DIR}/tools/common/graph_util.cc
${LITE_DIR}/tools/common/tensor_util.cc
${LITE_DIR}/tools/common/node_util.cc

View File

@ -89,6 +89,7 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
../optimizer/graph/unify_format_pass.cc
../optimizer/graph/node_infershape.cc
../optimizer/graph/transpose_strategy.cc
../optimizer/parallel/split_strategy.cc
)
add_subdirectory(../anf_exporter anf_exporter)

View File

@ -17,6 +17,7 @@
#include "tools/converter/anf_transform.h"
#include <memory>
#include <string>
#include <unordered_map>
#include "src/common/log_adapter.h"
#include "tools/optimizer/common/gllo_utils.h"
#include "mindspore/core/ir/primitive.h"
@ -65,6 +66,7 @@
#include "tools/converter/quantizer/post_training_quantizer.h"
#include "tools/converter/quantizer/quant_cast.h"
#include "tools/converter/quantizer/weight_quantizer.h"
#include "tools/optimizer/parallel/split_strategy.h"
using std::string;
namespace mindspore::lite {
@ -120,6 +122,31 @@ int AnfTransform::RunFusionPass(const FuncGraphPtr &old_graph, const converter::
return RET_OK;
}
int AnfTransform::RunParallelPass(const FuncGraphPtr &old_graph, const converter::Flags *config) {
  MS_LOG(DEBUG) << "Run ParallelPass start";
  // Parallel splitting does not apply to training models, and parallelMode == 0
  // (the default) means the user did not request it. Check this before doing
  // any allocation work.
  if (config->trainModel || !config->parallelMode) {
    return RET_OK;
  }
  // 1. deal with split strategy
  std::unordered_map<std::string, opt::SplitStrategy> split_strategys =
    ParserSplitStrategy(static_cast<opt::SplitMode>(config->parallelMode));
  if (split_strategys.empty()) {
    // Best effort: an unsupported mode or inconsistent ratio tables disables
    // parallel splitting but does not abort the whole conversion.
    MS_LOG(ERROR) << "parse split_strategy error.";
    return RET_OK;
  }
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto parallel_pm = std::make_shared<opt::PassManager>("anf parallel pass manager", false);
  // 2. preceding parallel pass: clean up redundant ops before splitting.
  parallel_pm->AddPass(std::make_shared<opt::RemoveRedundantOpPass>());
  // TODO(parallel): steps 3 (multi_conv parallel pass) and 4 (single conv
  // parallel pass) are not implemented yet; the original change added
  // RemoveRedundantOpPass three times as a placeholder for them.
  optimizer->AddPassManager(parallel_pm);
  // The pass managers were previously built but never executed; run them so the
  // pass actually takes effect.
  if (optimizer->Optimize(old_graph) == nullptr) {
    MS_LOG(ERROR) << "run parallel pass failed.";
    return RET_ERROR;
  }
  MS_LOG(DEBUG) << "Run ParallelPass end";
  return RET_OK;
}
int AnfTransform::RunGraphPass(const FuncGraphPtr &old_graph, const converter::Flags *config) {
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto graph_pm = std::make_shared<opt::PassManager>("anf graph pass manager", true);
@ -376,6 +403,12 @@ FuncGraphPtr AnfTransform::TransformFuncGraph(const FuncGraphPtr &old_graph, con
return nullptr;
}
status = RunParallelPass(fg, config);
if (status != RET_OK) {
MS_LOG(ERROR) << "Run convert pass failed.";
return nullptr;
}
status = DoQuantize(fg, config);
if (status != RET_OK) {
MS_LOG(ERROR) << "Do Quantize failed.";

View File

@ -61,6 +61,8 @@ class AnfTransform {
static int RunTFAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
static int RunParallelPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
int DoQuantize(const FuncGraphPtr &old_graph, const converter::Flags *config);
void GetAllFuncGraph(const FuncGraphPtr &func_graph);

View File

@ -77,6 +77,7 @@ class Flags : public virtual mindspore::lite::FlagParser {
int quantWeightSize;
std::string bitNumIn;
int bitNum;
int parallelMode;
std::string configFile;
std::string quantWeightChannelStr;
int quantWeightChannel;

View File

@ -0,0 +1,59 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/optimizer/parallel/split_strategy.h"
#include <vector>
#include <unordered_map>
#include <string>
namespace mindspore {
namespace opt {
std::unordered_map<std::string, opt::SplitStrategy> ParserSplitStrategy(SplitMode parallel_mode) {
  // Builds the split-strategy table for the single supported op (kSplitOp).
  // Returns an empty map when the global ratio tables are unusable or the
  // requested mode is not a splitting mode.
  std::unordered_map<std::string, opt::SplitStrategy> split_strategys;
  if (kSplitRatio.empty() || kSplitDefaultRatio.empty() || kSplitDevTypes.empty()) {
    return split_strategys;
  }
  // One ratio entry is required per target device.
  if (kSplitRatio.size() != kSplitDevTypes.size()) {
    return split_strategys;
  }
  // Start from "no split" on all four axes, then mark the single axis selected
  // by parallel_mode with the real ratio.
  std::vector<std::vector<int64_t>> feature_map_split(4, kSplitDefaultRatio);
  std::vector<std::vector<int64_t>> weight_split(4, kSplitDefaultRatio);
  switch (parallel_mode) {
    case SplitN:
      feature_map_split[0] = kSplitRatio;
      break;
    case SplitH:
      feature_map_split[1] = kSplitRatio;
      break;
    case SplitCIN:
      feature_map_split[3] = kSplitRatio;
      weight_split[3] = kSplitRatio;
      break;
    case SplitCOUT:
      weight_split[0] = kSplitRatio;
      break;
    default:
      // NoSplit or an out-of-range value: nothing to do.
      return split_strategys;
  }
  opt::Strategys strategys = {feature_map_split, weight_split};
  split_strategys[opt::kSplitOp] = {strategys, kSplitDevTypes, kSplitDevTypes.size()};
  return split_strategys;
}
} // namespace opt
} // namespace mindspore

View File

@ -0,0 +1,65 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_PASS_PARALLEL_SPLIT_STRATEGY_H_
#define MINDSPORE_LITE_SRC_PASS_PARALLEL_SPLIT_STRATEGY_H_
// Includes must live inside the include guard so repeated inclusion of this
// header is a cheap no-op (they previously preceded the #ifndef).
#include <vector>
#include <string>
#include <unordered_map>
namespace mindspore {
namespace opt {
// Keys used when describing a split strategy.
constexpr auto OP = "op";
constexpr auto STRATEGY = "strategy";
constexpr auto DEV_TYPE = "dev_type";
// Suffix appended to node names produced by a parallel split.
constexpr auto PARALLEL_NAME_SUFFIX = "_parallel";
// The only operator currently eligible for splitting.
constexpr auto kSplitOp = "Conv2D";
// Per-device ratio applied on the axis being split; the all-zero table marks
// an axis that is not split. NOTE(review): non-inline const objects at header
// scope get one copy per translation unit; consider `inline` (C++17) if that
// becomes a size concern.
const std::vector<int64_t> kSplitRatio = {1, 1};
const std::vector<int64_t> kSplitDefaultRatio = {0, 0};
const std::vector<std::string> kSplitDevTypes = {"CPU", "GPU"};
// strategys[0] = feature-map split table, strategys[1] = weight split table;
// each table holds one per-device ratio vector per tensor axis.
using Strategys = std::vector<std::vector<std::vector<int64_t>>>;
enum Status {
  SUCCESS = 0,
  FAILED,
  INVALID_ARGUMENT,
};
// Which tensor dimension to split across devices.
enum SplitMode {
  SplitN = 0,
  SplitH = 1,
  SplitCIN = 2,
  SplitCOUT = 3,
  NoSplit = 4,
};
struct SplitStrategy {
  Strategys strategys;                 // feature-map and weight split tables
  std::vector<std::string> dev_types;  // target device per split slice
  size_t dev_num;                      // number of devices participating
};
// Builds the per-operator split-strategy table for the given mode; returns an
// empty map when the mode is unsupported or the ratio tables are inconsistent.
std::unordered_map<std::string, opt::SplitStrategy> ParserSplitStrategy(SplitMode parallel_mode);
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_PASS_PARALLEL_SPLIT_STRATEGY_H_