add output strategy for op init

yangzhenzhang 2021-11-05 11:42:19 +08:00
parent 005bc7c380
commit 8431ba616c
37 changed files with 608 additions and 361 deletions

View File

@@ -300,14 +300,22 @@ void DumpParallelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo
     return;
   }

-  auto strategy = operator_info->strategy();
-  if (strategy == nullptr) {
+  auto in_strategy = operator_info->strategy();
+  if (in_strategy == nullptr) {
     return;
   }

-  ValuePtr temp = MakeValue(strategy->GetInputDim());
-  gsub->buffer << " { strategy: ";
-  gsub->buffer << temp->ToString();
+  ValuePtr in_tmp = MakeValue(in_strategy->GetInputDim());
+  gsub->buffer << " { in_strategy: ";
+  gsub->buffer << in_tmp->ToString();
+
+  auto out_strategy = operator_info->out_strategy();
+  if (out_strategy) {
+    ValuePtr out_tmp = MakeValue(out_strategy->GetInputDim());
+    gsub->buffer << ", out_strategy: ";
+    gsub->buffer << out_tmp->ToString();
+  }
   gsub->buffer << " }";
 }
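With this change the parallel annotation in the dumped IR shows both strategies. For illustration (values invented here, not taken from this commit), a MatMul configured with input strategy ((2, 4), (4, 1)) and output strategy ((8, 1)) would now be annotated roughly as

  { in_strategy: ((2, 4), (4, 1)), out_strategy: ((8, 1)) }

while a node without an explicit output strategy keeps the single in_strategy form, since out_strategy_ stays null.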

View File

@@ -1615,7 +1615,7 @@ Status CostGraph::InitReshapeStrategy() {
       reshape_info->SetOutputLayout(
         (*next_iter)->next_operator()->inputs_tensor_info()[LongToSize(next_index)].tensor_layout());
     }
-    if (reshape_info->Init(nullptr) != SUCCESS) {
+    if (reshape_info->Init(nullptr, nullptr) != SUCCESS) {
       return FAILED;
     }
   }

View File

@@ -851,8 +851,8 @@ Status GatherInfo::ComputeReplaceOp() {
   return SUCCESS;
 }

-Status GatherInfo::Init(const StrategyPtr &strategy) {
-  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+Status GatherInfo::Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (InitWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     MS_LOG(ERROR) << name_ << ": Init failed.";
     return FAILED;
   }

@@ -864,8 +864,8 @@ Status GatherInfo::Init(const StrategyPtr &strategy) {
   return SUCCESS;
 }

-Status GatherInfo::InitForCostModel(const StrategyPtr &strategy) {
-  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+Status GatherInfo::InitForCostModel(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (InitForCostModelWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     if (is_auto_parallel_) {
       MS_LOG(DEBUG) << name_ << ": Init for cost model failed.";
     } else {

View File

@@ -40,8 +40,8 @@ class GatherInfo : public OperatorInfo {
         slice_size_(0),
         replace_op_name_(replace_op_name) {}
   ~GatherInfo() override = default;
-  Status Init(const StrategyPtr &strategy) override;
-  Status InitForCostModel(const StrategyPtr &strategy) override;
+  Status Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) override;
+  Status InitForCostModel(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) override;
   std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;
   Status SetCostUnderStrategy(const StrategyPtr &strategy) override;

View File

@@ -90,18 +90,7 @@ Status MatMulBase::GetAttrs() {
     if (transpose_b_iter->second->isa<BoolImm>()) {
       transpose_b_ = transpose_b_iter->second->cast<BoolImmPtr>()->value();
     } else {
-      MS_LOG(ERROR) << name_ << " : The value of transpose_a is not bool.";
-      return FAILED;
-    }
-  }
-
-  auto forward_reduce_scatter_iter = attrs_.find(FORWARD_REDUCE_SCATTER);
-  if (forward_reduce_scatter_iter != attrs_.end()) {
-    MS_EXCEPTION_IF_NULL(forward_reduce_scatter_iter->second);
-    if (forward_reduce_scatter_iter->second->isa<BoolImm>()) {
-      forward_reduce_scatter_ = forward_reduce_scatter_iter->second->cast<BoolImmPtr>()->value();
-    } else {
-      MS_LOG(ERROR) << name_ << " : The value of forward reduce scatter is not bool.";
+      MS_LOG(ERROR) << name_ << " : The value of transpose_b is not bool.";
       return FAILED;
     }
   }
@@ -193,11 +182,58 @@ Status MatMul::CheckStrategy(const StrategyPtr &strategy) {
     }
   }

-  if ((mat_a_dimension_ != 2 || mat_b_dimension_ != 2) && forward_reduce_scatter_) {
-    MS_LOG(WARNING) << name_
-                    << ": The dimension of mat a and mat b must be 2 in forward reduce scatter mode, "
-                       "setting the forward reduce scatter mode to false here";
-    forward_reduce_scatter_ = false;
-  }
+  return SUCCESS;
+}
+
+Status MatMul::CheckOutputStrategy(const StrategyPtr &out_strategy) {
+  if (out_strategy == nullptr) {
+    MS_LOG(INFO) << name_ << ": The output strategy is null";
+    return SUCCESS;
+  }
+
+  if (mat_a_dimension_ != 2 || mat_b_dimension_ != 2) {
+    MS_LOG(ERROR) << name_ << ": The dimension of mat a and mat b must be 2 if set output strategy";
+    return FAILED;
+  }
+
+  if (CheckStrategyValue(out_strategy, outputs_shape_) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << " : Invalid output strategy.";
+    return FAILED;
+  }
+
+  Strategys in_stra = strategy_->GetInputDim();
+  Dimensions x_strategy = in_stra.at(0);
+  Dimensions w_strategy = in_stra.at(1);
+
+  int64_t in_shard_a = x_strategy[0];
+  int64_t in_shard_b = x_strategy[1];
+  int64_t in_shard_c = 1;
+  if (transpose_b_) {
+    in_shard_c = w_strategy[0];
+  } else {
+    in_shard_c = w_strategy[1];
+  }
+
+  Strategys out_stra = out_strategy->GetInputDim();
+  Dimensions output_strategy = out_stra[0];
+
+  int64_t out_shard_a_or_ab = output_strategy[0];
+  int64_t out_shard_c = output_strategy[1];
+  if (out_shard_c != in_shard_c) {
+    MS_LOG(ERROR) << name_ << ": The input strategy is (" << x_strategy << ", " << w_strategy << ")"
+                  << ", the second dimension of output strategy must be " << in_shard_c << ", but got " << out_shard_c;
+    return FAILED;
+  }
+
+  if (out_shard_a_or_ab == in_shard_a) {
+    forward_reduce_scatter_ = false;
+  } else if (out_shard_a_or_ab == in_shard_a * in_shard_b) {
+    forward_reduce_scatter_ = true;
+  } else {
+    MS_LOG(ERROR) << name_ << ": The input strategy is (" << x_strategy << ", " << w_strategy << ")"
+                  << ", the first dimension of output strategy must be " << in_shard_a << " or "
+                  << in_shard_a * in_shard_b << ", but got " << out_shard_a_or_ab;
+    return FAILED;
   }

   return SUCCESS;
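For concreteness, here is a standalone sketch (plain C++, illustrative names, not MindSpore code) of the consistency rule CheckOutputStrategy enforces above: with mat_a sharded (a, b) and mat_b sharded so its non-reduced dimension is split c ways, the only accepted output strategies are (a, c) (AllReduce output) and (a*b, c) (ReduceScatter output, which also turns on forward_reduce_scatter_).

#include <cstdint>
#include <iostream>
#include <vector>

using Dims = std::vector<int64_t>;

bool OutputStrategyIsValid(const Dims &x_strategy, const Dims &w_strategy, bool transpose_b,
                           const Dims &out_strategy, bool *forward_reduce_scatter) {
  const int64_t in_shard_a = x_strategy[0];
  const int64_t in_shard_b = x_strategy[1];
  const int64_t in_shard_c = transpose_b ? w_strategy[0] : w_strategy[1];
  if (out_strategy[1] != in_shard_c) {
    return false;  // second output dim must match the non-reduced dim of mat_b
  }
  if (out_strategy[0] == in_shard_a) {
    *forward_reduce_scatter = false;  // standard AllReduce mode
    return true;
  }
  if (out_strategy[0] == in_shard_a * in_shard_b) {
    *forward_reduce_scatter = true;  // ReduceScatter folds the b axis into the rows
    return true;
  }
  return false;
}

int main() {
  bool reduce_scatter = false;
  // x: (2, 4), w: (4, 1), transpose_b = false -> valid outputs: (2, 1) or (8, 1)
  std::cout << OutputStrategyIsValid({2, 4}, {4, 1}, false, {8, 1}, &reduce_scatter)
            << " rs=" << reduce_scatter << "\n";  // prints: 1 rs=1
  std::cout << OutputStrategyIsValid({2, 4}, {4, 1}, false, {4, 1}, &reduce_scatter) << "\n";  // 1
  std::cout << OutputStrategyIsValid({2, 4}, {4, 1}, false, {3, 1}, &reduce_scatter) << "\n";  // 0
}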
@@ -289,21 +325,9 @@ Status MatMulBase::InferTensorMap() {
   }

   if (forward_reduce_scatter_) {
-    if (dev_matrix_shape_.size() != 3) {
-      MS_LOG(WARNING) << name_
-                      << ": The dimension of dev matrix shape must be 3 in forward reduce scatter mode, "
-                         "setting the forward reduce scatter mode to false here";
-      forward_reduce_scatter_ = false;
-    } else if (outputs_shape_[0][0] % (dev_matrix_shape_[0] * dev_matrix_shape_[1]) != 0) {
-      MS_LOG(WARNING) << name_
-                      << ": The first dimension of output should be split by dev_matrix[0]*dev_matrix[1] in "
-                         "forward reduce scatter mode, setting the forward reduce scatter mode to false here";
-      forward_reduce_scatter_ = false;
-    } else {
-      // the forward reduce scatter only support that the dimension of output is 2
-      output_tensor_map = {1, 0};
-    }
+    // the forward reduce scatter only support that the dimension of output is 2
+    output_tensor_map = {1, 0};
   }

   inputs_tensor_map_.push_back(mat_a_tensor_map);
   inputs_tensor_map_.push_back(mat_b_tensor_map);
@@ -312,21 +336,28 @@ Status MatMulBase::InferTensorMap() {
 }

 Status MatMulBase::InferTensorLayout(TensorLayouts *inputs_layout, TensorLayouts *outputs_layout) {
-  Shape output_dev_matrix_shape;
+  out_dev_matrix_shape_ = dev_matrix_shape_;
   if (forward_reduce_scatter_) {
-    if (dev_matrix_shape_.size() != 3) {
-      MS_LOG(ERROR) << "The size of origin dev matrix shape must be 3 in forward reduce scatter mode";
-      return FAILED;
-    }
-    output_dev_matrix_shape = {dev_matrix_shape_[0] * dev_matrix_shape_[1], dev_matrix_shape_[2]};
-  } else {
-    output_dev_matrix_shape = dev_matrix_shape_;
+    // the reduce scatter mode only use for MatMul
+    out_dev_matrix_shape_ = dev_matrix_shape_;
+    if (repeated_num_in_dev_matrix_right_ || repeated_calc_num_ == 1) {
+      // dev_matrix_shape_ is: [a, b, c, repeat_num] or [a, b, c]
+      // out_dev_matrix_shape_ is: [a*b, c, repeat_num] or [a*b, c]
+      (void)out_dev_matrix_shape_.erase(out_dev_matrix_shape_.begin(), out_dev_matrix_shape_.begin() + 2);
+      (void)out_dev_matrix_shape_.insert(out_dev_matrix_shape_.begin(), dev_matrix_shape_[0] * dev_matrix_shape_[1]);
+    } else {
+      // dev_matrix_shape_ is: [repeat_num, a, b, c]
+      // out_dev_matrix_shape_ is: [repeat_num, a*b, c]
+      (void)out_dev_matrix_shape_.erase(out_dev_matrix_shape_.begin() + 1, out_dev_matrix_shape_.begin() + 3);
+      (void)out_dev_matrix_shape_.insert(out_dev_matrix_shape_.begin() + 1,
+                                         dev_matrix_shape_[1] * dev_matrix_shape_[2]);
+    }
   }

   TensorLayout mat_a_layout, mat_b_layout, output_layout;
   if ((mat_a_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], inputs_shape_[0]) != SUCCESS) ||
       (mat_b_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[1], inputs_shape_[1]) != SUCCESS) ||
-      (output_layout.InitFromVector(output_dev_matrix_shape, outputs_tensor_map_[0], outputs_shape_[0]) != SUCCESS)) {
+      (output_layout.InitFromVector(out_dev_matrix_shape_, outputs_tensor_map_[0], outputs_shape_[0]) != SUCCESS)) {
     return FAILED;
   }
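A minimal sketch of the device-matrix folding performed above, written outside MindSpore types: the a and b axes are merged so the output layout can be described on [a*b, c] after ReduceScatter, with the repeat axis kept on whichever side it already occupies.

#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> FoldForReduceScatter(const std::vector<int64_t> &dev, bool repeat_on_right) {
  std::vector<int64_t> out = dev;
  if (repeat_on_right) {
    // [a, b, c, repeat] or [a, b, c] -> [a*b, c, repeat] or [a*b, c]
    out.erase(out.begin(), out.begin() + 2);
    out.insert(out.begin(), dev[0] * dev[1]);
  } else {
    // [repeat, a, b, c] -> [repeat, a*b, c]
    out.erase(out.begin() + 1, out.begin() + 3);
    out.insert(out.begin() + 1, dev[1] * dev[2]);
  }
  return out;
}

int main() {
  for (int64_t d : FoldForReduceScatter({2, 4, 1, 2}, true)) std::cout << d << ' ';   // 8 1 2
  std::cout << '\n';
  for (int64_t d : FoldForReduceScatter({2, 2, 4, 1}, false)) std::cout << d << ' ';  // 2 8 1
  std::cout << '\n';
}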
@@ -360,21 +391,6 @@ Status MatMulBase::InferTensorInfo() {
   return SUCCESS;
 }

-Status MatMulBase::Init(const StrategyPtr &strategy) {
-  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
-    MS_LOG(ERROR) << name_ << " : Init failed.";
-    return FAILED;
-  }
-
-  if (forward_reduce_scatter_) {
-    virtual_div_op_.clear();
-    MS_LOG(INFO) << "The forward reduce scatter mode does not involve repeated calculation, clear the virtual div op";
-  }
-
-  MS_LOG(INFO) << name_ << " : Init success.";
-  return SUCCESS;
-}
-
 Status MatMulBase::SwapLastTwoElements(mindspore::parallel::Shape *const input) {
   if (input->size() < 2) {
     MS_LOG(ERROR) << name_ << " : The size of inputs small than 2.";

@@ -584,7 +600,7 @@ std::shared_ptr<Strategys> BatchMatMulInfo::GenerateBatchStrategies() {
 }

 Status MatMulBase::SetCostUnderStrategy(const mindspore::parallel::StrategyPtr &strategy) {
-  if (InitForCostModel(strategy) == FAILED) {
+  if (InitForCostModel(strategy, nullptr) == FAILED) {
     MS_LOG(ERROR) << name_ << " : Initialization under the strategy failed.";
     return FAILED;
   }

View File

@@ -37,8 +37,6 @@ class MatMulBase : public OperatorInfo {
       : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<MatMulCost>()) {}
   ~MatMulBase() override = default;

-  Status Init(const StrategyPtr &strategy) override;
-
   // Generate all strategies and the corresponding cost for this MatMul operator
   Status GenerateStrategies(int64_t stage_id) override;
   std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;

@@ -75,6 +73,7 @@ class MatMul : public MatMulBase {
  protected:
   Status CheckStrategy(const StrategyPtr &strategy) override;
+  Status CheckOutputStrategy(const StrategyPtr &out_strategy) override;
 };

 class MatMulInfo : public MatMul {

View File

@@ -727,8 +727,8 @@ Status OperatorInfo::InferSliceShape(const Strategys &inputs_strategy, const Str
   return SUCCESS;
 }

-Status OperatorInfo::Init(const StrategyPtr &strategy) {
-  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+Status OperatorInfo::Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (InitWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     MS_LOG(ERROR) << name_ << " : Init failed.";
     return FAILED;
   }

@@ -737,8 +737,8 @@ Status OperatorInfo::Init(const StrategyPtr &strategy) {
   return SUCCESS;
 }

-Status OperatorInfo::InitForCostModel(const StrategyPtr &strategy) {
-  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+Status OperatorInfo::InitForCostModel(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (InitForCostModelWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     MS_LOG(ERROR) << name_ << " : Init for cost model failed.";
     return FAILED;
   }

@@ -747,9 +747,10 @@ Status OperatorInfo::InitForCostModel(const StrategyPtr &strategy) {
   return SUCCESS;
 }

-// method0: auto insert repeated_calculation_num for dev_matrix_shape when repeated_calculation_num > 1
-Status OperatorInfo::InitForCostModelWithAutoRepeatCalc(const StrategyPtr &strategy) {
-  if (strategy == nullptr) {
+// auto insert repeated_calculation_num for dev_matrix_shape when repeated_calculation_num > 1
+Status OperatorInfo::InitForCostModelWithAutoRepeatCalc(const StrategyPtr &in_strategy,
+                                                        const StrategyPtr &out_strategy) {
+  if (in_strategy == nullptr) {
     MS_LOG(ERROR) << name_ << ": The strategy is null.";
     return FAILED;
   }

@@ -760,7 +761,7 @@ Status OperatorInfo::InitForCostModelWithAutoRepeatCalc(const StrategyPtr &strat
   }

   // must be after InferAttrs()
-  if (CheckStrategy(strategy) != SUCCESS) {
+  if (CheckStrategy(in_strategy) != SUCCESS) {
     if (is_auto_parallel_) {
       MS_LOG(DEBUG) << name_ << ": CheckStrategy failed.";
     } else {

@@ -768,13 +769,18 @@ Status OperatorInfo::InitForCostModelWithAutoRepeatCalc(const StrategyPtr &strat
     }
     return FAILED;
   }
+  strategy_ = in_strategy;
+
+  if (out_strategy && CheckOutputStrategy(out_strategy) != SUCCESS) {
+    MS_LOG(ERROR) << name_ << ": The output strategy is invalid";
+    return FAILED;
+  }
+  out_strategy_ = out_strategy;

   // need to clear queues before Init(),
   // because Init() may be called multiple times by cost model
   ResetQueueMember();

-  strategy_ = strategy;
-
   if (InferDevMatrixShape() != SUCCESS) {
     MS_LOG(ERROR) << name_ << ": InferDevMatrixShape failed.";
     return FAILED;

@@ -807,13 +813,13 @@ Status OperatorInfo::InitForCostModelWithAutoRepeatCalc(const StrategyPtr &strat
   return SUCCESS;
 }

-Status OperatorInfo::InitWithAutoRepeatCalc(const StrategyPtr &strategy) {
-  if (strategy == nullptr) {
-    MS_LOG(ERROR) << name_ << ": The strategy is null.";
+Status OperatorInfo::InitWithAutoRepeatCalc(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (in_strategy == nullptr) {
+    MS_LOG(ERROR) << name_ << ": The input strategy is null.";
     return FAILED;
   }

-  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+  if (InitForCostModelWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     return FAILED;
   }

@@ -1330,7 +1336,7 @@ Status GenerateStrategiesWithBroadcast(int64_t stage_id, const Shapes &inputs_sh
 }

 Status OperatorInfo::SetCostUnderStrategyBase(const StrategyPtr &strategy) {
-  if (InitForCostModel(strategy) == FAILED) {
+  if (InitForCostModel(strategy, nullptr) == FAILED) {
     if (is_auto_parallel_) {
       MS_LOG(DEBUG) << name_ << ": Initialization under the strategy failed.";
     } else {
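The ordering here is deliberate: strategy_ is assigned before the CheckOutputStrategy hook runs, because overrides such as MatMul::CheckOutputStrategy read strategy_->GetInputDim() to validate the output sharding against the input one. A minimal standalone sketch of the hook pattern (illustrative names, not MindSpore code):

#include <iostream>
#include <memory>

struct Strategy {};
using StrategyPtr = std::shared_ptr<Strategy>;
enum Status { SUCCESS, FAILED };

class OperatorInfoSketch {
 public:
  virtual ~OperatorInfoSketch() = default;
  Status Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
    if (in_strategy == nullptr) {
      return FAILED;  // the input strategy is mandatory
    }
    strategy_ = in_strategy;  // set first so the hook can inspect it
    if (out_strategy && CheckOutputStrategy(out_strategy) != SUCCESS) {
      return FAILED;
    }
    out_strategy_ = out_strategy;  // may legitimately stay null
    return SUCCESS;
  }

 protected:
  // default: no constraint, mirroring OperatorInfo::CheckOutputStrategy
  virtual Status CheckOutputStrategy(const StrategyPtr &) { return SUCCESS; }

  StrategyPtr strategy_;
  StrategyPtr out_strategy_;
};

int main() {
  OperatorInfoSketch op;
  std::cout << (op.Init(std::make_shared<Strategy>(), nullptr) == SUCCESS) << "\n";  // 1
  std::cout << (op.Init(nullptr, nullptr) == SUCCESS) << "\n";                       // 0
}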

View File

@@ -81,8 +81,9 @@ class OperatorInfo {
   // If output is tuple, outputs_type.size() is greater than 1.
   Status set_outputs_type(const std::vector<TypePtr> &outputs_type);
   const std::vector<TypePtr> &outputs_type() const { return outputs_type_; }
-  virtual Status Init(const StrategyPtr &strategy);
-  virtual Status InitForCostModel(const StrategyPtr &strategy);  // only init the necessary parts
+  virtual Status Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy);
+  // only init the necessary parts
+  virtual Status InitForCostModel(const StrategyPtr &strategy, const StrategyPtr &out_strategy);

   // Given the stage_id (which indicates the number of devices),
   // generate all strategies for this operator

@@ -152,7 +153,7 @@ class OperatorInfo {
   void SetIsStrategyCostExactTrue() { is_strategy_cost_exact_ = true; }
   void ClearStrategyCost() { strategy_cost_.clear(); }
   void CheckSelectedStrategy(const StrategyPtr &);
-  Status InitSelectedStrategy(const StrategyPtr &s_strategy) { return Init(s_strategy); }
+  Status InitSelectedStrategy(const StrategyPtr &s_strategy) { return Init(s_strategy, nullptr); }
   void set_input_value(const std::vector<ValuePtr> &input_value) { input_value_ = input_value; }
   const std::vector<ValuePtr> &input_value() const { return input_value_; }
   void set_outputs_dtype(const TypePtr &dtype) { outputs_dtype_ = dtype; }

@@ -161,6 +162,7 @@ class OperatorInfo {
   bool is_alive() const { return is_alive_; }
   void SetNotAlive() { is_alive_ = false; }
   StrategyPtr strategy() const { return strategy_; }
+  StrategyPtr out_strategy() const { return out_strategy_; }
   void set_strategy(const StrategyPtr &strategy) { strategy_ = strategy; }
   void set_refkey_parameter_name(std::string p_name) { refkey_parameter_name_ = std::move(p_name); }
   const std::string &refkey_parameter_name() const { return refkey_parameter_name_; }

@@ -199,14 +201,15 @@ class OperatorInfo {
   virtual Status InferMirrorOps();
   virtual Status InferTensorInfo();
   virtual void InferReplaceOps() {}
+  virtual Status CheckOutputStrategy(const StrategyPtr &out_strategy) { return SUCCESS; }
   Status CheckStrategyValue(const StrategyPtr &strategy, const Shapes &inputs_shape);
   void SetRepeatedCalcDevMatrix();
   void ResetTensorMapIfRepeatedCalc();
   Status CreateGroupByDim(size_t axis, std::vector<Group> *group);
   Status InferAttrs();
   void ResetQueueMember();
-  Status InitWithAutoRepeatCalc(const StrategyPtr &strategy);
-  Status InitForCostModelWithAutoRepeatCalc(const StrategyPtr &strategy);
+  Status InitWithAutoRepeatCalc(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy);
+  Status InitForCostModelWithAutoRepeatCalc(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy);
   Status InferRepeatedCalcInfo();
   Status InferVirtualDivOps();

@@ -232,6 +235,7 @@ class OperatorInfo {
   int32_t stage_id_ = 0;
   StrategyPtr strategy_;
+  StrategyPtr out_strategy_;
   std::vector<TensorInfo> inputs_tensor_info_;
   std::vector<TensorInfo> outputs_tensor_info_;
   Shape dev_matrix_shape_;  // if repeated calculation, it contains the repeated_calc_num_

View File

@@ -89,6 +89,7 @@ constexpr char AUTO_PARALLEL_RUN_ONCE_ONLY[] = "auto_parallel_run_once_only";
 constexpr char SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY[] = "semi_auto_parallel_run_once_only";
 constexpr char CHECK_SET_STRATEGY_VALID_ONCE_ONLY[] = "check_set_strategy_valid_once_only";
 constexpr char IN_STRATEGY[] = "in_strategy";
+constexpr char OUT_STRATEGY[] = "out_strategy";
 constexpr char STAGE_ATTR[] = "stage";
 constexpr char GEN_STRATEGY[] = "gen_strategy";
 constexpr char REDUCE_OP_SUM[] = "sum";
constexpr char REDUCE_OP_SUM[] = "sum"; constexpr char REDUCE_OP_SUM[] = "sum";

View File

@@ -306,7 +306,7 @@ Status ReshapeInfo::InferDefaultLayout(const Shape &shape, TensorLayout *const l
   return Status::SUCCESS;
 }

-Status ReshapeInfo::Init(const StrategyPtr &strategy) {
+Status ReshapeInfo::Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
   auto reshape_skip_redis_iter = attrs_.find(SKIP_REDISTRIBUTION);
   if (reshape_skip_redis_iter != attrs_.end()) {
     MS_EXCEPTION_IF_NULL(reshape_skip_redis_iter->second);

@@ -319,8 +319,8 @@ Status ReshapeInfo::Init(const StrategyPtr &strategy) {
   ResetQueueMember();
   device_number();

-  if (strategy) {
-    if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+  if (in_strategy) {
+    if (InitWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
       MS_LOG(ERROR) << name_ << ": Init failed.";
       return FAILED;
     }

@@ -463,7 +463,7 @@ Status ReshapeInfo::GenetateStrategyCosts(const std::vector<std::shared_ptr<Stra
     InferTensorInfoByLayout();
     SetCostForReshape(reshape_stra);
   } else if (next_stra_costs.empty()) {
-    if (Init(nullptr) == FAILED) {
+    if (Init(nullptr, nullptr) == FAILED) {
       MS_LOG(ERROR) << "Failure:operator reshape init failed";
       return FAILED;
     }

View File

@@ -43,7 +43,7 @@ class ReshapeInfo : public OperatorInfo {
         input_layout_set_flag_(false),
         output_layout_set_flag_(false) {}
   ~ReshapeInfo() override = default;
-  Status Init(const StrategyPtr &strategy) override;
+  Status Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) override;
   void SetInputLayout(const TensorLayout &input_layout) {
     input_layout_ = input_layout;
     input_layout_set_flag_ = true;

View File

@@ -126,20 +126,20 @@ Status VirtualDatasetInfo::InferTensorMap() {
 Status VirtualDatasetInfo::GetAttrs() { return SUCCESS; }

-Status VirtualDatasetInfo::Init(const StrategyPtr &strategy) {
+Status VirtualDatasetInfo::Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
   repeated_num_in_dev_matrix_right_ = false;
   if (ParallelContext::GetInstance()->dataset_repeat_dim_right()) {
     repeated_num_in_dev_matrix_right_ = true;
   }

-  if (InitWithAutoRepeatCalc(strategy) != SUCCESS) {
+  if (InitWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     MS_LOG(ERROR) << name_ << ": Init failed.";
     return FAILED;
   }
   return SUCCESS;
 }

-Status VirtualDatasetInfo::InitForCostModel(const StrategyPtr &strategy) {
-  if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) {
+Status VirtualDatasetInfo::InitForCostModel(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) {
+  if (InitForCostModelWithAutoRepeatCalc(in_strategy, out_strategy) != SUCCESS) {
     MS_LOG(ERROR) << name_ << ": Init for cost model failed.";
     return FAILED;
   }

View File

@@ -34,8 +34,8 @@ class VirtualDatasetInfo : public OperatorInfo {
                      const PrimitiveAttrs &attrs)
       : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<VirtualDatasetCost>()) {}
   ~VirtualDatasetInfo() override = default;
-  Status Init(const StrategyPtr &strategy) override;
-  Status InitForCostModel(const StrategyPtr &strategy) override;
+  Status Init(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) override;
+  Status InitForCostModel(const StrategyPtr &in_strategy, const StrategyPtr &out_strategy) override;
   Status GenerateStrategies(int64_t stage_id) override;
   std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;

View File

@@ -331,14 +331,15 @@ OperatorInfoPtr PipelineTransformer::CreateOpInfo(const CNodePtr &cnode, int tup
   op_info->set_input_value(input_value);
   op_info->set_outputs_dtype(temp_node->Type());
   op_info->set_cnode(temp_node);
-  StrategyPtr strategy = nullptr;
+  StrategyPtr in_strategy = nullptr, out_strategy = nullptr;
   if (!StrategyFound(attrs)) {
-    strategy = GenerateBatchParallelStrategy(op_info, prim);
+    in_strategy = GenerateBatchParallelStrategy(op_info, prim);
   } else {
-    strategy = ExtractStrategy(attrs[IN_STRATEGY]);
+    in_strategy = ExtractStrategy(attrs[IN_STRATEGY]);
+    out_strategy = ExtractStrategy(attrs[OUT_STRATEGY]);
   }
-  MS_EXCEPTION_IF_NULL(strategy);
-  if (op_info->Init(strategy) == FAILED) {
+  MS_EXCEPTION_IF_NULL(in_strategy);
+  if (op_info->Init(in_strategy, out_strategy) == FAILED) {
     MS_LOG(EXCEPTION) << "operator: " << prim->name() << " init failed.";
   }
   return op_info;

View File

@@ -1125,22 +1125,12 @@ std::string MirrorOpName() {
   return mirror_op_name;
 }

-void InsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node) {
-  MS_EXCEPTION_IF_NULL(node);
-  size_t node_size = node->inputs().size();
+static void DoInsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node,
+                              size_t node_size) {
   FuncGraphPtr func_graph = node->func_graph();
   MS_EXCEPTION_IF_NULL(func_graph);
   FuncGraphManagerPtr manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
-  for (auto input : node->inputs()) {
-    if (HasAbstractMonad(input)) {
-      node_size--;
-    }
-  }
-
-  if (!CheckInsertMirrorOps(mirror_ops, node, node_size)) {
-    return;
-  }

   for (size_t index = 1; index < node_size; ++index) {
     OperatorVector backward_op = mirror_ops[index - 1];

@@ -1226,6 +1216,22 @@ void InsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, cons
   }
 }

+void InsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node) {
+  MS_EXCEPTION_IF_NULL(node);
+  size_t node_size = node->inputs().size();
+  for (auto input : node->inputs()) {
+    if (HasAbstractMonad(input)) {
+      node_size--;
+    }
+  }
+
+  if (!CheckInsertMirrorOps(mirror_ops, node, node_size)) {
+    return;
+  }
+
+  DoInsertMirrorOps(root, mirror_ops, node, node_size);
+}
+
 void BackwardCommunication(const FuncGraphPtr &root, const OperatorInfoPtr &distribute_operator, const CNodePtr &node,
                            const std::vector<std::pair<CNodePtr, LossNodeInfo>> &sens_loss_pairs) {
   MS_EXCEPTION_IF_NULL(distribute_operator);
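The refactor keeps the monad bookkeeping in the public InsertMirrorOps and moves only the insertion loop into DoInsertMirrorOps. A standalone sketch of that bookkeeping (the string inputs and HasAbstractMonadLike are stand-ins for the real AnfNode API): trailing monad inputs are state tokens, not data inputs, so they must be excluded from node_size before mirror operators are inserted.

#include <iostream>
#include <string>
#include <vector>

bool HasAbstractMonadLike(const std::string &input) { return input == "U" || input == "IO"; }

int main() {
  std::vector<std::string> inputs = {"prim", "x", "w", "U"};  // last input is a monad
  size_t node_size = inputs.size();
  for (const auto &input : inputs) {
    if (HasAbstractMonadLike(input)) {
      --node_size;
    }
  }
  std::cout << node_size << "\n";  // 3: only the primitive and data inputs remain
}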
@@ -1308,21 +1314,26 @@ OperatorInfoPtr NewOperatorInstance(const PrimitivePtr &prim, const PrimitiveAtt
 }

 StrategyPtr ExtractStrategy(const ValuePtr &stra) {
-  ValueTuplePtr var = stra->cast<ValueTuplePtr>();
+  if (stra == nullptr) {
+    return nullptr;
+  }
+
+  auto var = stra->cast<ValueTuplePtr>();
+  if (var == nullptr) {
+    return nullptr;
+  }
+
   StrategyPtr strategyPtr;
   int64_t stage_id = g_device_manager->stage_id();

   MS_LOG(INFO) << "Extract information: strategy " << stra->ToString();
-  if (var == nullptr) {
-    MS_LOG(EXCEPTION) << "Strategy value is nullptr";
-  }
   if (var->size() > 0) {
     std::vector<ValuePtr> elements = var->value();
     Strategys strategy;
     for (uint64_t index = 0; index < elements.size(); ++index) {
       Dimensions dim;
       if (elements[index]->isa<ValueSequeue>()) {
-        ValueTuplePtr value_tuple = elements[index]->cast<ValueTuplePtr>();
+        auto value_tuple = elements[index]->cast<ValueTuplePtr>();
         std::vector<ValuePtr> value_vector = value_tuple->value();
         (void)std::transform(value_vector.begin(), value_vector.end(), std::back_inserter(dim),
                              [](const ValuePtr &value) { return static_cast<int64_t>(GetValue<int64_t>(value)); });
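ExtractStrategy now returns nullptr instead of throwing because the callers above look up OUT_STRATEGY unconditionally, and that attribute is optional. A sketch of why, with std::map standing in for the primitive-attribute container (an assumption about the container's lookup semantics, hedged accordingly): fetching an absent key yields a null pointer, and a null out-strategy just means "not specified", which must not abort compilation.

#include <iostream>
#include <map>
#include <memory>
#include <string>

int main() {
  std::map<std::string, std::shared_ptr<int>> attrs;
  attrs["in_strategy"] = std::make_shared<int>(1);
  // operator[] default-constructs a null pointer for an absent key, so the
  // parser has to tolerate nullptr rather than raise an exception.
  auto out = attrs["out_strategy"];
  std::cout << (out == nullptr) << "\n";  // 1
}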
@@ -1920,7 +1931,10 @@ void SetStridedSliceSplitStrategy(const std::vector<AnfNodePtr> &all_nodes) {
   }
 }

-void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
+static void ExtractStrategyAndInit(const CNodePtr &cnode, const PrimitivePtr &prim, const OperatorInfoPtr &op_info) {
+  StrategyPtr in_strategy = nullptr, out_strategy = nullptr;
+  auto attrs = prim->attrs();
+
   // load strategy map from checkpoint
   StrategyMap stra_map;
   if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&

@@ -1928,6 +1942,35 @@ void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
     MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
   }

+  std::string strategy_key_name = "";
+  auto param_names = NodeParameterName(cnode, -1, 0);
+  if (!param_names.empty()) {
+    strategy_key_name = prim->name() + "_" + param_names[0].first;
+  }
+  bool load_strategy_from_ckpt =
+      StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map.find(strategy_key_name) != stra_map.end();
+  if ((!StrategyFound(attrs) && !load_strategy_from_ckpt) && !cnode->HasPrimalAttr(IN_STRATEGY)) {
+    MS_LOG(INFO) << "ExtractInformation: the strategy of node " << cnode->ToString() << " prim " << prim->name()
+                 << " is empty, using batch parallel";
+    in_strategy = GenerateBatchParallelStrategy(op_info, prim);
+  } else if (cnode->HasPrimalAttr(IN_STRATEGY)) {
+    in_strategy = ExtractStrategy(cnode->GetPrimalAttr(IN_STRATEGY));
+    out_strategy = ExtractStrategy(cnode->GetPrimalAttr(OUT_STRATEGY));
+  } else if (StrategyFound(attrs)) {
+    in_strategy = ExtractStrategy(attrs[IN_STRATEGY]);
+    out_strategy = ExtractStrategy(attrs[OUT_STRATEGY]);
+  } else {
+    in_strategy = stra_map[strategy_key_name];
+  }
+
+  MS_EXCEPTION_IF_NULL(in_strategy);
+  if (op_info->Init(in_strategy, out_strategy) == FAILED) {
+    MS_LOG(EXCEPTION) << "Failure:operator " << prim->name() << " init failed"
+                      << " trace: " << trace::DumpSourceLines(cnode);
+  }
+}
+
+void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
   SetStridedSliceSplitStrategy(all_nodes);
   for (auto &node : all_nodes) {
     auto cnode = node->cast<CNodePtr>();
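The strategy-source precedence that ExtractStrategyAndInit implements, as a standalone sketch (illustrative types, not MindSpore code): a primal attribute on the CNode wins over a primitive attribute, which wins over a checkpoint entry; with none of the three, a batch-parallel strategy is generated.

#include <iostream>
#include <string>

std::string ResolveStrategySource(bool has_primal_attr, bool has_prim_attr, bool in_ckpt) {
  if (!has_prim_attr && !in_ckpt && !has_primal_attr) {
    return "batch_parallel";  // nothing specified anywhere
  }
  if (has_primal_attr) {
    return "primal_attr";  // strategy recorded on the CNode itself
  }
  if (has_prim_attr) {
    return "prim_attr";  // in_strategy/out_strategy set on the primitive
  }
  return "checkpoint";  // strategy map loaded from a checkpoint
}

int main() {
  std::cout << ResolveStrategySource(false, false, false) << "\n";  // batch_parallel
  std::cout << ResolveStrategySource(true, true, true) << "\n";     // primal_attr
  std::cout << ResolveStrategySource(false, true, true) << "\n";    // prim_attr
  std::cout << ResolveStrategySource(false, false, true) << "\n";   // checkpoint
}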
@@ -1958,7 +2001,7 @@ void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
       }
       input_value.emplace_back(nullptr);
     }
-    StrategyPtr strategyPtr = nullptr;
+
     (*operator_).set_input_value(input_value);
     (*operator_).set_outputs_dtype(cnode->Type());
     (*operator_).set_cnode(cnode);

@@ -1966,32 +2009,8 @@ void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
       cnode->set_user_data<OperatorInfo>(operator_);
       continue;
     }
-    // load strategy checkpoint
-    // key of strategy map
-    std::string strategy_key_name = "";
-    auto param_names = NodeParameterName(cnode, -1, 0);
-    if (!param_names.empty()) {
-      strategy_key_name = prim->name() + "_" + param_names[0].first;
-    }
-    bool load_strategy_from_ckpt =
-        StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map.find(strategy_key_name) != stra_map.end();
-    if ((!StrategyFound(attrs) && !load_strategy_from_ckpt) && !cnode->HasPrimalAttr(IN_STRATEGY)) {
-      MS_LOG(INFO) << "ExtractInformation: the strategy of node " << node->ToString() << " prim " << prim->name()
-                   << " is empty, using batch parallel";
-      strategyPtr = GenerateBatchParallelStrategy(operator_, prim);
-    } else if (cnode->HasPrimalAttr(IN_STRATEGY)) {
-      strategyPtr = ExtractStrategy(cnode->GetPrimalAttr(IN_STRATEGY));
-    } else if (StrategyFound(attrs)) {
-      strategyPtr = ExtractStrategy(attrs[IN_STRATEGY]);
-    } else {
-      strategyPtr = stra_map[strategy_key_name];
-    }
-    MS_EXCEPTION_IF_NULL(strategyPtr);
-    if (operator_->Init(strategyPtr) == FAILED) {
-      MS_LOG(EXCEPTION) << "Failure:operator " << prim->name() << " init failed"
-                        << " trace: " << trace::DumpSourceLines(cnode);
-    }
+    ExtractStrategyAndInit(cnode, prim, operator_);
     cnode->set_user_data<OperatorInfo>(operator_);
   }
 }
@@ -2268,7 +2287,7 @@ void ReshapeInit(const std::vector<AnfNodePtr> &all_nodes) {
       auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
       reshape_info_ptr->SetOutputLayout(*prev_layout_ptr);
     }
-    if (operator_info->Init(nullptr) == FAILED) {
+    if (operator_info->Init(nullptr, nullptr) == FAILED) {
       MS_LOG(EXCEPTION) << "Failure:operator " << prim->ToString() << " init failed";
     }
   }
@@ -3102,6 +3121,22 @@ bool IsInsertVirtualOutput(const FuncGraphPtr &root) {
           current_stage == split_stage_num - 1);
 }

+static void HandleGroupInfo() {
+  auto group_info = g_device_manager->group_info();
+  if (StrategyCheckpoint::GetInstance().group_info_save_on() &&
+      StrategyCheckpoint::GetInstance().SaveGroupInfo(group_info) != SUCCESS) {
+    MS_LOG(EXCEPTION) << "Save group info failed";
+  }
+}
+
+static void PipelinePostProcess(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
+  auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
+  if (pipeline_stages > 1) {
+    AddVirtualAssignAdd(root);
+    HandleReceiveParam(root, all_nodes);
+  }
+}
+
 bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer) {
 #if ((defined ENABLE_CPU) && (!defined _WIN32))
   if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {

@@ -3202,16 +3237,9 @@ bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer)
   // ForwardCommunication BackwardCommunication TensorRedistribution
   ParallelCommunication(root, all_nodes, manager);

-  if (pipeline_stages > 1) {
-    AddVirtualAssignAdd(root);
-    HandleReceiveParam(root, all_nodes);
-  }
+  PipelinePostProcess(root, all_nodes);

-  auto group_info = g_device_manager->group_info();
-  if (StrategyCheckpoint::GetInstance().group_info_save_on() &&
-      StrategyCheckpoint::GetInstance().SaveGroupInfo(group_info) != SUCCESS) {
-    MS_LOG(EXCEPTION) << "Save group info failed";
-  }
+  HandleGroupInfo();

   // handle full split parammeters in grad accumulation, do not contain optimizer-sharding's parameter
   HandleFullySplitParameters(root);

View File

@@ -67,7 +67,7 @@ TEST_F(TestActivationInfo, InferDevMatrixShape1) {
   Strategys inputs = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   Shape dev_matrix_shape = activation->dev_matrix_shape();

   Shape expect = {2, 4, 8, 16};

@@ -78,7 +78,7 @@ TEST_F(TestActivationInfo, InferSliceShape1) {
   Strategys str = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = activation->inputs_tensor_info();
   std::vector<TensorInfo> outputs = activation->outputs_tensor_info();

@@ -99,7 +99,7 @@ TEST_F(TestActivationInfo, GetTensorLayout1) {
   Strategys str = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = activation->inputs_tensor_info();
   std::vector<TensorInfo> outputs = activation->outputs_tensor_info();

@@ -120,7 +120,7 @@ TEST_F(TestActivationInfo, GetForwardOp1) {
   Strategys inputs = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   OperatorVector forward_op = activation->forward_op();
   size_t size = forward_op.size();

@@ -131,7 +131,7 @@ TEST_F(TestActivationInfo, GetMirrorOPs1) {
   Strategys inputs = {{1, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   MirrorOps mirror_ops = activation->mirror_ops();
   OperatorVector mirror_op = mirror_ops.at(0);

@@ -151,7 +151,7 @@ TEST_F(TestActivationInfo, GetMirrorOPs2) {
   Strategys inputs = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  activation->Init(strategy);
+  activation->Init(strategy, nullptr);
   MirrorOps mirror_ops = activation->mirror_ops();
   size_t size = mirror_ops.size();

@@ -164,7 +164,7 @@ TEST_F(TestActivationInfo, CheckStrategy1) {
   Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = activation->Init(strategy);
+  Status ret = activation->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -173,7 +173,7 @@ TEST_F(TestActivationInfo, CheckStrategy2) {
   Strategys inputs = {{2, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = activation->Init(strategy);
+  Status ret = activation->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

View File

@@ -81,7 +81,7 @@ TEST_F(TestActivation, test_activation_strategies) {
     ASSERT_NE(sp, nullptr);
     Cost cost = *(swc->cost_list[0]);

-    act_ptr_->InitForCostModel(sp);
+    act_ptr_->InitForCostModel(sp, nullptr);
    std::vector<TensorInfo> inputs_info = act_ptr_->inputs_tensor_info();
    std::vector<TensorInfo> outputs_info = act_ptr_->outputs_tensor_info();
    ASSERT_DOUBLE_EQ(act_ptr_->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()),

@@ -106,7 +106,7 @@ TEST_F(TestActivation, test_softmax_strategies) {
     Dimensions input0_stra = stra[0];
     ASSERT_GT(input0_stra.size(), 2);
     ASSERT_EQ(input0_stra[2], 1);
-    soft_ptr_->InitForCostModel(sp);
+    soft_ptr_->InitForCostModel(sp, nullptr);
    std::vector<TensorInfo> inputs_info = soft_ptr_->inputs_tensor_info();
    std::vector<TensorInfo> outputs_info = soft_ptr_->outputs_tensor_info();
    ASSERT_DOUBLE_EQ(soft_ptr_->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()),

View File

@@ -66,7 +66,7 @@ TEST_F(TestGeLUInfo, InferDevMatrixShape1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  gelu->Init(strategy);
+  gelu->Init(strategy, nullptr);
   Shape dev_matrix_shape = gelu->dev_matrix_shape();

   Shape expect = {2, 4, 1, 16};

@@ -77,7 +77,7 @@ TEST_F(TestGeLUInfo, InferSliceShape1) {
   Strategys str = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  gelu->Init(strategy);
+  gelu->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = gelu->inputs_tensor_info();
   std::vector<TensorInfo> outputs = gelu->outputs_tensor_info();

@@ -98,7 +98,7 @@ TEST_F(TestGeLUInfo, GetTensorLayout1) {
   Strategys str = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  gelu->Init(strategy);
+  gelu->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = gelu->inputs_tensor_info();
   std::vector<TensorInfo> outputs = gelu->outputs_tensor_info();

@@ -119,7 +119,7 @@ TEST_F(TestGeLUInfo, GetForwardOp1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  gelu->Init(strategy);
+  gelu->Init(strategy, nullptr);
   OperatorVector forward_op = gelu->forward_op();
   size_t size = forward_op.size();

@@ -130,7 +130,7 @@ TEST_F(TestGeLUInfo, GetMirrorOPs1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  gelu->Init(strategy);
+  gelu->Init(strategy, nullptr);
   MirrorOps mirror_ops = gelu->mirror_ops();
   size_t size = mirror_ops.size();

@@ -143,7 +143,7 @@ TEST_F(TestGeLUInfo, CheckStrategy1) {
   Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = gelu->Init(strategy);
+  Status ret = gelu->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -152,7 +152,7 @@ TEST_F(TestGeLUInfo, CheckStrategy2) {
   Strategys inputs = {{2, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = gelu->Init(strategy);
+  Status ret = gelu->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -161,7 +161,7 @@ TEST_F(TestGeLUInfo, CheckStrategy3) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = gelu->Init(strategy);
+  Status ret = gelu->Init(strategy, nullptr);
   ASSERT_EQ(ret, SUCCESS);
 }

View File

@@ -67,7 +67,7 @@ TEST_F(TestL2NormalizeInfo, InferDevMatrixShape1) {
   Strategys inputs = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   Shape dev_matrix_shape = norm->dev_matrix_shape();

   Shape expect = {4, 1, 8};

@@ -78,7 +78,7 @@ TEST_F(TestL2NormalizeInfo, InferSliceShape1) {
   Strategys str = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, str);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = norm->inputs_tensor_info();
   std::vector<TensorInfo> outputs = norm->outputs_tensor_info();

@@ -99,7 +99,7 @@ TEST_F(TestL2NormalizeInfo, GetTensorLayout1) {
   Strategys str = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, str);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = norm->inputs_tensor_info();
   std::vector<TensorInfo> outputs = norm->outputs_tensor_info();

@@ -120,7 +120,7 @@ TEST_F(TestL2NormalizeInfo, GetForwardOp1) {
   Strategys inputs = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   OperatorVector forward_op = norm->forward_op();
   size_t size = forward_op.size();

@@ -131,7 +131,7 @@ TEST_F(TestL2NormalizeInfo, GetMirrorOPs1) {
   Strategys inputs = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   MirrorOps mirror_ops = norm->mirror_ops();
   size_t size = mirror_ops.size();

@@ -143,7 +143,7 @@ TEST_F(TestL2NormalizeInfo, CheckStrategy1) {
   Strategys inputs = {{4, 1, 8}, {4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = norm->Init(strategy);
+  Status ret = norm->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -151,7 +151,7 @@ TEST_F(TestL2NormalizeInfo, CheckStrategy2) {
   Strategys inputs = {{4, 2, 3}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = norm->Init(strategy);
+  Status ret = norm->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -159,7 +159,7 @@ TEST_F(TestL2NormalizeInfo, CheckStrategy3) {
   Strategys inputs = {{4, 2, 3, 4}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = norm->Init(strategy);
+  Status ret = norm->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -167,7 +167,7 @@ TEST_F(TestL2NormalizeInfo, CheckStrategy4) {
   Strategys inputs = {{4, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = norm->Init(strategy);
+  Status ret = norm->Init(strategy, nullptr);
   ASSERT_EQ(ret, SUCCESS);
 }

@@ -175,7 +175,7 @@ TEST_F(TestL2NormalizeInfo, mirror_ops) {
   Strategys inputs = {{2, 1, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  norm->Init(strategy);
+  norm->Init(strategy, nullptr);
   MirrorOps mirror_ops = norm->mirror_ops();
   OperatorVector mirror_op = mirror_ops.at(0);

View File

@@ -67,7 +67,7 @@ TEST_F(TestLogSoftmaxInfo, InferDevMatrixShape1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   Shape dev_matrix_shape = log_softmax->dev_matrix_shape();

   Shape expect = {2, 4, 1, 16};

@@ -78,7 +78,7 @@ TEST_F(TestLogSoftmaxInfo, InferSliceShape1) {
   Strategys str = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = log_softmax->inputs_tensor_info();
   std::vector<TensorInfo> outputs = log_softmax->outputs_tensor_info();

@@ -99,7 +99,7 @@ TEST_F(TestLogSoftmaxInfo, GetTensorLayout1) {
   Strategys str = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, str);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = log_softmax->inputs_tensor_info();
   std::vector<TensorInfo> outputs = log_softmax->outputs_tensor_info();

@@ -120,7 +120,7 @@ TEST_F(TestLogSoftmaxInfo, GetForwardOp1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   OperatorVector forward_op = log_softmax->forward_op();
   size_t size = forward_op.size();

@@ -131,7 +131,7 @@ TEST_F(TestLogSoftmaxInfo, GetMirrorOPs1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   MirrorOps mirror_ops = log_softmax->mirror_ops();
   size_t size = mirror_ops.size();

@@ -144,7 +144,7 @@ TEST_F(TestLogSoftmaxInfo, CheckStrategy1) {
   Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = log_softmax->Init(strategy);
+  Status ret = log_softmax->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -153,7 +153,7 @@ TEST_F(TestLogSoftmaxInfo, CheckStrategy2) {
   Strategys inputs = {{2, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = log_softmax->Init(strategy);
+  Status ret = log_softmax->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -162,7 +162,7 @@ TEST_F(TestLogSoftmaxInfo, CheckStrategy3) {
   Strategys inputs = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  Status ret = log_softmax->Init(strategy);
+  Status ret = log_softmax->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }

@@ -170,7 +170,7 @@ TEST_F(TestLogSoftmaxInfo, GetDeviceList1) {
   Strategys inputs = {{2, 4, 1, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);

-  log_softmax->Init(strategy);
+  log_softmax->Init(strategy, nullptr);
   RankList dev_list = log_softmax->stage_device_list();
   ASSERT_EQ(dev_list.size(), 128);
 }

View File

@ -97,7 +97,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul1->dev_matrix_shape(); Shape dev_matrix_shape = matmul1->dev_matrix_shape();
Shape expect = {2, 4, 8, 16, 1}; Shape expect = {2, 4, 8, 16, 1};
@ -108,7 +108,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape2) {
Strategys inputs = {{2, 4, 8, 8}, {2, 4, 8, 2}}; Strategys inputs = {{2, 4, 8, 8}, {2, 4, 8, 2}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul1->dev_matrix_shape(); Shape dev_matrix_shape = matmul1->dev_matrix_shape();
Shape expect = {2, 4, 8, 8, 2}; Shape expect = {2, 4, 8, 8, 2};
@@ -120,7 +120,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape3) {
Strategys inputs = {{2, 4, 8, 16}, {1, 16}}; Strategys inputs = {{2, 4, 8, 16}, {1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul2->Init(strategy); matmul2->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul2->dev_matrix_shape(); Shape dev_matrix_shape = matmul2->dev_matrix_shape();
Shape expect = {2, 4, 8, 16, 1}; Shape expect = {2, 4, 8, 16, 1};
@@ -132,7 +132,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape4) {
Strategys inputs = {{2, 4, 8, 8}, {2, 8}}; Strategys inputs = {{2, 4, 8, 8}, {2, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul2->Init(strategy); matmul2->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul2->dev_matrix_shape(); Shape dev_matrix_shape = matmul2->dev_matrix_shape();
Shape expect = {2, 4, 8, 8, 2}; Shape expect = {2, 4, 8, 8, 2};
@@ -144,7 +144,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape5) {
Strategys inputs = {{8, 16}, {2, 4, 1, 16}}; Strategys inputs = {{8, 16}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul3->dev_matrix_shape(); Shape dev_matrix_shape = matmul3->dev_matrix_shape();
Shape expect = {2, 4, 8, 16, 1}; Shape expect = {2, 4, 8, 16, 1};
@@ -156,7 +156,7 @@ TEST_F(TestMatmulInfo, InferDevMatrixShape6) {
Strategys inputs = {{8, 8}, {2, 4, 2, 8}}; Strategys inputs = {{8, 8}, {2, 4, 2, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
Shape dev_matrix_shape = matmul3->dev_matrix_shape(); Shape dev_matrix_shape = matmul3->dev_matrix_shape();
Shape expect = {2, 4, 8, 8, 2}; Shape expect = {2, 4, 8, 8, 2};
@@ -167,7 +167,7 @@ TEST_F(TestMatmulInfo, InferTensorMap1) {
Strategys str = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys str = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul1->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul1->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul1->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul1->outputs_tensor_info();
@@ -193,7 +193,7 @@ TEST_F(TestMatmulInfo, InferTensorMap2) {
Strategys str = {{2, 4, 8, 16}, {1, 16}}; Strategys str = {{2, 4, 8, 16}, {1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul2->Init(strategy); matmul2->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul2->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul2->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul2->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul2->outputs_tensor_info();
@@ -219,7 +219,7 @@ TEST_F(TestMatmulInfo, InferTensorMap3) {
Strategys str = {{8, 16}, {2, 4, 1, 16}}; Strategys str = {{8, 16}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info();
@@ -244,7 +244,7 @@ TEST_F(TestMatmulInfo, InferSliceShape1) {
Strategys str = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys str = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul1->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul1->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul1->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul1->outputs_tensor_info();
@@ -270,7 +270,7 @@ TEST_F(TestMatmulInfo, InferSliceShape2) {
Strategys str = {{2, 4, 8, 16}, {1, 16}}; Strategys str = {{2, 4, 8, 16}, {1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul2->Init(strategy); matmul2->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul2->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul2->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul2->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul2->outputs_tensor_info();
@@ -296,7 +296,7 @@ TEST_F(TestMatmulInfo, InferSliceShape3) {
Strategys str = {{8, 16}, {2, 4, 1, 16}}; Strategys str = {{8, 16}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info();
@@ -322,7 +322,7 @@ TEST_F(TestMatmulInfo, GetTensorLayout3) {
Strategys str = {{8, 16}, {2, 4, 1, 16}}; Strategys str = {{8, 16}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info(); std::vector<TensorInfo> inputs = matmul3->inputs_tensor_info();
std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info(); std::vector<TensorInfo> outputs = matmul3->outputs_tensor_info();
@@ -347,7 +347,7 @@ TEST_F(TestMatmulInfo, GetForwardOp1) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
OperatorVector forward_op = matmul1->forward_op(); OperatorVector forward_op = matmul1->forward_op();
OperatorArgs operator_args = forward_op.at(0).second; OperatorArgs operator_args = forward_op.at(0).second;
@@ -373,7 +373,7 @@ TEST_F(TestMatmulInfo, GetForwardOp2) {
Strategys inputs = {{2, 4, 8, 1}, {2, 4, 1, 16}}; Strategys inputs = {{2, 4, 8, 1}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
OperatorVector forward_op = matmul1->forward_op(); OperatorVector forward_op = matmul1->forward_op();
ASSERT_EQ(forward_op.size(), 0); ASSERT_EQ(forward_op.size(), 0);
@@ -383,7 +383,7 @@ TEST_F(TestMatmulInfo, GetVirtualDivOp1) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
OperatorVector virtual_div_op = matmul1->virtual_div_op(); OperatorVector virtual_div_op = matmul1->virtual_div_op();
OperatorArgs operator_args = virtual_div_op.at(0).second; OperatorArgs operator_args = virtual_div_op.at(0).second;
@@ -402,7 +402,7 @@ TEST_F(TestMatmulInfo, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
MirrorOps mirror_ops = matmul1->mirror_ops(); MirrorOps mirror_ops = matmul1->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
@@ -422,7 +422,7 @@ TEST_F(TestMatmulInfo, GetMirrorOPs2) {
Strategys inputs = {{2, 4, 1, 16}, {8, 16}}; Strategys inputs = {{2, 4, 1, 16}, {8, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul2->Init(strategy); matmul2->Init(strategy, nullptr);
MirrorOps mirror_ops = matmul2->mirror_ops(); MirrorOps mirror_ops = matmul2->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
@@ -442,7 +442,7 @@ TEST_F(TestMatmulInfo, GetMirrorOPs3) {
Strategys inputs = {{8, 16}, {2, 4, 1, 16}}; Strategys inputs = {{8, 16}, {2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul3->Init(strategy); matmul3->Init(strategy, nullptr);
MirrorOps mirror_ops = matmul3->mirror_ops(); MirrorOps mirror_ops = matmul3->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
@@ -460,7 +460,7 @@ TEST_F(TestMatmulInfo, GetMirrorOPs4) {
Strategys inputs = {{2, 4, 1, 16}, {2, 4, 16, 8}}; Strategys inputs = {{2, 4, 1, 16}, {2, 4, 16, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
MirrorOps mirror_ops = matmul1->mirror_ops(); MirrorOps mirror_ops = matmul1->mirror_ops();
ASSERT_EQ(mirror_ops.size(), 2); ASSERT_EQ(mirror_ops.size(), 2);
@@ -471,8 +471,8 @@ TEST_F(TestMatmulInfo, InitTwice) {
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
// init twice // init twice
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
matmul1->Init(strategy); matmul1->Init(strategy, nullptr);
MirrorOps mirror_ops = matmul1->mirror_ops(); MirrorOps mirror_ops = matmul1->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
@@ -492,7 +492,7 @@ TEST_F(TestMatmulInfo, CheckStrategy1) {
Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -501,7 +501,7 @@ TEST_F(TestMatmulInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8, 16}, {4, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -510,7 +510,7 @@ TEST_F(TestMatmulInfo, CheckStrategy3) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 8, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -519,7 +519,7 @@ TEST_F(TestMatmulInfo, CheckStrategy4) {
Strategys inputs = {{2, 4, 8, 16}, {2, 3, 16, 1}}; Strategys inputs = {{2, 4, 8, 16}, {2, 3, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -528,7 +528,7 @@ TEST_F(TestMatmulInfo, CheckStrategy5) {
Strategys inputs = {{0, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{0, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -537,7 +537,7 @@ TEST_F(TestMatmulInfo, CheckStrategy6) {
Strategys inputs = {{-1, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{-1, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -546,7 +546,7 @@ TEST_F(TestMatmulInfo, CheckStrategy7) {
Strategys inputs = {{4, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{4, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul1->Init(strategy); Status ret = matmul1->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -555,7 +555,7 @@ TEST_F(TestMatmulInfo, InitFailed) {
Strategys inputs = {{4, 4, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{4, 4, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = matmul4->Init(strategy); Status ret = matmul4->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -566,7 +566,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies1) {
for (const auto& swc : sc) { for (const auto& swc : sc) {
StrategyPtr sp = swc->strategy_ptr; StrategyPtr sp = swc->strategy_ptr;
Cost cost = *(swc->cost_list[0]); Cost cost = *(swc->cost_list[0]);
matmul1->InitForCostModel(sp); matmul1->InitForCostModel(sp, nullptr);
std::vector<TensorInfo> inputs_info = matmul1->inputs_tensor_info(); std::vector<TensorInfo> inputs_info = matmul1->inputs_tensor_info();
std::vector<TensorInfo> outputs_info = matmul1->outputs_tensor_info(); std::vector<TensorInfo> outputs_info = matmul1->outputs_tensor_info();
ASSERT_DOUBLE_EQ(matmul1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), ASSERT_DOUBLE_EQ(matmul1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()),
@@ -582,7 +582,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies2) {
for (const auto& swc : sc) { for (const auto& swc : sc) {
StrategyPtr sp = swc->strategy_ptr; StrategyPtr sp = swc->strategy_ptr;
Cost cost = *(swc->cost_list[0]); Cost cost = *(swc->cost_list[0]);
matmul3->InitForCostModel(sp); matmul3->InitForCostModel(sp, nullptr);
std::vector<TensorInfo> inputs_info = matmul3->inputs_tensor_info(); std::vector<TensorInfo> inputs_info = matmul3->inputs_tensor_info();
std::vector<TensorInfo> outputs_info = matmul3->outputs_tensor_info(); std::vector<TensorInfo> outputs_info = matmul3->outputs_tensor_info();

View File

@@ -67,7 +67,7 @@ TEST_F(TestOneHotInfo, InferDevMatrixShape1) {
Strategys inputs = {{8, 1}, {}, {}}; Strategys inputs = {{8, 1}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info->dev_matrix_shape();
@@ -79,7 +79,7 @@ TEST_F(TestOneHotInfo, InferDevMatrixShape2) {
Strategys inputs = {{4, 1}, {}, {}}; Strategys inputs = {{4, 1}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info->dev_matrix_shape();
@@ -91,7 +91,7 @@ TEST_F(TestOneHotInfo, InferDevMatrixShape3) {
Strategys inputs = {{4, 2}, {}, {}}; Strategys inputs = {{4, 2}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info->dev_matrix_shape();
@@ -103,7 +103,7 @@ TEST_F(TestOneHotInfo, InferTensorMap2) {
Strategys str = {{8, 1}, {}, {}}; Strategys str = {{8, 1}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info();
@@ -125,7 +125,7 @@ TEST_F(TestOneHotInfo, InferSliceShape1) {
Strategys str = {{8, 1}, {}, {}}; Strategys str = {{8, 1}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info();
@@ -147,7 +147,7 @@ TEST_F(TestOneHotInfo, InferSliceShape2) {
Strategys str = {{4, 2}, {}, {}}; Strategys str = {{4, 2}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info();
@@ -169,7 +169,7 @@ TEST_F(TestOneHotInfo, InferSliceShape3) {
Strategys str = {{2, 2}, {}, {}}; Strategys str = {{2, 2}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info->outputs_tensor_info();
@@ -191,7 +191,7 @@ TEST_F(TestOneHotInfo, GetMirrorOPs1) {
Strategys inputs = {{8, 1}, {}, {}}; Strategys inputs = {{8, 1}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info->Init(strategy); Status status = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
MirrorOps mirror_ops = onehot_info->mirror_ops(); MirrorOps mirror_ops = onehot_info->mirror_ops();
@@ -202,7 +202,7 @@ TEST_F(TestOneHotInfo, CheckStrategy1) {
Strategys inputs = {{16}, {}, {}}; Strategys inputs = {{16}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = onehot_info->Init(strategy); Status ret = onehot_info->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
} // namespace parallel } // namespace parallel

View File

@@ -67,7 +67,7 @@ TEST_F(TestOneHotInfo2, InferDevMatrixShape1) {
Strategys inputs = {{1, 8}, {}, {}}; Strategys inputs = {{1, 8}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info2->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info2->dev_matrix_shape();
@@ -79,7 +79,7 @@ TEST_F(TestOneHotInfo2, InferDevMatrixShape2) {
Strategys inputs = {{1, 4}, {}, {}}; Strategys inputs = {{1, 4}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info2->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info2->dev_matrix_shape();
@@ -91,7 +91,7 @@ TEST_F(TestOneHotInfo2, InferDevMatrixShape3) {
Strategys inputs = {{2, 4}, {}, {}}; Strategys inputs = {{2, 4}, {}, {}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
Shape dev_matrix_shape = onehot_info2->dev_matrix_shape(); Shape dev_matrix_shape = onehot_info2->dev_matrix_shape();
@@ -103,7 +103,7 @@ TEST_F(TestOneHotInfo2, InferTensorMap2) {
Strategys str = {{1, 8}, {}, {}}; Strategys str = {{1, 8}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info();
@@ -125,7 +125,7 @@ TEST_F(TestOneHotInfo2, InferSliceShape1) {
Strategys str = {{1, 8}, {}, {}}; Strategys str = {{1, 8}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info();
@@ -147,7 +147,7 @@ TEST_F(TestOneHotInfo2, InferSliceShape2) {
Strategys str = {{2, 4}, {}, {}}; Strategys str = {{2, 4}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info();
@@ -169,7 +169,7 @@ TEST_F(TestOneHotInfo2, InferSliceShape3) {
Strategys str = {{2, 2}, {}, {}}; Strategys str = {{2, 2}, {}, {}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
Status status = onehot_info2->Init(strategy); Status status = onehot_info2->Init(strategy, nullptr);
ASSERT_EQ(status, SUCCESS); ASSERT_EQ(status, SUCCESS);
std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info(); std::vector<TensorInfo> inputs = onehot_info2->inputs_tensor_info();
std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info(); std::vector<TensorInfo> outputs = onehot_info2->outputs_tensor_info();

View File

@@ -66,7 +66,7 @@ TEST_F(TestPowInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
pow->Init(strategy); pow->Init(strategy, nullptr);
Shape dev_matrix_shape = pow->dev_matrix_shape(); Shape dev_matrix_shape = pow->dev_matrix_shape();
Shape expect = {2, 4, 8}; Shape expect = {2, 4, 8};
@@ -77,7 +77,7 @@ TEST_F(TestPowInfo, InferSliceShape1) {
Strategys str = {{2, 4, 8}, {2, 4, 8}}; Strategys str = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
pow->Init(strategy); pow->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = pow->inputs_tensor_info(); std::vector<TensorInfo> inputs = pow->inputs_tensor_info();
std::vector<TensorInfo> outputs = pow->outputs_tensor_info(); std::vector<TensorInfo> outputs = pow->outputs_tensor_info();
@@ -98,7 +98,7 @@ TEST_F(TestPowInfo, GetTensorLayout1) {
Strategys str = {{2, 4, 8}, {2, 4, 8}}; Strategys str = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
pow->Init(strategy); pow->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = pow->inputs_tensor_info(); std::vector<TensorInfo> inputs = pow->inputs_tensor_info();
std::vector<TensorInfo> outputs = pow->outputs_tensor_info(); std::vector<TensorInfo> outputs = pow->outputs_tensor_info();
@@ -119,7 +119,7 @@ TEST_F(TestPowInfo, GetForwardOp1) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
pow->Init(strategy); pow->Init(strategy, nullptr);
OperatorVector forward_op = pow->forward_op(); OperatorVector forward_op = pow->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -130,7 +130,7 @@ TEST_F(TestPowInfo, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
pow->Init(strategy); pow->Init(strategy, nullptr);
MirrorOps mirror_ops = pow->mirror_ops(); MirrorOps mirror_ops = pow->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -142,7 +142,7 @@ TEST_F(TestPowInfo, CheckStrategy1) {
Strategys inputs = {{2, 2, 8}, {2, 4, 8}}; Strategys inputs = {{2, 2, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = pow->Init(strategy); Status ret = pow->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -150,7 +150,7 @@ TEST_F(TestPowInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8, 16}, {2, 4, 8, 16}}; Strategys inputs = {{2, 4, 8, 16}, {2, 4, 8, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = pow->Init(strategy); Status ret = pow->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -158,7 +158,7 @@ TEST_F(TestPowInfo, CheckStrategy3) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = pow->Init(strategy); Status ret = pow->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }

View File

@@ -67,7 +67,7 @@ TEST_F(TestPReLUInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 1, 8, 16}, {1}}; Strategys inputs = {{2, 1, 8, 16}, {1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
prelu->Init(strategy); prelu->Init(strategy, nullptr);
Shape dev_matrix_shape = prelu->dev_matrix_shape(); Shape dev_matrix_shape = prelu->dev_matrix_shape();
Shape expect = {2, 1, 8, 16, 4}; Shape expect = {2, 1, 8, 16, 4};
@@ -78,7 +78,7 @@ TEST_F(TestPReLUInfo, InferSliceShape1) {
Strategys str = {{2, 1, 8, 16}, {1}}; Strategys str = {{2, 1, 8, 16}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu->Init(strategy); prelu->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = prelu->inputs_tensor_info(); std::vector<TensorInfo> inputs = prelu->inputs_tensor_info();
std::vector<TensorInfo> outputs = prelu->outputs_tensor_info(); std::vector<TensorInfo> outputs = prelu->outputs_tensor_info();
@@ -101,7 +101,7 @@ TEST_F(TestPReLUInfo, GetTensorLayout1) {
Strategys str = {{2, 1, 8, 16}, {1}}; Strategys str = {{2, 1, 8, 16}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu->Init(strategy); prelu->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = prelu->inputs_tensor_info(); std::vector<TensorInfo> inputs = prelu->inputs_tensor_info();
std::vector<TensorInfo> outputs = prelu->outputs_tensor_info(); std::vector<TensorInfo> outputs = prelu->outputs_tensor_info();
@@ -124,7 +124,7 @@ TEST_F(TestPReLUInfo, GetTensorLayout1) {
TEST_F(TestPReLUInfo, GetMirrorOPs1) { TEST_F(TestPReLUInfo, GetMirrorOPs1) {
Strategys str = {{2, 1, 2, 2}, {1}}; Strategys str = {{2, 1, 2, 2}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu->Init(strategy); prelu->Init(strategy, nullptr);
MirrorOps mirror_ops = prelu->mirror_ops(); MirrorOps mirror_ops = prelu->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
OperatorArgs operator_args = mirror_op.at(0).second; OperatorArgs operator_args = mirror_op.at(0).second;
@@ -141,14 +141,14 @@ TEST_F(TestPReLUInfo, CheckStrategy1) {
// Success: {{2,1,8,16},{1}} // Success: {{2,1,8,16},{1}}
Strategys inputs = {{2, 1, 8, 16}}; Strategys inputs = {{2, 1, 8, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = prelu->Init(strategy); Status ret = prelu->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
TEST_F(TestPReLUInfo, CheckStrategy2) { TEST_F(TestPReLUInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8, 16}, {4}}; Strategys inputs = {{2, 4, 8, 16}, {4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = prelu->Init(strategy); Status ret = prelu->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }
@@ -172,7 +172,7 @@ TEST_F(TestPReLUInfo, InferDevMatrixShape_2d1) {
Strategys inputs = {{128, 1}, {1}}; Strategys inputs = {{128, 1}, {1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
prelu_2d->Init(strategy); prelu_2d->Init(strategy, nullptr);
Shape dev_matrix_shape = prelu_2d->dev_matrix_shape(); Shape dev_matrix_shape = prelu_2d->dev_matrix_shape();
Shape expect = {128, 1, 8}; Shape expect = {128, 1, 8};
@@ -183,7 +183,7 @@ TEST_F(TestPReLUInfo, InferSliceShape_2d1) {
Strategys str = {{128, 1}, {1}}; Strategys str = {{128, 1}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu_2d->Init(strategy); prelu_2d->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = prelu_2d->inputs_tensor_info(); std::vector<TensorInfo> inputs = prelu_2d->inputs_tensor_info();
std::vector<TensorInfo> outputs = prelu_2d->outputs_tensor_info(); std::vector<TensorInfo> outputs = prelu_2d->outputs_tensor_info();
@@ -206,7 +206,7 @@ TEST_F(TestPReLUInfo, GetTensorLayout_2d1) {
Strategys str = {{128, 1}, {1}}; Strategys str = {{128, 1}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu_2d->Init(strategy); prelu_2d->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = prelu_2d->inputs_tensor_info(); std::vector<TensorInfo> inputs = prelu_2d->inputs_tensor_info();
std::vector<TensorInfo> outputs = prelu_2d->outputs_tensor_info(); std::vector<TensorInfo> outputs = prelu_2d->outputs_tensor_info();
@@ -229,7 +229,7 @@ TEST_F(TestPReLUInfo, GetTensorLayout_2d1) {
TEST_F(TestPReLUInfo, GetMirrorOPs_2d1) { TEST_F(TestPReLUInfo, GetMirrorOPs_2d1) {
Strategys str = {{128, 1}, {1}}; Strategys str = {{128, 1}, {1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
prelu_2d->Init(strategy); prelu_2d->Init(strategy, nullptr);
MirrorOps mirror_ops = prelu_2d->mirror_ops(); MirrorOps mirror_ops = prelu_2d->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);
OperatorArgs operator_args = mirror_op.at(0).second; OperatorArgs operator_args = mirror_op.at(0).second;
@@ -246,14 +246,14 @@ TEST_F(TestPReLUInfo, CheckStrategy_2d1) {
// Success: {{128, 1}, {1}} // Success: {{128, 1}, {1}}
Strategys inputs = {{128, 1}}; Strategys inputs = {{128, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = prelu_2d->Init(strategy); Status ret = prelu_2d->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
TEST_F(TestPReLUInfo, CheckStrategy_2d2) { TEST_F(TestPReLUInfo, CheckStrategy_2d2) {
Strategys inputs = {{128, 4}, {4}}; Strategys inputs = {{128, 4}, {4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = prelu_2d->Init(strategy); Status ret = prelu_2d->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }

View File

@@ -71,7 +71,7 @@ TEST_F(TestReduceSumInfo, InferDevMatrixShape1) {
Strategys inputs = {{4, 8, 1}}; Strategys inputs = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
Shape dev_matrix_shape = reduce_sum->dev_matrix_shape(); Shape dev_matrix_shape = reduce_sum->dev_matrix_shape();
Shape expect = {4, 8, 1}; Shape expect = {4, 8, 1};
@@ -82,7 +82,7 @@ TEST_F(TestReduceSumInfo, InferSliceShape1) {
Strategys str = {{4, 8, 1}}; Strategys str = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reduce_sum->inputs_tensor_info(); std::vector<TensorInfo> inputs = reduce_sum->inputs_tensor_info();
std::vector<TensorInfo> outputs = reduce_sum->outputs_tensor_info(); std::vector<TensorInfo> outputs = reduce_sum->outputs_tensor_info();
@@ -103,7 +103,7 @@ TEST_F(TestReduceSumInfo, GetTensorLayout1) {
Strategys str = {{4, 8, 1}}; Strategys str = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reduce_sum->inputs_tensor_info(); std::vector<TensorInfo> inputs = reduce_sum->inputs_tensor_info();
std::vector<TensorInfo> outputs = reduce_sum->outputs_tensor_info(); std::vector<TensorInfo> outputs = reduce_sum->outputs_tensor_info();
@@ -124,7 +124,7 @@ TEST_F(TestReduceSumInfo, GetForwardOp1) {
Strategys inputs = {{4, 8, 1}}; Strategys inputs = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
OperatorVector forward_op = reduce_sum->forward_op(); OperatorVector forward_op = reduce_sum->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -135,7 +135,7 @@ TEST_F(TestReduceSumInfo, GetForwardOp2) {
Strategys inputs = {{4, 4, 2}}; Strategys inputs = {{4, 4, 2}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
OperatorVector forward_op = reduce_sum->forward_op(); OperatorVector forward_op = reduce_sum->forward_op();
OperatorArgs operator_args = forward_op.at(0).second; OperatorArgs operator_args = forward_op.at(0).second;
OperatorAttrs operator_attrs = operator_args.first; OperatorAttrs operator_attrs = operator_args.first;
@@ -159,7 +159,7 @@ TEST_F(TestReduceSumInfo, GetMirrorOPs1) {
Strategys inputs = {{4, 8, 1}}; Strategys inputs = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
MirrorOps mirror_ops = reduce_sum->mirror_ops(); MirrorOps mirror_ops = reduce_sum->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -171,7 +171,7 @@ TEST_F(TestReduceSumInfo, GetMirrorOPs2) {
Strategys inputs = {{4, 4, 1}}; Strategys inputs = {{4, 4, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reduce_sum->Init(strategy); reduce_sum->Init(strategy, nullptr);
MirrorOps mirror_ops = reduce_sum->mirror_ops(); MirrorOps mirror_ops = reduce_sum->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(0); OperatorVector mirror_op = mirror_ops.at(0);
OperatorArgs operator_args = mirror_op.at(0).second; OperatorArgs operator_args = mirror_op.at(0).second;
@@ -190,7 +190,7 @@ TEST_F(TestReduceSumInfo, CheckStrategy1) {
Strategys inputs = {{2, 2, 8, 16}}; Strategys inputs = {{2, 2, 8, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reduce_sum->Init(strategy); Status ret = reduce_sum->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -198,7 +198,7 @@ TEST_F(TestReduceSumInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reduce_sum->Init(strategy); Status ret = reduce_sum->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -206,7 +206,7 @@ TEST_F(TestReduceSumInfo, CheckStrategy3) {
Strategys inputs = {{4, 4, 2}}; Strategys inputs = {{4, 4, 2}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reduce_sum->Init(strategy); Status ret = reduce_sum->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }
@@ -214,7 +214,7 @@ TEST_F(TestReduceSumInfo, CheckStrategy4) {
Strategys inputs = {{4, 8, 1}}; Strategys inputs = {{4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reduce_sum->Init(strategy); Status ret = reduce_sum->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }
} // namespace parallel } // namespace parallel

View File

@@ -71,7 +71,7 @@ TEST_F(TestReshapeInfo, InferDevMatrixShape1) {
Strategys inputs = {{4, 1, 1, 1}}; Strategys inputs = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
Shape dev_matrix_shape = reshape->dev_matrix_shape(); Shape dev_matrix_shape = reshape->dev_matrix_shape();
Shape expect = {4, 1, 1, 1, 8}; Shape expect = {4, 1, 1, 1, 8};
@@ -82,7 +82,7 @@ TEST_F(TestReshapeInfo, InferDevMatrixShape2) {
Strategys inputs = {{32, 1, 1, 1}}; Strategys inputs = {{32, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
Shape dev_matrix_shape = reshape->dev_matrix_shape(); Shape dev_matrix_shape = reshape->dev_matrix_shape();
Shape expect = {32, 1, 1, 1}; Shape expect = {32, 1, 1, 1};
@@ -93,7 +93,7 @@ TEST_F(TestReshapeInfo, InferSliceShape1) {
Strategys str = {{4, 1, 1, 1}}; Strategys str = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reshape->inputs_tensor_info(); std::vector<TensorInfo> inputs = reshape->inputs_tensor_info();
std::vector<TensorInfo> outputs = reshape->outputs_tensor_info(); std::vector<TensorInfo> outputs = reshape->outputs_tensor_info();
@@ -114,7 +114,7 @@ TEST_F(TestReshapeInfo, InferSliceShape2) {
Strategys str = {{32, 1, 1, 1}}; Strategys str = {{32, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reshape->inputs_tensor_info(); std::vector<TensorInfo> inputs = reshape->inputs_tensor_info();
std::vector<TensorInfo> outputs = reshape->outputs_tensor_info(); std::vector<TensorInfo> outputs = reshape->outputs_tensor_info();
@@ -135,7 +135,7 @@ TEST_F(TestReshapeInfo, GetTensorLayout1) {
Strategys str = {{4, 1, 1, 1}}; Strategys str = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reshape->inputs_tensor_info(); std::vector<TensorInfo> inputs = reshape->inputs_tensor_info();
std::vector<TensorInfo> outputs = reshape->outputs_tensor_info(); std::vector<TensorInfo> outputs = reshape->outputs_tensor_info();
@@ -156,7 +156,7 @@ TEST_F(TestReshapeInfo, GetTensorLayout2) {
Strategys str = {{32, 1, 1, 1}}; Strategys str = {{32, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = reshape->inputs_tensor_info(); std::vector<TensorInfo> inputs = reshape->inputs_tensor_info();
std::vector<TensorInfo> outputs = reshape->outputs_tensor_info(); std::vector<TensorInfo> outputs = reshape->outputs_tensor_info();
@@ -177,7 +177,7 @@ TEST_F(TestReshapeInfo, GetForwardOp1) {
Strategys inputs = {{4, 1, 1, 1}}; Strategys inputs = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
OperatorVector forward_op = reshape->forward_op(); OperatorVector forward_op = reshape->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -188,7 +188,7 @@ TEST_F(TestReshapeInfo, GetMirrorOPs1) {
Strategys inputs = {{4, 1, 1, 1}}; Strategys inputs = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
reshape->Init(strategy); reshape->Init(strategy, nullptr);
MirrorOps mirror_ops = reshape->mirror_ops(); MirrorOps mirror_ops = reshape->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -200,7 +200,7 @@ TEST_F(TestReshapeInfo, CheckStrategy1) {
Strategys inputs = {{1, 4, 8}}; Strategys inputs = {{1, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reshape->Init(strategy); Status ret = reshape->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -208,7 +208,7 @@ TEST_F(TestReshapeInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reshape->Init(strategy); Status ret = reshape->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -216,7 +216,7 @@ TEST_F(TestReshapeInfo, CheckStrategy3) {
Strategys inputs = {{4, 1, 1, 1}}; Strategys inputs = {{4, 1, 1, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = reshape->Init(strategy); Status ret = reshape->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }
} // namespace parallel } // namespace parallel

View File

@@ -67,7 +67,7 @@ TEST_F(TestSoftmaxLoss, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}}; Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
loss->Init(strategy); loss->Init(strategy, nullptr);
Shape dev_matrix_shape = loss->dev_matrix_shape(); Shape dev_matrix_shape = loss->dev_matrix_shape();
Shape expect = {2, 4, 8, 1}; Shape expect = {2, 4, 8, 1};
@@ -78,7 +78,7 @@ TEST_F(TestSoftmaxLoss, InferSliceShape1) {
Strategys str = {{2, 4, 8, 1}, {2, 4, 8, 1}}; Strategys str = {{2, 4, 8, 1}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
loss->Init(strategy); loss->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = loss->inputs_tensor_info(); std::vector<TensorInfo> inputs = loss->inputs_tensor_info();
std::vector<TensorInfo> outputs = loss->outputs_tensor_info(); std::vector<TensorInfo> outputs = loss->outputs_tensor_info();
@@ -107,7 +107,7 @@ TEST_F(TestSoftmaxLoss, GetTensorLayout1) {
Strategys str = {{2, 4, 8, 1}, {2, 4, 8, 1}}; Strategys str = {{2, 4, 8, 1}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
loss->Init(strategy); loss->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = loss->inputs_tensor_info(); std::vector<TensorInfo> inputs = loss->inputs_tensor_info();
std::vector<TensorInfo> outputs = loss->outputs_tensor_info(); std::vector<TensorInfo> outputs = loss->outputs_tensor_info();
@@ -136,7 +136,7 @@ TEST_F(TestSoftmaxLoss, GetForwardOp1) {
Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}}; Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
loss->Init(strategy); loss->Init(strategy, nullptr);
OperatorVector forward_op = loss->forward_op(); OperatorVector forward_op = loss->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -147,7 +147,7 @@ TEST_F(TestSoftmaxLoss, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}}; Strategys inputs = {{2, 4, 8, 1}, {2, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
loss->Init(strategy); loss->Init(strategy, nullptr);
MirrorOps mirror_ops = loss->mirror_ops(); MirrorOps mirror_ops = loss->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -159,7 +159,7 @@ TEST_F(TestSoftmaxLoss, GetVirtualDivOPs1) {
Strategys inputs = {{1, 4, 8, 1}, {1, 4, 8, 1}}; Strategys inputs = {{1, 4, 8, 1}, {1, 4, 8, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
loss->Init(strategy); loss->Init(strategy, nullptr);
OperatorVector virtual_div_op = loss->virtual_div_op(); OperatorVector virtual_div_op = loss->virtual_div_op();
OperatorArgs operator_args = virtual_div_op.at(0).second; OperatorArgs operator_args = virtual_div_op.at(0).second;
@@ -179,7 +179,7 @@ TEST_F(TestSoftmaxLoss, CheckStrategy1) {
Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = loss->Init(strategy); Status ret = loss->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -188,7 +188,7 @@ TEST_F(TestSoftmaxLoss, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}}; Strategys inputs = {{2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = loss->Init(strategy); Status ret = loss->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }

View File

@@ -71,7 +71,7 @@ TEST_F(TestSoftmaxInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
softmax->Init(strategy); softmax->Init(strategy, nullptr);
Shape dev_matrix_shape = softmax->dev_matrix_shape(); Shape dev_matrix_shape = softmax->dev_matrix_shape();
Shape expect = {2, 4, 1, 16}; Shape expect = {2, 4, 1, 16};
@@ -82,7 +82,7 @@ TEST_F(TestSoftmaxInfo, InferSliceShape1) {
Strategys str = {{2, 4, 1, 16}}; Strategys str = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
softmax->Init(strategy); softmax->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = softmax->inputs_tensor_info(); std::vector<TensorInfo> inputs = softmax->inputs_tensor_info();
std::vector<TensorInfo> outputs = softmax->outputs_tensor_info(); std::vector<TensorInfo> outputs = softmax->outputs_tensor_info();
@@ -103,7 +103,7 @@ TEST_F(TestSoftmaxInfo, GetTensorLayout1) {
Strategys str = {{2, 4, 1, 16}}; Strategys str = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
softmax->Init(strategy); softmax->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = softmax->inputs_tensor_info(); std::vector<TensorInfo> inputs = softmax->inputs_tensor_info();
std::vector<TensorInfo> outputs = softmax->outputs_tensor_info(); std::vector<TensorInfo> outputs = softmax->outputs_tensor_info();
@@ -124,7 +124,7 @@ TEST_F(TestSoftmaxInfo, GetForwardOp1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
softmax->Init(strategy); softmax->Init(strategy, nullptr);
OperatorVector forward_op = softmax->forward_op(); OperatorVector forward_op = softmax->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -135,7 +135,7 @@ TEST_F(TestSoftmaxInfo, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
softmax->Init(strategy); softmax->Init(strategy, nullptr);
MirrorOps mirror_ops = softmax->mirror_ops(); MirrorOps mirror_ops = softmax->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -148,7 +148,7 @@ TEST_F(TestSoftmaxInfo, CheckStrategy1) {
Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = softmax->Init(strategy); Status ret = softmax->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -157,7 +157,7 @@ TEST_F(TestSoftmaxInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}}; Strategys inputs = {{2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = softmax->Init(strategy); Status ret = softmax->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -166,7 +166,7 @@ TEST_F(TestSoftmaxInfo, CheckStrategy3) {
Strategys inputs = {{2, 4, 8, 16}}; Strategys inputs = {{2, 4, 8, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = softmax->Init(strategy); Status ret = softmax->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -175,7 +175,7 @@ TEST_F(TestSoftmaxInfo, InitFailed1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = softmax2->Init(strategy); Status ret = softmax2->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -184,7 +184,7 @@ TEST_F(TestSoftmaxInfo, InitFailed2) {
Strategys inputs = {{2, 4, 1, 100}}; Strategys inputs = {{2, 4, 1, 100}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = softmax2->Init(strategy); Status ret = softmax2->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }

View File

@@ -66,7 +66,7 @@ TEST_F(TestTanhInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tanh->Init(strategy); tanh->Init(strategy, nullptr);
Shape dev_matrix_shape = tanh->dev_matrix_shape(); Shape dev_matrix_shape = tanh->dev_matrix_shape();
Shape expect = {2, 4, 1, 16}; Shape expect = {2, 4, 1, 16};
@@ -77,7 +77,7 @@ TEST_F(TestTanhInfo, InferSliceShape1) {
Strategys str = {{2, 4, 1, 16}}; Strategys str = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
tanh->Init(strategy); tanh->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = tanh->inputs_tensor_info(); std::vector<TensorInfo> inputs = tanh->inputs_tensor_info();
std::vector<TensorInfo> outputs = tanh->outputs_tensor_info(); std::vector<TensorInfo> outputs = tanh->outputs_tensor_info();
@@ -98,7 +98,7 @@ TEST_F(TestTanhInfo, GetTensorLayout1) {
Strategys str = {{2, 4, 1, 16}}; Strategys str = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
tanh->Init(strategy); tanh->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = tanh->inputs_tensor_info(); std::vector<TensorInfo> inputs = tanh->inputs_tensor_info();
std::vector<TensorInfo> outputs = tanh->outputs_tensor_info(); std::vector<TensorInfo> outputs = tanh->outputs_tensor_info();
@@ -119,7 +119,7 @@ TEST_F(TestTanhInfo, GetForwardOp1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tanh->Init(strategy); tanh->Init(strategy, nullptr);
OperatorVector forward_op = tanh->forward_op(); OperatorVector forward_op = tanh->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@@ -130,7 +130,7 @@ TEST_F(TestTanhInfo, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tanh->Init(strategy); tanh->Init(strategy, nullptr);
MirrorOps mirror_ops = tanh->mirror_ops(); MirrorOps mirror_ops = tanh->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@@ -143,7 +143,7 @@ TEST_F(TestTanhInfo, CheckStrategy1) {
Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}}; Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tanh->Init(strategy); Status ret = tanh->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -152,7 +152,7 @@ TEST_F(TestTanhInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}}; Strategys inputs = {{2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tanh->Init(strategy); Status ret = tanh->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@@ -161,7 +161,7 @@ TEST_F(TestTanhInfo, CheckStrategy3) {
Strategys inputs = {{2, 4, 1, 16}}; Strategys inputs = {{2, 4, 1, 16}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tanh->Init(strategy); Status ret = tanh->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }

View File

@@ -69,7 +69,7 @@ TEST_F(TestTensorAddInfo, InferDevMatrixShape1) {
Strategys inputs = {{2, 4, 4}, {2, 4, 4}}; Strategys inputs = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tensor_add->Init(strategy); tensor_add->Init(strategy, nullptr);
Shape dev_matrix_shape = tensor_add->dev_matrix_shape(); Shape dev_matrix_shape = tensor_add->dev_matrix_shape();
Shape expect = {2, 4, 4}; Shape expect = {2, 4, 4};
@@ -80,7 +80,7 @@ TEST_F(TestTensorAddInfo, InferSliceShape1) {
Strategys str = {{2, 4, 4}, {2, 4, 4}}; Strategys str = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
tensor_add->Init(strategy); tensor_add->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = tensor_add->inputs_tensor_info(); std::vector<TensorInfo> inputs = tensor_add->inputs_tensor_info();
std::vector<TensorInfo> outputs = tensor_add->outputs_tensor_info(); std::vector<TensorInfo> outputs = tensor_add->outputs_tensor_info();
@@ -104,7 +104,7 @@ TEST_F(TestTensorAddInfo, GetTensorLayout1) {
Strategys str = {{2, 4, 4}, {2, 4, 4}}; Strategys str = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, str); StrategyPtr strategy = NewStrategy(0, str);
tensor_add->Init(strategy); tensor_add->Init(strategy, nullptr);
std::vector<TensorInfo> inputs = tensor_add->inputs_tensor_info(); std::vector<TensorInfo> inputs = tensor_add->inputs_tensor_info();
std::vector<TensorInfo> outputs = tensor_add->outputs_tensor_info(); std::vector<TensorInfo> outputs = tensor_add->outputs_tensor_info();
@@ -128,7 +128,7 @@ TEST_F(TestTensorAddInfo, GetForwardOp1) {
Strategys inputs = {{2, 4, 4}, {2, 4, 4}}; Strategys inputs = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tensor_add->Init(strategy); tensor_add->Init(strategy, nullptr);
OperatorVector forward_op = tensor_add->forward_op(); OperatorVector forward_op = tensor_add->forward_op();
size_t size = forward_op.size(); size_t size = forward_op.size();
@ -139,7 +139,7 @@ TEST_F(TestTensorAddInfo, GetMirrorOPs1) {
Strategys inputs = {{2, 4, 4}, {2, 4, 4}}; Strategys inputs = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tensor_add->Init(strategy); tensor_add->Init(strategy, nullptr);
MirrorOps mirror_ops = tensor_add->mirror_ops(); MirrorOps mirror_ops = tensor_add->mirror_ops();
size_t size = mirror_ops.size(); size_t size = mirror_ops.size();
@ -151,7 +151,7 @@ TEST_F(TestTensorAddInfo, CheckStrategy1) {
Strategys inputs = {{2, 4, 4}, {2, 6, 4}}; Strategys inputs = {{2, 4, 4}, {2, 6, 4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tensor_add->Init(strategy); Status ret = tensor_add->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@ -159,7 +159,7 @@ TEST_F(TestTensorAddInfo, CheckStrategy2) {
Strategys inputs = {{2, 4, 8}, {2, 4, 8}}; Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tensor_add->Init(strategy); Status ret = tensor_add->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@ -167,7 +167,7 @@ TEST_F(TestTensorAddInfo, CheckStrategy3) {
Strategys inputs = {{2, 4, 6}}; Strategys inputs = {{2, 4, 6}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tensor_add->Init(strategy); Status ret = tensor_add->Init(strategy, nullptr);
ASSERT_EQ(ret, FAILED); ASSERT_EQ(ret, FAILED);
} }
@ -175,7 +175,7 @@ TEST_F(TestTensorAddInfo, CheckStrategy4) {
Strategys inputs = {{2, 4, 4}, {2, 4, 4}}; Strategys inputs = {{2, 4, 4}, {2, 4, 4}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
Status ret = tensor_add->Init(strategy); Status ret = tensor_add->Init(strategy, nullptr);
ASSERT_EQ(ret, SUCCESS); ASSERT_EQ(ret, SUCCESS);
} }
@ -185,7 +185,7 @@ TEST_F(TestTensorAddInfo, GenerateStrategies) {
for (auto& swc : sc) { for (auto& swc : sc) {
StrategyPtr sp = swc->strategy_ptr; StrategyPtr sp = swc->strategy_ptr;
Cost cost = *(swc->cost_list[0]); Cost cost = *(swc->cost_list[0]);
tensor_add->InitForCostModel(sp); tensor_add->InitForCostModel(sp, nullptr);
std::vector<TensorInfo> inputs_info = tensor_add->inputs_tensor_info(); std::vector<TensorInfo> inputs_info = tensor_add->inputs_tensor_info();
std::vector<TensorInfo> outputs_info = tensor_add->outputs_tensor_info(); std::vector<TensorInfo> outputs_info = tensor_add->outputs_tensor_info();
double memory_cost0 = tensor_add->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost0 = tensor_add->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage());
@ -207,7 +207,7 @@ TEST_F(TestTensorAddInfo, GenerateStrategies1) {
for (auto& swc : sc) { for (auto& swc : sc) {
StrategyPtr sp = swc->strategy_ptr; StrategyPtr sp = swc->strategy_ptr;
Cost cost = *(swc->cost_list[0]); Cost cost = *(swc->cost_list[0]);
tensor_add1->InitForCostModel(sp); tensor_add1->InitForCostModel(sp, nullptr);
std::vector<TensorInfo> inputs_info = tensor_add1->inputs_tensor_info(); std::vector<TensorInfo> inputs_info = tensor_add1->inputs_tensor_info();
std::vector<TensorInfo> outputs_info = tensor_add1->outputs_tensor_info(); std::vector<TensorInfo> outputs_info = tensor_add1->outputs_tensor_info();
double memory_cost0 = tensor_add1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost0 = tensor_add1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage());
@ -227,7 +227,7 @@ TEST_F(TestTensorAddInfo, mirror_ops) {
Strategys inputs = {{1, 8}, {4, 1}}; Strategys inputs = {{1, 8}, {4, 1}};
StrategyPtr strategy = NewStrategy(0, inputs); StrategyPtr strategy = NewStrategy(0, inputs);
tensor_add1->Init(strategy); tensor_add1->Init(strategy, nullptr);
MirrorOps mirror_ops = tensor_add1->mirror_ops(); MirrorOps mirror_ops = tensor_add1->mirror_ops();
OperatorVector mirror_op = mirror_ops.at(1); OperatorVector mirror_op = mirror_ops.at(1);

View File

@@ -68,7 +68,7 @@ TEST_F(TestTmpIdentityInfo, InferDevMatrixShape1) {
   Strategys inputs = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  identity_ptr->Init(strategy);
+  identity_ptr->Init(strategy, nullptr);
   Shape dev_matrix_shape = identity_ptr->dev_matrix_shape();
   Shape expect = {2, 4, 8, 16};
@@ -79,7 +79,7 @@ TEST_F(TestTmpIdentityInfo, InferSliceShape1) {
   Strategys str = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, str);
-  identity_ptr->Init(strategy);
+  identity_ptr->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = identity_ptr->inputs_tensor_info();
   std::vector<TensorInfo> outputs = identity_ptr->outputs_tensor_info();
@@ -100,7 +100,7 @@ TEST_F(TestTmpIdentityInfo, GetTensorLayout1) {
   Strategys str = {{2, 4, 8, 16}};
   StrategyPtr strategy = NewStrategy(0, str);
-  identity_ptr->Init(strategy);
+  identity_ptr->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = identity_ptr->inputs_tensor_info();
   std::vector<TensorInfo> outputs = identity_ptr->outputs_tensor_info();
@@ -122,7 +122,7 @@ TEST_F(TestTmpIdentityInfo, CheckStrategy1) {
   Strategys inputs = {{2, 2, 8, 16}, {2, 4, 16, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  Status ret = identity_ptr->Init(strategy);
+  Status ret = identity_ptr->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }
@@ -131,7 +131,7 @@ TEST_F(TestTmpIdentityInfo, CheckStrategy2) {
   Strategys inputs = {{2, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  Status ret = identity_ptr->Init(strategy);
+  Status ret = identity_ptr->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }
@@ -142,7 +142,7 @@ TEST_F(TestTmpIdentityInfo, test_generate_strategies) {
     StrategyPtr sp = swc->strategy_ptr;
     Cost cost = *(swc->cost_list[0]);
-    identity_ptr->Init(sp);
+    identity_ptr->Init(sp, nullptr);
     std::vector<TensorInfo> inputs_info = identity_ptr->inputs_tensor_info();
     std::vector<TensorInfo> outputs_info = identity_ptr->outputs_tensor_info();
     ASSERT_DOUBLE_EQ(identity_ptr->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()),

View File

@@ -71,7 +71,7 @@ TEST_F(TestTransposeInfo, InferDevMatrixShape1) {
   Strategys inputs = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   Shape dev_matrix_shape = transpose->dev_matrix_shape();
   Shape expect = {4, 8};
@@ -82,7 +82,7 @@ TEST_F(TestTransposeInfo, InferDevMatrixShape2) {
   Strategys inputs = {{4, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   Shape dev_matrix_shape = transpose->dev_matrix_shape();
   Shape expect = {4, 1, 8};
@@ -93,7 +93,7 @@ TEST_F(TestTransposeInfo, InferSliceShape1) {
   Strategys str = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, str);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = transpose->inputs_tensor_info();
   std::vector<TensorInfo> outputs = transpose->outputs_tensor_info();
@@ -114,7 +114,7 @@ TEST_F(TestTransposeInfo, GetTensorLayout1) {
   Strategys str = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, str);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   std::vector<TensorInfo> inputs = transpose->inputs_tensor_info();
   std::vector<TensorInfo> outputs = transpose->outputs_tensor_info();
@@ -135,7 +135,7 @@ TEST_F(TestTransposeInfo, GetForwardOp1) {
   Strategys inputs = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   OperatorVector forward_op = transpose->forward_op();
   size_t size = forward_op.size();
@@ -146,7 +146,7 @@ TEST_F(TestTransposeInfo, GetMirrorOPs1) {
   Strategys inputs = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  transpose->Init(strategy);
+  transpose->Init(strategy, nullptr);
   MirrorOps mirror_ops = transpose->mirror_ops();
   size_t size = mirror_ops.size();
@@ -158,7 +158,7 @@ TEST_F(TestTransposeInfo, CheckStrategy1) {
   Strategys inputs = {{1, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  Status ret = transpose->Init(strategy);
+  Status ret = transpose->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }
@@ -166,7 +166,7 @@ TEST_F(TestTransposeInfo, CheckStrategy2) {
   Strategys inputs = {{2, 4, 8}, {2, 4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  Status ret = transpose->Init(strategy);
+  Status ret = transpose->Init(strategy, nullptr);
   ASSERT_EQ(ret, FAILED);
 }
@@ -174,7 +174,7 @@ TEST_F(TestTransposeInfo, CheckStrategy3) {
   Strategys inputs = {{4, 8}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  Status ret = transpose->Init(strategy);
+  Status ret = transpose->Init(strategy, nullptr);
   ASSERT_EQ(ret, SUCCESS);
 }

View File

@@ -357,7 +357,7 @@ TEST_F(TestStepParallel, OperatorInstance) {
   std::vector<Shapes> shape = {inputs_shape, outputs_shape};
   TOTAL_OPS = 0;
   OperatorInfoPtr matmul_info = OperatorInstance(prim, attrs, shape);
-  matmul_info->Init(strategyPtr);
+  matmul_info->Init(strategyPtr, nullptr);
   std::string name_expect = "MatMulInfo00";
   std::string name_test = matmul_info->name();
   ASSERT_EQ(name_expect, name_test);
@@ -511,7 +511,7 @@ TEST_F(TestStepParallel, GetTensorInLayout) {
   Shapes outputs_shape = std::vector<Shape>{{64, 64}};
   std::vector<Shapes> shape = {inputs_shape, outputs_shape};
   OperatorInfoPtr matmul_info = OperatorInstance(prim, attrs, shape);
-  matmul_info->Init(strategyPtr);
+  matmul_info->Init(strategyPtr, nullptr);
   node->set_user_data<OperatorInfo>(matmul_info);
   OperatorInfoPtr distribute_operator_pre = node->user_data<OperatorInfo>();
   TensorLayout tensorlayout_e;

View File

@@ -64,7 +64,7 @@ void TestConstructOperator::SetUp() {
   Strategys str = {{2, 4, 8, 16}, {2, 4, 16, 1}};
   StrategyPtr strategy = NewStrategy(0, str);
-  matmul->Init(strategy);
+  matmul->Init(strategy, nullptr);
   Shape tensor_shape = {512, 1024};
   Shape dev_matrix_shape = {2, 4, 8, 16, 1};
   RankList used_dev_list = g_device_manager->GetDeviceListByStageId(0);

View File

@@ -64,7 +64,7 @@ void TestVirtualDatasetInfo::SetUp() {
 TEST_F(TestVirtualDatasetInfo, InferDevMatrixShape1) {
   Strategys inputs = {{16, 1}, {16, 1}, {16, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  virtual_dataset->Init(strategy);
+  virtual_dataset->Init(strategy, nullptr);
   Shape dev_matrix_shape = virtual_dataset->dev_matrix_shape();
   Shape expect = {16, 1};
@@ -75,7 +75,7 @@ TEST_F(TestVirtualDatasetInfo, GetForwardOp1) {
   Strategys inputs = {{8, 1}, {8, 1}, {8, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  virtual_dataset->Init(strategy);
+  virtual_dataset->Init(strategy, nullptr);
   OperatorVector forward_op = virtual_dataset->forward_op();
   size_t size = forward_op.size();
@@ -86,7 +86,7 @@ TEST_F(TestVirtualDatasetInfo, GetMirrorOPs1) {
   Strategys inputs = {{8, 1}, {8, 1}, {8, 1}};
   StrategyPtr strategy = NewStrategy(0, inputs);
-  virtual_dataset->Init(strategy);
+  virtual_dataset->Init(strategy, nullptr);
   MirrorOps mirror_ops = virtual_dataset->mirror_ops();
   size_t size = mirror_ops.size();

View File

@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2021 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -127,13 +127,17 @@ def test_two_matmul_repeated_calculation2():
     compile_net(net, x, y, b)
 
-def test_matmul_forward_reduce_scatter():
+def test_matmul_output_strategy_reduce_scatter():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is false, set output strategy and use reduce scatter
+    Expectation: compile success
+    """
     class Net(nn.Cell):
-        def __init__(self, strategy1, strategy2):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
             super().__init__()
-            self.matmul = P.MatMul().shard(strategy1)
-            self.matmul.add_prim_attr("forward_reduce_scatter", True)
-            self.mul = P.Mul().shard(strategy2)
+            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
 
         def construct(self, x, y, b):
             out = self.matmul(x, y)
@@ -141,9 +145,10 @@ def test_matmul_forward_reduce_scatter():
             return out
 
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
-    strategy1 = ((2, 2), (2, 2))
-    strategy2 = ((4, 2), (4, 2))
-    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((4, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -151,13 +156,17 @@ def test_matmul_forward_reduce_scatter():
     compile_net(net, x, y, b)
 
-def test_matmul_forward_reduce_scatter_transpose():
+def test_matmul_output_strategy_reduce_scatter_transpose():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is true, set output strategy and use reduce scatter
+    Expectation: compile success
+    """
     class Net(nn.Cell):
-        def __init__(self, strategy1, strategy2):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
             super().__init__()
-            self.matmul = P.MatMul(transpose_b=True).shard(strategy1)
-            self.matmul.add_prim_attr("forward_reduce_scatter", True)
-            self.mul = P.Mul().shard(strategy2)
+            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
 
         def construct(self, x, y, b):
             out = self.matmul(x, y)
@@ -165,9 +174,184 @@ def test_matmul_forward_reduce_scatter_transpose():
             return out
 
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
-    strategy1 = ((2, 4), (2, 4))
-    strategy2 = ((8, 2), (8, 2))
-    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+    matmul_in_strategy = ((2, 4), (2, 4))
+    matmul_out_strategy = ((8, 2),)
+    mul_strategy = ((8, 2), (8, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
+
+def test_matmul_output_strategy_all_reduce():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is false, set output strategy and use all reduce
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((2, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
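
[Editorial aside: across these cases the out_strategy is a pure function of the in_strategy. AllReduce keeps the natural (m, n) output shard, while ReduceScatter folds the k cut into the row shard. The tiny helper below is my own illustration of that rule for the non-transposed form, not a MindSpore API; for transpose_b=True the second strategy tuple is (n, k), so the same rule applies after swapping it.]

def matmul_out_strategies(matmul_in_strategy):
    """For in_strategy ((m, k), (k, n)) on MatMul (transpose_b=False), return
    the two output layouts these tests exercise. Illustration only."""
    (m, k), (k2, n) = matmul_in_strategy
    assert k == k2, "the contraction axis must be cut identically"
    return {"all_reduce": ((m, n),), "reduce_scatter": ((m * k, n),)}

# Matches the constants used in the tests above:
assert matmul_out_strategies(((2, 2), (2, 2)))["all_reduce"] == ((2, 2),)
assert matmul_out_strategies(((2, 2), (2, 2)))["reduce_scatter"] == ((4, 2),)
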
+
+def test_matmul_output_strategy_all_reduce_transpose():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is true, set output strategy and use all reduce
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((2, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
+
+def test_matmul_output_strategy_reduce_scatter_repeat_calc():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is false, set output strategy, use reduce scatter and repeated calculation
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((4, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
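
[Editorial aside: this case and the three after it deliberately under-use the device grid. The strategy only accounts for m * k * n devices, and the remainder shows up as repeated calculation. A quick arithmetic check under that reading, illustration only:]

device_num = 16      # from set_auto_parallel_context above
m, k, n = 2, 2, 2    # x is cut (m, k) = (2, 2); y is cut (k, n) = (2, 2)
used = m * k * n     # 8 devices hold distinct slices
assert device_num % used == 0
print(device_num // used)  # -> 2: every slice is computed by two devices
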
+
+def test_matmul_output_strategy_reduce_scatter_transpose_repeat_calc():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is true, set output strategy, use reduce scatter and repeated calculation
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=0)
+    matmul_in_strategy = ((2, 4), (2, 4))
+    matmul_out_strategy = ((8, 2),)
+    mul_strategy = ((8, 2), (8, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
+
+def test_matmul_output_strategy_all_reduce_repeat_calc():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is false, set output strategy, use all reduce and repeated calculation
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul().shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((2, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
+
+    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
+    compile_net(net, x, y, b)
+
+def test_matmul_output_strategy_all_reduce_transpose_repeat_calc():
+    """
+    Feature: test output strategy for matmul operator
+    Description: transpose_b is true, set output strategy, use all reduce and repeated calculation
+    Expectation: compile success
+    """
+    class Net(nn.Cell):
+        def __init__(self, matmul_in_strategy, matmul_out_strategy, mul_strategy):
+            super().__init__()
+            self.matmul = P.MatMul(transpose_b=True).shard(matmul_in_strategy, matmul_out_strategy)
+            self.mul = P.Mul().shard(mul_strategy)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.mul(out, b)
+            return out
+
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
+    matmul_in_strategy = ((2, 2), (2, 2))
+    matmul_out_strategy = ((2, 2),)
+    mul_strategy = ((4, 2), (4, 2))
+    net = GradWrap(NetWithLoss(Net(matmul_in_strategy, matmul_out_strategy, mul_strategy)))
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 32]), dtype=ms.float32)