!245 Add bool type check in communication operator

Merge pull request !245 from chentingting/add_bool_type_check_in_comm_op
Committed by mindspore-ci-bot on 2020-04-13 20:19:47 +08:00 via Gitee. Commit 39a46b9342.
12 changed files with 120 additions and 19 deletions

View File

@ -61,11 +61,12 @@ Status Edge::InitEdgeCost() {
auto target_output_lyt = target_output.second[prev_op_output_index_].tensor_layout();
auto target_output_str = target_output.first;
auto type_length = prev_op_->GetOutputTypeLengths()[prev_op_output_index_];
+ auto type = prev_op_->outputs_type()[prev_op_output_index_];
for (auto& target_input : next_op_input_) {
auto target_input_lyt = target_input.second[next_op_input_index_].tensor_layout();
auto target_input_str = target_input.first;
CostPtr cost;
- if (GetRedistributionCost(target_output_lyt, target_input_lyt, type_length, &cost) != SUCCESS) {
+ if (GetRedistributionCost(target_output_lyt, target_input_lyt, type_length, type, &cost) != SUCCESS) {
MS_LOG(EXCEPTION) << "Failure: redistribution cost calculation failed";
}
MS_EXCEPTION_IF_NULL(cost);
@ -99,7 +100,7 @@ Status Edge::InitEdgeCost() {
}
Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, const TensorLayout& next_op_input_layout,
- size_t type_length, CostPtr* cost) {
+ size_t type_length, TypePtr type, CostPtr* cost) {
MS_EXCEPTION_IF_NULL(prev_op_);
MS_EXCEPTION_IF_NULL(cost);
RankList dev_list = prev_op_->global_device_list();
@ -119,6 +120,13 @@ Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, co
double backward_comm_cost = tensor_redistribution.backward_comm_cost();
double computation_cost = tensor_redistribution.computation_cost();
+ // Now AllGather, ReduceScatter, AlltoAll don't support bool type
+ MS_EXCEPTION_IF_NULL(type);
+ if ((type->type_id() == kNumberTypeBool) && (comm_cost > 0)) {
+   computation_cost = INF;
+   comm_cost = INF;
+   MS_LOG(WARNING) << "Communication Operators don't support bool dtype!";
+ }
*cost = std::make_shared<Cost>(type_length * computation_cost, type_length * comm_cost);
(*cost)->communication_without_parameter_ = type_length * comm_cost;
(*cost)->communication_with_partial_para_ =
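Taken together, the hunk above prices bool redistribution out of the search: whenever the edge's output dtype is bool and the redistribution would involve any communication, both cost components are pinned to INF, so the strategy search never selects a placement that communicates bool tensors. A minimal sketch of that pricing rule (plain Python with illustrative names, not MindSpore internals):

# Standalone sketch of the penalty above; INF mirrors the constant this
# PR adds, everything else is illustrative.
INF = 1e20

def redistribution_cost(dtype, type_length, comm_cost, computation_cost):
    # Bool tensors cannot ride AllGather/ReduceScatter/AlltoAll, so any
    # strategy pair that needs communication for them is priced out.
    if dtype == "bool" and comm_cost > 0:
        comm_cost = computation_cost = INF
    return type_length * computation_cost, type_length * comm_cost

print(redistribution_cost("float32", 4, 2.0, 1.0))  # (4.0, 8.0): selectable
print(redistribution_cost("bool", 1, 2.0, 1.0))     # (1e+20, 1e+20): pruned

Pinning the cost to INF rather than rejecting the edge outright keeps the cost graph intact; strategies that avoid communication for the bool tensor remain selectable.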

View File

@ -84,7 +84,7 @@ class Edge {
// and the input tensor layout of v, return the redistribution cost,
// and the op_list to carry out the redistribution.
Status GetRedistributionCost(const TensorLayout& prev_op_output_layout, const TensorLayout& next_op_input_layout,
- size_t, CostPtr* cost);
+ size_t, TypePtr type, CostPtr* cost);
void set_pre_op_output(const std::vector<std::pair<std::shared_ptr<Strategy>, std::vector<TensorInfo>>>& output_set) {
pre_op_output_ = output_set;

View File

@ -1197,6 +1197,16 @@ Status OperatorInfo::SetInputAndOutputTypeLength(const std::vector<size_t>& inpu
return SUCCESS;
}
+ Status OperatorInfo::set_outputs_type(const std::vector<TypePtr>& outputs_type) {
+   if (outputs_type.size() != outputs_shape_.size()) {
+     MS_LOG(ERROR) << "The number of outputs type: " << outputs_type.size()
+                   << " does not match the number of outputs shape: " << outputs_shape_.size();
+     return FAILED;
+   }
+   outputs_type_ = outputs_type;
+   return SUCCESS;
+ }
void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra, const CostPtr& cost) {
if (!stra->GetInputDim().empty() && !stra->GetInputDim()[0].empty()) {
CheckGlobalDeviceManager();
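The setter added above enforces one dtype per output shape before storing anything. A hypothetical Python rendering of the same size check:

# Hypothetical mirror of the guard in set_outputs_type: exactly one
# dtype per output shape, otherwise the call fails.
def set_outputs_type(outputs_shape, outputs_type):
    if len(outputs_type) != len(outputs_shape):
        raise ValueError(f"got {len(outputs_type)} output dtypes for "
                         f"{len(outputs_shape)} output shapes")
    return list(outputs_type)

print(set_outputs_type([(4096, 1024)], ["float32"]))  # ['float32']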

View File

@ -60,7 +60,8 @@ class OperatorInfo {
outputs_shape_(std::move(outputs_shape)),
attrs_(std::move(attrs)),
is_alive_(true),
- cost_(cost) {
+ cost_(cost),
+ outputs_type_() {
std::vector<bool> not_parameteter(inputs_shape_.size(), false);
is_parameter_ = not_parameteter;
refkey_parameter_name_ = "";
@ -71,6 +72,11 @@ class OperatorInfo {
Status set_is_parameter(const std::vector<bool>& is_parameter);
Status SetInputAndOutputTypeLength(const std::vector<size_t>& input_lengths,
const std::vector<size_t>& output_lengths);
+ // Set outputs dtype.
+ // If only one output, outputs_type.size() is 1.
+ // If output is tuple, outputs_type.size() is greater than 1.
+ Status set_outputs_type(const std::vector<TypePtr>& outputs_type);
+ const std::vector<TypePtr>& outputs_type() const { return outputs_type_; }
virtual Status Init(const StrategyPtr& strategy) = 0;
virtual Status InitForCostModel(const StrategyPtr& strategy) = 0; // only init the necessary parts
@ -229,6 +235,7 @@ class OperatorInfo {
private:
OperatorCostPtr cost_;
+ std::vector<TypePtr> outputs_type_;
};
Shape GetSliceShape(const Shape& tensor_shape, const Dimensions& strategy);
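Per the comment on the declaration, the contract is one dtype for a single-output operator and one dtype per element for a tuple output. For instance (element dtypes illustrative):

# One dtype registered for a single-output op; one per element for a
# tuple output such as SoftmaxCrossEntropyWithLogits (two outputs).
matmul_outputs_type = ["float32"]                 # single output tensor
softmax_ce_outputs_type = ["float32", "float32"]  # two-element tuple output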

View File

@ -42,6 +42,7 @@ constexpr size_t SoftmaxCrossEntropyWithLogitsAttrSize = 1;
constexpr size_t SoftmaxCrossEntropyWithLogitsInputsSize = 2;
constexpr size_t SoftmaxCrossEntropyWithLogitsOutputsSize = 2;
constexpr double EPS = 1e-6;
+ constexpr double INF = 1e20;
constexpr char AUTO_PARALLEL_RUN_ONCE_ONLY[] = "auto_parallel_run_once_only";
constexpr char SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY[] = "semi_auto_parallel_run_once_only";

View File

@ -255,12 +255,9 @@ size_t GetInputsTypeLen(const AnfNodePtr &input) {
return input_type_len;
}
// Given the node, return the element length of input and output
- std::vector<std::vector<size_t>> ExtractInputAndOutputTypeLengthByNode(const CNodePtr &node) {
+ std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
std::vector<size_t> inputs_type_len;
- std::vector<size_t> outputs_type_len;
- std::vector<std::vector<size_t>> all_types;
std::vector<AnfNodePtr> node_inputs{node->inputs()};
// extract input element length
@ -278,9 +275,13 @@ std::vector<std::vector<size_t>> ExtractInputAndOutputTypeLengthByNode(const CNo
inputs_type_len.push_back(GetInputsTypeLen(input));
}
}
- all_types.push_back(inputs_type_len);
+ return inputs_type_len;
+ }
- // extract output element length
+ std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
+ MS_EXCEPTION_IF_NULL(node);
+ std::vector<TypePtr> outputs_type;
+ // extract output element type
auto primary_output_type = node->Type();
MS_EXCEPTION_IF_NULL(primary_output_type);
if (primary_output_type->isa<mindspore::Tuple>()) {
@ -290,7 +291,7 @@ std::vector<std::vector<size_t>> ExtractInputAndOutputTypeLengthByNode(const CNo
for (auto &ele : elements) {
if (ele->isa<mindspore::TensorType>()) {
auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
- outputs_type_len.push_back(GetLengthOfDataType(ele_element_type));
+ outputs_type.push_back(ele_element_type);
} else {
MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
}
@ -299,14 +300,12 @@ std::vector<std::vector<size_t>> ExtractInputAndOutputTypeLengthByNode(const CNo
// in this case, the output is a single tensor
if (primary_output_type->isa<mindspore::TensorType>()) {
auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
- outputs_type_len.push_back(GetLengthOfDataType(element_type));
+ outputs_type.push_back(element_type);
} else {
MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
}
}
- all_types.push_back(outputs_type_len);
- return all_types;
+ return outputs_type;
}
// Be careful the argument is cnode_full_name, not the op_name
@ -367,11 +366,20 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &
return nullptr;
}
// Set the data type for inputs and outputs of this OperatorInfo
- std::vector<std::vector<size_t>> type_lengths = ExtractInputAndOutputTypeLengthByNode(cnode);
- if (operator_info->SetInputAndOutputTypeLength(type_lengths[0], type_lengths[1]) != SUCCESS) {
+ auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
+ auto outputs_type = ExtractOutputTypeByNode(cnode);
+ std::vector<size_t> outputs_type_length;
+ outputs_type_length.reserve(outputs_type.size());
+ std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
+                GetLengthOfDataType);
+ if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
return nullptr;
}
+ if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
+   MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
+   return nullptr;
+ }
// When the 'inputs' contains numerical values for some operators, these values should be extracted from
// ANF graph
auto &inputs = cnode->inputs();
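The old combined helper is split in two: inputs still yield byte lengths directly, while outputs now yield dtypes, and the byte lengths are derived from those dtypes via std::transform with GetLengthOfDataType, so the same dtype list can also feed set_outputs_type. A rough Python sketch of the new flow, with toy stand-ins for the real helpers:

TYPE_LEN = {"bool": 1, "float16": 2, "float32": 4}  # ~ GetLengthOfDataType

def extract_output_types(node):                     # ~ ExtractOutputTypeByNode
    return list(node["output_types"])

node = {"output_types": ["float32", "float32"]}     # toy stand-in for a CNode
outputs_type = extract_output_types(node)
outputs_type_length = [TYPE_LEN[t] for t in outputs_type]  # the std::transform step
assert outputs_type_length == [4, 4]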

View File

@ -39,7 +39,9 @@ size_t GetLengthOfDataType(const TypePtr &type);
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node);
std::vector<std::vector<size_t>> ExtractInputAndOutputTypeLengthByNode(const CNodePtr &node);
+ std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node);
+ std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node);
Status ConstructCostGraphNodes(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root);

View File

@ -162,6 +162,8 @@ class AllGather(PrimitiveWithInfer):
return x_shape
def infer_dtype(self, x_dtype):
+ if x_dtype == mstype.bool_:
+     raise TypeError("AllGather does not support 'Bool' as the dtype of input!")
return x_dtype
def __call__(self, tensor):
@ -219,6 +221,8 @@ class ReduceScatter(PrimitiveWithInfer):
return x_shape
def infer_dtype(self, x_dtype):
+ if x_dtype == mstype.bool_:
+     raise TypeError("ReduceScatter does not support 'Bool' as the dtype of input!")
return x_dtype
def __call__(self, tensor):
@ -276,6 +280,8 @@ class Broadcast(PrimitiveWithInfer):
return x_shape
def infer_dtype(self, x_dtype):
+ if x_dtype == mstype.bool_:
+     raise TypeError("Broadcast does not support 'Bool' as the dtype of input!")
return x_dtype
@ -318,6 +324,8 @@ class _AlltoAll(PrimitiveWithInfer):
return x_shape
def infer_dtype(self, x_dtype):
+ if x_dtype == mstype.bool_:
+     raise TypeError("AlltoAll does not support 'Bool' as the dtype of input!")
return x_dtype
def __call__(self, tensor):
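User-visible effect of the four infer_dtype checks above: passing a bool tensor to AllGather, ReduceScatter, Broadcast, or _AlltoAll now fails at type inference rather than reaching the collective. Roughly, assuming an initialized communication backend (sketch, not a verified snippet):

# Illustrative only: assumes distributed init and a configured group.
import numpy as np
import mindspore as ms
from mindspore.ops.operations import AllGather

x = ms.Tensor(np.ones((2, 8), dtype=np.bool_))
all_gather = AllGather()
try:
    y = all_gather(x)
except TypeError as e:
    print(e)  # AllGather does not support 'Bool' as the dtype of input!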

View File

@ -178,6 +178,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_0 = {{4096, 1024}};
matmul0 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_0, outputs_shape_0, attr_0);
matmul0->set_name("MatMul0");
+ matmul0->set_outputs_type({kFloat32});
// matmul1
ValuePtr transpose_a_1 = MakeValue(false);
@ -187,6 +188,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_1 = {{128, 4096}};
matmul1 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
matmul1->set_name("MatMul1");
+ matmul1->set_outputs_type({kFloat32});
// matmul2
ValuePtr transpose_a_2 = MakeValue(false);
@ -196,6 +198,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_2 = {{128, 1024}};
matmul2 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
matmul2->set_name("MatMul2");
+ matmul2->set_outputs_type({kFloat32});
// matmul3
ValuePtr transpose_a_3 = MakeValue(false);
@ -205,6 +208,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_3 = {{1024, 4096}};
matmul3 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
matmul3->set_name("MatMul3");
+ matmul3->set_outputs_type({kFloat32});
// matmul4
ValuePtr transpose_a_4 = MakeValue(false);
@ -214,6 +218,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_4 = {{128, 4096}};
matmul4 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_4, outputs_shape_4, attr_4);
matmul4->set_name("MatMul4");
+ matmul4->set_outputs_type({kFloat32});
// matmul5
ValuePtr transpose_a_5 = MakeValue(false);
@ -223,6 +228,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_5 = {{128, 4096}};
matmul5 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_5, outputs_shape_5, attr_5);
matmul5->set_name("MatMul5");
+ matmul5->set_outputs_type({kFloat32});
// matmul6
ValuePtr transpose_a_6 = MakeValue(false);
@ -232,6 +238,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_6 = {{4096, 1024}};
matmul6 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_6, outputs_shape_6, attr_6);
matmul6->set_name("MatMul6");
+ matmul6->set_outputs_type({kFloat32});
// matmul7
ValuePtr transpose_a_7 = MakeValue(false);
@ -241,6 +248,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_7 = {{64, 4096}};
matmul7 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_7, outputs_shape_7, attr_7);
matmul7->set_name("MatMul7");
+ matmul7->set_outputs_type({kFloat32});
// matmul8
ValuePtr transpose_a_8 = MakeValue(false);
@ -250,6 +258,7 @@ void TestDPAlgo::SetUp() {
Shapes outputs_shape_8 = {{64, 40960}};
matmul8 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_8, outputs_shape_8, attr_8);
matmul8->set_name("MatMul8");
+ matmul8->set_outputs_type({kFloat32});
}
void TestDPAlgo::ConstructTwoLargeMatMul() {
@ -278,12 +287,15 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes outputs_shape = {{64, 64}};
tmp_identity_ptr1 = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr1->set_name("identity_info1");
+ tmp_identity_ptr1->set_outputs_type({kFloat32});
tmp_identity_ptr2 = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr2->set_name("identity_info2");
+ tmp_identity_ptr2->set_outputs_type({kFloat32});
tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr->set_name("identity_info");
+ tmp_identity_ptr->set_outputs_type({kFloat32});
// mm1_ptr
ValuePtr transpose_a_1 = MakeValue(false);
@ -292,6 +304,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_1 = {{64, 64}, {64, 64}};
Shapes outputs_shape_1 = {{64, 64}};
mm1_ptr = std::make_shared<MatMulInfo>("matmul_info1", inputs_shape_1, outputs_shape_1, attr_1);
+ mm1_ptr->set_outputs_type({kFloat32});
// mm2_ptr
ValuePtr transpose_a_2 = MakeValue(false);
@ -300,6 +313,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_2 = {{64, 64}, {64, 64}};
Shapes outputs_shape_2 = {{64, 64}};
mm2_ptr = std::make_shared<MatMulInfo>("matmul_info2", inputs_shape_2, outputs_shape_2, attr_2);
+ mm2_ptr->set_outputs_type({kFloat32});
// mm3_ptr
ValuePtr transpose_a_3 = MakeValue(false);
@ -308,6 +322,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_3 = {{64, 64}, {64, 64}};
Shapes outputs_shape_3 = {{64, 64}};
mm3_ptr = std::make_shared<MatMulInfo>("matmul_info3", inputs_shape_3, outputs_shape_3, attr_3);
+ mm3_ptr->set_outputs_type({kFloat32});
// mm4_ptr
ValuePtr transpose_a_4 = MakeValue(false);
@ -316,6 +331,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_4 = {{64, 64}, {64, 64}};
Shapes outputs_shape_4 = {{64, 64}};
mm4_ptr = std::make_shared<MatMulInfo>("matmul_info4", inputs_shape_4, outputs_shape_4, attr_4);
+ mm4_ptr->set_outputs_type({kFloat32});
// mm5_ptr
ValuePtr transpose_a_5 = MakeValue(false);
@ -324,6 +340,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_5 = {{64, 64}, {64, 64}};
Shapes outputs_shape_5 = {{64, 64}};
mm5_ptr = std::make_shared<MatMulInfo>("matmul_info5", inputs_shape_5, outputs_shape_5, attr_5);
+ mm5_ptr->set_outputs_type({kFloat32});
// mm6_ptr
ValuePtr transpose_a_6 = MakeValue(false);
@ -332,6 +349,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_6 = {{64, 64}, {64, 64}};
Shapes outputs_shape_6 = {{64, 64}};
mm6_ptr = std::make_shared<MatMulInfo>("matmul_info6", inputs_shape_6, outputs_shape_6, attr_6);
+ mm6_ptr->set_outputs_type({kFloat32});
// mm7_ptr
ValuePtr transpose_a_7 = MakeValue(false);
@ -340,6 +358,7 @@ void TestDPAlgo::ConstructBatmanGraph() {
Shapes inputs_shape_7 = {{64, 64}, {64, 64}};
Shapes outputs_shape_7 = {{64, 64}};
mm7_ptr = std::make_shared<MatMulInfo>("matmul_info7", inputs_shape_7, outputs_shape_7, attr_7);
+ mm7_ptr->set_outputs_type({kFloat32});
// create edges
edge_i0_m3 = std::make_shared<Edge>(edge_iden_matmul_name, tmp_identity_ptr, mm3_ptr, 0, 0, false, true);
@ -451,6 +470,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes outputs_shape = {{64, 64}};
tmp_identity_ptr1 = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr1->set_name("identity_info1");
+ tmp_identity_ptr1->set_outputs_type({kFloat32});
// mm6_ptr
ValuePtr transpose_a_6 = MakeValue(false);
@ -459,9 +479,11 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_6 = {{64, 64}, {64, 64}};
Shapes outputs_shape_6 = {{64, 64}};
mm6_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_6, outputs_shape_6, attr_6);
+ mm6_ptr->set_outputs_type({kFloat32});
tmp_identity_ptr2 = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr2->set_name("identity_info2");
+ tmp_identity_ptr2->set_outputs_type({kFloat32});
// mm1_ptr
ValuePtr transpose_a_1 = MakeValue(false);
@ -470,6 +492,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_1 = {{64, 64}, {64, 64}};
Shapes outputs_shape_1 = {{64, 64}};
mm1_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ mm1_ptr->set_outputs_type({kFloat32});
// mm2_ptr
ValuePtr transpose_a_2 = MakeValue(false);
@ -478,6 +501,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_2 = {{64, 64}, {64, 64}};
Shapes outputs_shape_2 = {{64, 64}};
mm2_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ mm2_ptr->set_outputs_type({kFloat32});
// mm3_ptr
ValuePtr transpose_a_3 = MakeValue(false);
@ -486,6 +510,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_3 = {{64, 64}, {64, 64}};
Shapes outputs_shape_3 = {{64, 64}};
mm3_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ mm3_ptr->set_outputs_type({kFloat32});
// mm4_ptr
ValuePtr transpose_a_4 = MakeValue(false);
@ -494,6 +519,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_4 = {{64, 64}, {64, 64}};
Shapes outputs_shape_4 = {{64, 64}};
mm4_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_4, outputs_shape_4, attr_4);
+ mm4_ptr->set_outputs_type({kFloat32});
// mm5_ptr
ValuePtr transpose_a_5 = MakeValue(false);
@ -502,6 +528,7 @@ void TestDPAlgo::ConstructTriangleGraph() {
Shapes inputs_shape_5 = {{64, 64}, {64, 64}};
Shapes outputs_shape_5 = {{64, 64}};
mm5_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_5, outputs_shape_5, attr_5);
+ mm5_ptr->set_outputs_type({kFloat32});
// create edges
std::string edge_matmul_matmul_name = "MatMul-MatMul";
@ -584,6 +611,7 @@ void TestDPAlgo::ConstructTriangleGraph2() {
Shapes outputs_shape = {{64, 64}};
tmp_identity_ptr1 = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
tmp_identity_ptr1->set_name("identity_info1");
+ tmp_identity_ptr1->set_outputs_type({kFloat32});
// mm1_ptr
ValuePtr transpose_a_1 = MakeValue(false);
@ -592,6 +620,7 @@ void TestDPAlgo::ConstructTriangleGraph2() {
Shapes inputs_shape_1 = {{64, 64}, {64, 64}};
Shapes outputs_shape_1 = {{64, 64}};
mm1_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ mm1_ptr->set_outputs_type({kFloat32});
// mm2_ptr
ValuePtr transpose_a_2 = MakeValue(false);
@ -600,6 +629,7 @@ void TestDPAlgo::ConstructTriangleGraph2() {
Shapes inputs_shape_2 = {{64, 64}, {64, 64}};
Shapes outputs_shape_2 = {{64, 64}};
mm2_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ mm2_ptr->set_outputs_type({kFloat32});
// mm3_ptr
ValuePtr transpose_a_3 = MakeValue(false);
@ -608,6 +638,7 @@ void TestDPAlgo::ConstructTriangleGraph2() {
Shapes inputs_shape_3 = {{64, 64}, {64, 64}};
Shapes outputs_shape_3 = {{64, 64}};
mm3_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ mm3_ptr->set_outputs_type({kFloat32});
// create edges
std::string edge_matmul_matmul_name = "MatMul-MatMul";
@ -953,6 +984,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_1 = {{32, 16}, {16, 32}};
Shapes outputs_shape_1 = {{32, 32}};
mm1_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ mm1_ptr->set_outputs_type({kFloat32});
// mm2_ptr
ValuePtr transpose_a_2 = MakeValue(false);
@ -961,6 +993,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_2 = {{8, 32}, {32, 32}};
Shapes outputs_shape_2 = {{8, 32}};
mm2_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ mm2_ptr->set_outputs_type({kFloat32});
// mm3_ptr
ValuePtr transpose_a_3 = MakeValue(false);
@ -969,6 +1002,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_3 = {{32, 32}, {32, 64}};
Shapes outputs_shape_3 = {{32, 64}};
mm3_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ mm3_ptr->set_outputs_type({kFloat32});
// mm4_ptr
ValuePtr transpose_a_4 = MakeValue(false);
@ -977,6 +1011,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_4 = {{64, 32}, {32, 32}};
Shapes outputs_shape_4 = {{64, 32}};
mm4_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_4, outputs_shape_4, attr_4);
+ mm4_ptr->set_outputs_type({kFloat32});
// mm5_ptr
ValuePtr transpose_a_5 = MakeValue(false);
@ -985,6 +1020,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_5 = {{8, 32}, {32, 64}};
Shapes outputs_shape_5 = {{8, 64}};
mm5_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_5, outputs_shape_5, attr_5);
+ mm5_ptr->set_outputs_type({kFloat32});
// mm5_ptr
ValuePtr transpose_a_6 = MakeValue(false);
@ -993,6 +1029,7 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes inputs_shape_6 = {{8, 64}, {64, 32}};
Shapes outputs_shape_6 = {{8, 32}};
mm6_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_6, outputs_shape_6, attr_6);
+ mm6_ptr->set_outputs_type({kFloat32});
ValuePtr relu = MakeValue(std::string("relu"));
std::unordered_map<std::string, ValuePtr> relu_attr = {{"activation_type", relu}};
@ -1001,26 +1038,31 @@ void TestDPAlgo::ConstructMMRGraph() {
Shapes relu1_inputs_shape = {{8, 32}};
Shapes relu1_outputs_shape = {{8, 32}};
relu1_ptr = std::make_shared<ActivationInfo>("relu_info", relu1_inputs_shape, relu1_outputs_shape, relu_attr);
+ relu1_ptr->set_outputs_type({kFloat32});
// relu2_ptr
Shapes relu2_inputs_shape = {{32, 64}};
Shapes relu2_outputs_shape = {{32, 64}};
relu2_ptr = std::make_shared<ActivationInfo>("relu_info", relu2_inputs_shape, relu2_outputs_shape, relu_attr);
+ relu2_ptr->set_outputs_type({kFloat32});
// relu3_ptr
Shapes relu3_inputs_shape = {{64, 32}};
Shapes relu3_outputs_shape = {{64, 32}};
relu3_ptr = std::make_shared<ActivationInfo>("relu_info", relu3_inputs_shape, relu3_outputs_shape, relu_attr);
+ relu3_ptr->set_outputs_type({kFloat32});
// relu4_ptr
Shapes relu4_inputs_shape = {{8, 64}};
Shapes relu4_outputs_shape = {{8, 64}};
relu4_ptr = std::make_shared<ActivationInfo>("relu_info", relu4_inputs_shape, relu4_outputs_shape, relu_attr);
+ relu4_ptr->set_outputs_type({kFloat32});
// relu5_ptr
Shapes relu5_inputs_shape = {{8, 32}};
Shapes relu5_outputs_shape = {{8, 32}};
relu5_ptr = std::make_shared<ActivationInfo>("relu_info", relu5_inputs_shape, relu5_outputs_shape, relu_attr);
+ relu5_ptr->set_outputs_type({kFloat32});
std::string edge_matmul_matmul_name = "MatMul-MatMul";
std::string edge_matmul_relu_name = "MatMul-ReLU";
@ -1134,6 +1176,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() {
Shapes inputs_shape = {{32, 64}};
Shapes outputs_shape = {{32, 64}};
tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
+ tmp_identity_ptr->set_outputs_type({kFloat32});
// mm1_ptr
ValuePtr transpose_a_1 = MakeValue(false);
@ -1142,6 +1185,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() {
Shapes inputs_shape_1 = {{32, 64}, {64, 128}};
Shapes outputs_shape_1 = {{32, 128}};
mm1_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ mm1_ptr->set_outputs_type({kFloat32});
// mm2_ptr
ValuePtr transpose_a_2 = MakeValue(false);
@ -1150,6 +1194,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() {
Shapes inputs_shape_2 = {{128, 32}, {32, 64}};
Shapes outputs_shape_2 = {{128, 64}};
mm2_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ mm2_ptr->set_outputs_type({kFloat32});
// mm3_ptr
ValuePtr transpose_a_3 = MakeValue(false);
@ -1158,6 +1203,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() {
Shapes inputs_shape_3 = {{32, 128}, {128, 64}};
Shapes outputs_shape_3 = {{32, 64}};
mm3_ptr = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ mm3_ptr->set_outputs_type({kFloat32});
// create edges
std::string edge_matmul_matmul_name = "MatMul-MatMul";

View File

@ -65,6 +65,7 @@ void TestEdgeCostModel::SetUp() {
Shapes inputs_shape_1 = {{8, 16}, {16, 32}};
Shapes outputs_shape_1 = {{8, 32}};
matmul1 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ matmul1->set_outputs_type({kFloat32});
// matmul2
ValuePtr transpose_a_2 = MakeValue(false);
@ -73,6 +74,7 @@ void TestEdgeCostModel::SetUp() {
Shapes inputs_shape_2 = {{8, 32}, {32, 16}};
Shapes outputs_shape_2 = {{8, 16}};
matmul2 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ matmul2->set_outputs_type({kFloat32});
// matmul3
ValuePtr transpose_a_3 = MakeValue(false);
@ -81,6 +83,7 @@ void TestEdgeCostModel::SetUp() {
Shapes inputs_shape_3 = {{16, 8}, {8, 32}};
Shapes outputs_shape_3 = {{16, 32}};
matmul3 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ matmul3->set_outputs_type({kFloat32});
// matmul4
ValuePtr transpose_a_4 = MakeValue(false);
@ -89,6 +92,7 @@ void TestEdgeCostModel::SetUp() {
Shapes inputs_shape_4 = {{8, 16}, {16, 32}};
Shapes outputs_shape_4 = {{8, 32}};
matmul4 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_4, outputs_shape_4, attr_4);
+ matmul4->set_outputs_type({kFloat32});
// matmul5
ValuePtr transpose_a_5 = MakeValue(false);
@ -97,6 +101,7 @@ void TestEdgeCostModel::SetUp() {
Shapes inputs_shape_5 = {{8, 32}, {8, 32}};
Shapes outputs_shape_5 = {{8, 8}};
matmul5 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_5, outputs_shape_5, attr_5);
+ matmul5->set_outputs_type({kFloat32});
}
TEST_F(TestEdgeCostModel, test_InitEdgeCost) {

View File

@ -76,6 +76,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_0 = {{32, 16}, {16, 16}};
Shapes outputs_shape_0 = {{32, 16}};
matmul0 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_0, outputs_shape_0, attr_0);
+ matmul0->set_outputs_type({kFloat32});
// matmul1
ValuePtr transpose_a_1 = MakeValue(false);
@ -84,6 +85,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_1 = {{8, 16}, {16, 32}};
Shapes outputs_shape_1 = {{8, 32}};
matmul1 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_1, outputs_shape_1, attr_1);
+ matmul1->set_outputs_type({kFloat32});
// matmul2
ValuePtr transpose_a_2 = MakeValue(false);
@ -92,6 +94,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_2 = {{8, 32}, {32, 16}};
Shapes outputs_shape_2 = {{8, 16}};
matmul2 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_2, outputs_shape_2, attr_2);
+ matmul2->set_outputs_type({kFloat32});
// matmul3
ValuePtr transpose_a_3 = MakeValue(false);
@ -100,6 +103,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_3 = {{16, 8}, {8, 32}};
Shapes outputs_shape_3 = {{16, 32}};
matmul3 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_3, outputs_shape_3, attr_3);
+ matmul3->set_outputs_type({kFloat32});
// matmul4
ValuePtr transpose_a_4 = MakeValue(false);
@ -108,6 +112,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_4 = {{8, 16}, {16, 32}};
Shapes outputs_shape_4 = {{8, 32}};
matmul4 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_4, outputs_shape_4, attr_4);
+ matmul4->set_outputs_type({kFloat32});
// matmul5
ValuePtr transpose_a_5 = MakeValue(false);
@ -116,6 +121,7 @@ void TestCostGraph::SetUp() {
Shapes inputs_shape_5 = {{8, 32}, {8, 32}};
Shapes outputs_shape_5 = {{8, 8}};
matmul5 = std::make_shared<MatMulInfo>("matmul_info", inputs_shape_5, outputs_shape_5, attr_5);
+ matmul5->set_outputs_type({kFloat32});
}
void TestCostGraph::ConstructStarGraph2() {

View File

@ -55,7 +55,7 @@ class BroadCastNet(nn.Cell):
self.broadcast = Broadcast(0)
def construct(self, x):
- x, = self.broadcast((x,))
+ x = self.broadcast((x))
x = self.dense(x)
return x