!29421 Implementation of BoundingBoxEncodeInfo, IOUInfo and RandomChoiceWithMaskInfo

Merge pull request !29421 from liuluobin/parallel_ops_master
This commit is contained in:
i-robot 2022-01-26 06:40:32 +00:00 committed by Gitee
commit b0b3d7ede7
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
15 changed files with 668 additions and 13 deletions

View File

@ -188,6 +188,9 @@ using SplitCost = CastCost;
// Ops whose parallel cost model is identical to a plain cast: no extra
// computation or redistribution cost beyond the base CastCost estimate.
using ScatterUpdateCost = CastCost;
using UniformRealCost = CastCost;
using ResizeBilinearCost = CastCost;
using BoundingBoxEncodeCost = CastCost;
using IOUCost = CastCost;
// NOTE(review): name is missing an 'e' ("Choic" vs "Choice"); it is referenced
// elsewhere under this exact spelling, so any rename must update all uses together.
using RandomChoicWithMaskCost = CastCost;
class SqrtCost : public CastCost {
public:

View File

@ -210,6 +210,9 @@ REGISTER(UniformRealInfo);
// Register each OperatorInfo subclass so the parallel module can
// instantiate it by operator name during strategy inference.
REGISTER(ResizeBilinearInfo);
REGISTER(ResizeNearestNeighborInfo);
REGISTER(CumSumInfo);
REGISTER(BoundingBoxEncodeInfo);
REGISTER(IOUInfo);
REGISTER(RandomChoiceWithMaskInfo);
} // namespace parallel
} // namespace mindspore

View File

@ -0,0 +1,115 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "frontend/parallel/ops_info/bounding_box_encode_info.h"
namespace mindspore {
namespace parallel {
Status BoundingBoxEncodeInfo::CheckStrategy(const StrategyPtr &strategy) {
  // Generic validation first: dimension counts and shape divisibility.
  if (CheckStrategyValue(strategy, inputs_shape_) != SUCCESS) {
    MS_LOG(ERROR) << name_ << ": Invalid strategy.";
    return FAILED;
  }

  Strategys stra = strategy->GetInputDim();
  const Dimensions &anchor_box_stra = stra[0];
  const Dimensions &gt_box_stra = stra[1];

  // The two inputs are element-aligned, so their shard strategies must match exactly.
  if (anchor_box_stra != gt_box_stra) {
    MS_LOG(ERROR) << name_ << ": Strategies of relevant dimensions must be equal, but the strategy is "
                  << StrategyToString(stra);
    return FAILED;
  }

  // Only the batch (first) dimension may be sharded; the coordinate dimension stays whole.
  if (anchor_box_stra[1] != 1 || gt_box_stra[1] != 1) {
    MS_LOG(ERROR) << name_ << ": Cannot do this operator in the strategy: " << StrategyToString(stra)
                  << ", only support shard the first dimension for each input tensor.";
    return FAILED;
  }

  // The requested shard count cannot exceed the devices available in this stage.
  if (anchor_box_stra[0] > stage_device_size_) {
    MS_LOG(ERROR) << name_ << ": The strategy is " << StrategyToString(stra) << ", it requires "
                  << anchor_box_stra[0] << " devices, but the device number of this stage is " << stage_device_size_;
    return FAILED;
  }
  return SUCCESS;
}
Status BoundingBoxEncodeInfo::InferDevMatrixShape() {
  // The device matrix is one-dimensional: only the batch dimension is sharded,
  // and both inputs share the same split count (enforced by CheckStrategy).
  Strategys stra = strategy_->GetInputDim();
  dev_matrix_shape_ = {stra.at(0)[0]};
  MS_LOG(INFO) << name_ << ": The dev matrix is " << ShapeToString(dev_matrix_shape_);
  return SUCCESS;
}
Status BoundingBoxEncodeInfo::InferTensorMap() {
  // Every tensor here is 2-D: dim 0 follows the single device-matrix axis (0),
  // dim 1 is replicated (-1). Both inputs and the output share the same map.
  const TensorMap batch_sharded_map = {0, -1};
  inputs_tensor_map_ = {batch_sharded_map, batch_sharded_map};
  outputs_tensor_map_ = {batch_sharded_map};
  return SUCCESS;
}
std::vector<StrategyPtr> BoundingBoxEncodeInfo::GenerateOpStrategies(int64_t stage_id) {
  // Enumerate every divisor d of the batch length and propose sharding the batch
  // dimension of both inputs by d. Divisors are discovered in pairs (i, len / i)
  // while scanning i up to sqrt(len), so each candidate is visited once.
  std::vector<StrategyPtr> sp_vector;
  Shape input0_shape = inputs_shape_[0];
  Shape input1_shape = inputs_shape_[1];
  if (input0_shape != input1_shape) {
    MS_LOG(EXCEPTION) << "The shape of inputs must be equal.";
  }

  int64_t input0_length = input0_shape[0];
  CheckGlobalDeviceManager();
  size_t dev_num = g_device_manager->GetDeviceListByStageId(stage_id).size();
  for (int64_t i = 1; i <= SizeToLong(dev_num) && i * i <= input0_length; ++i) {
    if (input0_length % i != 0) {
      continue;
    }
    StrategyPtr sp;
    if (PrepareStrategy(stage_id, i, dev_num, &sp) == SUCCESS) {
      sp_vector.push_back(sp);
    }
    // When input0_length is a perfect square, i == input0_length / i; skip the
    // paired divisor in that case so the same strategy is not inserted twice.
    if (i != input0_length / i && PrepareStrategy(stage_id, input0_length / i, dev_num, &sp) == SUCCESS) {
      sp_vector.push_back(sp);
    }
  }

  if (sp_vector.empty()) {
    MS_LOG(EXCEPTION) << name_ << ": No available strategy.";
  }
  return sp_vector;
}
// Delegate cost registration for the given strategy to the common base implementation.
Status BoundingBoxEncodeInfo::SetCostUnderStrategy(const StrategyPtr &strategy) {
  return SetCostUnderStrategyBase(strategy);
}
Status BoundingBoxEncodeInfo::PrepareStrategy(int64_t stage_id, int64_t split_num, size_t dev_num, StrategyPtr *sp) {
  // Reject candidates that do not evenly partition the device list, and - when
  // the cost model demands full device utilisation - any split below dev_num.
  int64_t dev_num_signed = SizeToLong(dev_num);
  if (split_num == 0 || dev_num_signed % split_num != 0) {
    return FAILED;
  }
  if (CostModelContext::GetInstance()->fully_use_device() && split_num != dev_num_signed) {
    return FAILED;
  }

  // Shard only the batch dimension; both inputs use the identical partition.
  Dimensions partitions = {split_num, 1};
  *sp = std::make_shared<Strategy>(stage_id, Strategys({partitions, partitions}));
  return SUCCESS;
}
} // namespace parallel
} // namespace mindspore

View File

@ -0,0 +1,53 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_BOUNDING_BOX_ENCODE_INFO_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_BOUNDING_BOX_ENCODE_INFO_H_
#include <string>
#include <vector>
#include <memory>
#include "frontend/parallel/ops_info/operator_info.h"
#include "frontend/parallel/strategy.h"
#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
namespace mindspore {
namespace parallel {
/**
 * Parallel operator info for BoundingBoxEncode.
 *
 * Both inputs (anchor boxes and ground-truth boxes) are [N, 4] tensors that
 * must share one shard strategy; only the batch dimension N may be split, so
 * the operator needs no forward communication.
 */
class BoundingBoxEncodeInfo : public OperatorInfo {
 public:
  BoundingBoxEncodeInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &output_shape,
                        const PrimitiveAttrs &attrs)
      : OperatorInfo(name, inputs_shape, output_shape, attrs, std::make_shared<BoundingBoxEncodeCost>()) {}
  ~BoundingBoxEncodeInfo() override = default;

  // Enumerate candidate strategies (divisors of the batch length) for the searcher.
  std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;
  Status SetCostUnderStrategy(const StrategyPtr &strategy) override;

 protected:
  Status CheckStrategy(const StrategyPtr &strategy) override;
  Status InferDevMatrixShape() override;
  Status InferTensorMap() override;
  Status GetAttrs() override { return SUCCESS; }
  // Element-wise along the sharded dimension, so no communication is required.
  Status InferForwardCommunication() override { return SUCCESS; }

 private:
  // Build a strategy sharding the batch dimension of both inputs by split_num.
  Status PrepareStrategy(int64_t stage_id, int64_t split_num, size_t dev_num, StrategyPtr *sp);
};
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_BOUNDING_BOX_ENCODE_INFO_H_

View File

@ -0,0 +1,71 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "frontend/parallel/ops_info/iou_info.h"
#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
namespace mindspore {
namespace parallel {
Status IOUInfo::CheckStrategy(const StrategyPtr &strategy) {
  // Base-class validation covers dimension counts and shape divisibility.
  if (CheckStrategyValue(strategy, inputs_shape_) != SUCCESS) {
    MS_LOG(ERROR) << name_ << ": Invalid strategy.";
    return FAILED;
  }

  // The last (coordinate) dimension of either input must not be sharded.
  Strategys stra = strategy->GetInputDim();
  bool coord_dim_sharded = (stra[0][1] != 1) || (stra[1][1] != 1);
  if (coord_dim_sharded) {
    MS_LOG(ERROR) << name_ << ": Only supports shard the 0th dimension of each input tensor, but got strategy "
                  << StrategyToString(stra);
    return FAILED;
  }
  return SUCCESS;
}
Status IOUInfo::InferDevMatrixShape() {
  // 2-D device matrix: axis 1 holds input0's batch split, axis 0 holds
  // input1's batch split (matching the tensor maps set in InferTensorMap).
  Strategys strategies = strategy_->GetInputDim();
  dev_matrix_shape_ = {strategies[0][0], strategies[1][0]};
  MS_LOG(INFO) << name_ << ": The dev matrix is " << ShapeToString(dev_matrix_shape_);
  return SUCCESS;
}
Status IOUInfo::InferTensorMap() {
  // input0 rows follow device-matrix axis 1, input1 rows follow axis 0;
  // the output is sharded along both axes. -1 marks a replicated dimension.
  inputs_tensor_map_ = {TensorMap({1, -1}), TensorMap({0, -1})};
  outputs_tensor_map_ = {TensorMap({1, 0})};
  return SUCCESS;
}
std::vector<StrategyPtr> IOUInfo::GenerateOpStrategies(int64_t stage_id) {
  // Each input may split its first dimension only (1 = splittable, 0 = not),
  // and the two inputs are independent of each other.
  Shapes splittable_inputs = {Shape({1, 0}), Shape({1, 0})};
  std::vector<StrategyPtr> sp_vector;
  if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) {
    MS_LOG(EXCEPTION) << name_ << ": Generate strategies for independent inputs failed.";
  }
  return sp_vector;
}
} // namespace parallel
} // namespace mindspore

View File

@ -0,0 +1,48 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_IOU_INFO_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_IOU_INFO_H_
#include <string>
#include <vector>
#include <memory>
#include "frontend/parallel/strategy.h"
#include "frontend/parallel/ops_info/operator_info.h"
#include "frontend/parallel/auto_parallel/operator_costmodel.h"
namespace mindspore {
namespace parallel {
class IOUInfo : public OperatorInfo {
public:
IOUInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs)
: OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<IOUCost>()) {}
~IOUInfo() = default;
std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override { return SetCostUnderStrategyBase(strategy); }
protected:
Status GetAttrs() override { return SUCCESS; }
Status CheckStrategy(const StrategyPtr &strategy) override;
Status InferDevMatrixShape() override;
Status InferTensorMap() override;
Status InferForwardCommunication() override { return SUCCESS; }
};
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_IOU_INFO_H_

View File

@ -113,6 +113,18 @@ Status OperatorInfo::CheckStrategyValue(const StrategyPtr &strategy, const Shape
return FAILED;
}
int64_t shape_value = sub_input_shape.at(j);
if ((shape_value % strategy_value) != 0) {
if (is_auto_parallel_) {
MS_LOG(DEBUG) << name_ << ": The strategy is " << StrategyToString(stra) << ", shape " << shape_value
<< " cannot be divisible by strategy value " << strategy_value;
} else {
MS_LOG(ERROR) << name_ << ": The strategy is " << StrategyToString(stra) << ", shape " << shape_value
<< " cannot be divisible by strategy value " << strategy_value;
}
return FAILED;
}
if ((LongToUlong(strategy_value) & LongToUlong(strategy_value - 1)) != 0) {
if ((g_device_manager->DeviceNum() & (g_device_manager->DeviceNum() - 1)) != 0) {
MS_LOG(WARNING) << "The device num is not the power of 2, thus do not check the strategy as power of 2";
@ -127,18 +139,6 @@ Status OperatorInfo::CheckStrategyValue(const StrategyPtr &strategy, const Shape
}
return FAILED;
}
int64_t shape_value = sub_input_shape.at(j);
if ((shape_value % strategy_value) != 0) {
if (is_auto_parallel_) {
MS_LOG(DEBUG) << name_ << ": The strategy is " << StrategyToString(stra) << ", shape " << shape_value
<< " cannot be divisible by strategy value " << strategy_value;
} else {
MS_LOG(ERROR) << name_ << ": The strategy is " << StrategyToString(stra) << ", shape " << shape_value
<< " cannot be divisible by strategy value " << strategy_value;
}
return FAILED;
}
}
}

View File

@ -62,5 +62,8 @@
#include "frontend/parallel/ops_info/dsd_matmul_info.h"
#include "frontend/parallel/ops_info/uniform_real_info.h"
#include "frontend/parallel/ops_info/resizebilinear_info.h"
#include "frontend/parallel/ops_info/bounding_box_encode_info.h"
#include "frontend/parallel/ops_info/iou_info.h"
#include "frontend/parallel/ops_info/random_choice_with_mask_info.h"
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_HEAD_FILES_H_

View File

@ -421,6 +421,9 @@ constexpr char DSD_MATMUL[] = "DSDMatmul";
constexpr char RESIZE_BILINEAR[] = "ResizeBilinear";
constexpr char RESIZE_NEAREST_NEIGHBOR[] = "ResizeNearestNeighbor";
constexpr char CUMSUM[] = "CumSum";
// Primitive names of the detection ops supported by (semi-)auto parallel.
constexpr char BOUNDING_BOX_ENCODE[] = "BoundingBoxEncode";
constexpr char IOU[] = "IOU";
constexpr char RANDOM_CHOICE_WITH_MASK[] = "RandomChoiceWithMask";
// pipeline
constexpr size_t PIPELINE_FUSTION_OFFSET = 100;

View File

@ -0,0 +1,75 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "frontend/parallel/ops_info/random_choice_with_mask_info.h"
#include <algorithm>
namespace mindspore {
namespace parallel {
Status RandomChoiceWithMaskInfo::CheckStrategy(const StrategyPtr &strategy) {
  if (CheckStrategyValue(strategy, inputs_shape_) != SUCCESS) {
    MS_LOG(ERROR) << name_ << ": Invalid strategy.";
    return FAILED;
  }

  // The op samples randomly from the whole input, so no dimension may be
  // sharded: every strategy value must be exactly 1.
  Strategys stra = strategy->GetInputDim();
  for (int64_t dim_split : stra[0]) {
    if (dim_split != 1) {
      MS_LOG(ERROR) << name_ << ": Each dimension of input tensor is not splittable, but the strategy is "
                    << StrategyToString(stra);
      return FAILED;
    }
  }
  return SUCCESS;
}
// No dimension is ever sharded for this op, so the device matrix is empty
// (the tensors are fully replicated on every device).
Status RandomChoiceWithMaskInfo::InferDevMatrixShape() {
  dev_matrix_shape_.clear();
  return SUCCESS;
}
Status RandomChoiceWithMaskInfo::InferTensorMap() {
  // All tensors are fully replicated: every dimension maps to -1 (no device axis).
  Shape input0_shape = inputs_shape_.at(0);
  Shape output0_shape = outputs_shape_.at(0);
  Shape output1_shape = outputs_shape_.at(1);
  inputs_tensor_map_.clear();
  // Also clear the output maps so repeated inference does not accumulate stale
  // entries (the input map was cleared here but the output maps were not).
  outputs_tensor_map_.clear();
  inputs_tensor_map_.emplace_back(Shape(input0_shape.size(), -1));
  outputs_tensor_map_.emplace_back(Shape(output0_shape.size(), -1));
  outputs_tensor_map_.emplace_back(Shape(output1_shape.size(), -1));
  return SUCCESS;
}
std::vector<StrategyPtr> RandomChoiceWithMaskInfo::GenerateOpStrategies(int64_t stage_id) {
Dimensions input_partitions(inputs_shape_[0].size(), 1);
Strategys strategies = {input_partitions};
std::vector<StrategyPtr> sp_vector;
sp_vector.emplace_back(std::make_shared<Strategy>(stage_id, strategies));
return sp_vector;
}
Status RandomChoiceWithMaskInfo::InferAsLossDivisor() {
  // Guard against being called before InferTensorMap populated the output maps;
  // indexing outputs_tensor_map_[0] below would otherwise be out of bounds.
  if (outputs_tensor_map_.empty()) {
    MS_LOG(ERROR) << name_ << ": The outputs tensor map is empty.";
    return FAILED;
  }
  if (out_dev_matrix_shape_.empty()) {
    out_dev_matrix_shape_ = dev_matrix_shape_;
  }
  // Divisor is derived from output 0's layout; with everything replicated it
  // equals the repeat count over the (empty) device matrix.
  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(out_dev_matrix_shape_, outputs_tensor_map_[0]);
  MS_LOG(INFO) << name_ << ": the dev matrix shape is " << ShapeToString(out_dev_matrix_shape_)
               << ", the output tensor map is " << ShapeToString(outputs_tensor_map_[0]) << ", loss divisor is "
               << as_loss_divisor_;
  return SUCCESS;
}
} // namespace parallel
} // namespace mindspore

View File

@ -0,0 +1,50 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_RANDOM_CHOICE_WITH_MASK_INFO_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_RANDOM_CHOICE_WITH_MASK_INFO_H_
#include <memory>
#include <vector>
#include <string>
#include "frontend/parallel/ops_info/operator_info.h"
#include "frontend/parallel/auto_parallel/edge_costmodel.h"
#include "frontend/parallel/strategy.h"
namespace mindspore {
namespace parallel {
class RandomChoiceWithMaskInfo : public OperatorInfo {
public:
RandomChoiceWithMaskInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape,
const PrimitiveAttrs &attrs)
: OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared<RandomChoicWithMaskCost>()) {}
~RandomChoiceWithMaskInfo() = default;
std::vector<StrategyPtr> GenerateOpStrategies(int64_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override { return SetCostUnderStrategyBase(strategy); }
protected:
Status GetAttrs() override { return SUCCESS; }
Status CheckStrategy(const StrategyPtr &strategy) override;
Status InferDevMatrixShape() override;
Status InferTensorMap() override;
Status InferForwardCommunication() override { return SUCCESS; }
Status InferAsLossDivisor() override;
};
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_RANDOM_CHOICE_WITH_MASK_INFO_H_

View File

@ -168,7 +168,8 @@ bool IsSplittableOperator(const std::string &op_name) {
SOFTPLUS, SOFTSIGN, GREATEREQUAL, LESSEQUAL, LESS, APPROXIMATEEQUAL, MOD, UNIQUE, UNSORTED_SEGMENT_SUM,
UNSORTED_SEGMENT_MIN, REPEAT_ELEMENTS, TENSOR_DOT, RANGE, UNIFORM_CANDIDATE_SAMPLER, SLICE, SELECT, GATHERD,
UNSORTED_SEGMENT_MAX, GATHER_ND, TOPK, SCATTER_UPDATE, VIRTUAL_OUTPUT, CONV2D_BACK_PROP_INPUT, CONV2D_TRANSPOSE,
MATMUL_DDS, DSD_MATMUL, UNIFORMREAL, RESIZE_BILINEAR, RESIZE_NEAREST_NEIGHBOR, CUMSUM, FAST_GELU};
MATMUL_DDS, DSD_MATMUL, UNIFORMREAL, RESIZE_BILINEAR, RESIZE_NEAREST_NEIGHBOR, CUMSUM, FAST_GELU, IOU,
BOUNDING_BOX_ENCODE, RANDOM_CHOICE_WITH_MASK};
// clang-format on
auto iter = splittable_op.find(op_name);

View File

@ -0,0 +1,82 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
# Shared fixtures: 32 anchor boxes and 32 ground-truth boxes, 4 coordinates each.
_anchor_box = Tensor(np.ones([32, 4]), ms.float32)
_gt_boxes = Tensor(np.ones([32, 4]), ms.float32)
class Net(Cell):
    """Minimal net wrapping BoundingBoxEncode with an optional shard strategy."""

    def __init__(self, strategy=None):
        super(Net, self).__init__()
        self.bbox_encode = P.BoundingBoxEncode().shard(strategy)

    def construct(self, anchor_boxes, gt_boxes):
        return self.bbox_encode(anchor_boxes, gt_boxes)
def compile_net(net: Cell, *inputs):
    # Compile the net in graph mode, then reset the parallel context so each
    # test starts from a clean auto-parallel configuration.
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, *inputs)
    context.reset_auto_parallel_context()
def test_bounding_box_encode_data_parallel():
    """
    Feature: test BoundingBoxEncode data parallel strategy
    Description: only shard the batch dimension
    Expectation: compile success
    """
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    # Shard only the batch dimension of both inputs across all 8 devices.
    data_parallel_strategy = ((8, 1), (8, 1))
    compile_net(Net(data_parallel_strategy), _anchor_box, _gt_boxes)
def test_bounding_box_encode_auto_parallel():
    """
    Feature: test BoundingBoxEncode auto parallel
    Description: auto parallel
    Expectation: compile success
    """
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    # No explicit strategy: the strategy searcher picks one.
    compile_net(Net(), _anchor_box, _gt_boxes)
def test_bounding_box_encode_strategy_error():
    """
    Feature: test BoundingBoxEncode with illegal strategy
    Description: illegal strategy
    Expectation: raise RuntimeError
    """
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    # Mismatched shard counts for the two inputs must be rejected.
    bad_strategy = ((8, 1), (4, 1))
    with pytest.raises(RuntimeError):
        compile_net(Net(bad_strategy), _anchor_box, _gt_boxes)

View File

@ -0,0 +1,79 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
# Shared fixtures: 32 anchor boxes vs 64 ground-truth boxes, 4 coordinates each.
_anchor_boxes = Tensor(np.ones([32, 4]), ms.float32)
_gt_boxes = Tensor(np.ones([64, 4]), ms.float32)
class Net(Cell):
    """Minimal net wrapping the IOU op with an optional shard strategy."""

    def __init__(self, strategy=None):
        super(Net, self).__init__()
        self.iou = P.IOU().shard(strategy)

    def construct(self, anchor_boxes, gt_boxes):
        return self.iou(anchor_boxes, gt_boxes)
def compile_net(net: Cell):
    # Compile against the module-level fixtures, then reset the parallel
    # context so the next test starts clean.
    net.set_train()
    net.set_auto_parallel()
    _cell_graph_executor.compile(net, _anchor_boxes, _gt_boxes)
    context.reset_auto_parallel_context()
def test_auto_parallel_iou():
    """
    Feature: test IOU auto parallel
    Description: auto parallel
    Expectation: compile success
    """
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    # No explicit strategy: the strategy searcher picks one.
    compile_net(Net())
def test_data_parallel_iou():
    """
    Feature: test IOU data parallel strategy
    Description: only shard the batch dimension
    Expectation: compile success
    """
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    # Each input shards only its batch dimension (2 x 4 = 8 devices).
    batch_only_strategy = ((2, 1), (4, 1))
    compile_net(Net(batch_only_strategy))
def test_iou_strategy_error():
    """
    Feature: test IOU with illegal strategy
    Description: illegal strategy
    Expectation: raise RuntimeError
    """
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    # Sharding the coordinate dimension (second strategy value != 1) is rejected.
    bad_strategy = ((2, 2), (2, 1))
    with pytest.raises(RuntimeError):
        compile_net(Net(bad_strategy))

View File

@ -0,0 +1,69 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
# Shared fixture: a [512, 4] boolean mask input for RandomChoiceWithMask.
_input_x = Tensor(np.ones([512, 4]), ms.bool_)
class Net(Cell):
    """Minimal net wrapping RandomChoiceWithMask with an optional shard strategy."""

    def __init__(self, strategy=None):
        super(Net, self).__init__()
        self.random_choice_with_mask = P.RandomChoiceWithMask().shard(strategy)

    def construct(self, input_x):
        return self.random_choice_with_mask(input_x)
def compile_net(net: Cell, *inputs):
    # Compile the net in graph mode, then reset the parallel context so each
    # test starts from a clean auto-parallel configuration.
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, *inputs)
    context.reset_auto_parallel_context()
def test_auto_parallel_random_choice_with_mask():
    """
    Feature: test RandomChoiceWithMask auto parallel
    Description: auto parallel
    Expectation: compile success
    """
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    # No explicit strategy: the searcher must fall back to full replication.
    compile_net(Net(), _input_x)
def test_random_choice_with_mask_wrong_strategy():
    """
    Feature: test RandomChoiceWithMask with illegal strategy
    Description: illegal strategy
    Expectation: raise RuntimeError
    """
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    # Any sharded dimension is illegal for this op; (8, 1) shards the batch dim.
    bad_strategy = ((8, 1),)
    with pytest.raises(RuntimeError):
        compile_net(Net(bad_strategy), _input_x)