!258 add_minimum_distributed_op

Merge pull request !258 from lichen/add_minimum_distributed_op
mindspore-ci-bot 2020-04-13 20:16:08 +08:00 committed by Gitee
commit 77725e81a4
5 changed files with 108 additions and 13 deletions


@@ -114,6 +114,7 @@ REGISTER(DropoutDoMaskInfo);
REGISTER(ReshapeInfo);
REGISTER(FloorDivInfo);
REGISTER(MaximumInfo);
REGISTER(MinimumInfo);
REGISTER(CastInfo);
REGISTER(GreaterInfo);
REGISTER(SparseSoftmaxCrossEntropyWithLogitsInfo);


@@ -50,6 +50,14 @@ class MaximumInfo : public ArithmeticBase {
: ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
~MaximumInfo() override = default;
};
class MinimumInfo : public ArithmeticBase {
public:
MinimumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape,
const PrimitiveAttrs& attrs)
: ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
~MinimumInfo() override = default;
};
} // namespace parallel
} // namespace mindspore


@@ -186,6 +186,7 @@ constexpr char LOG[] = "Log";
constexpr char SIGMOID[] = "Sigmoid";
constexpr char POW[] = "Pow";
constexpr char MAXIMUM[] = "Maximum";
constexpr char MINIMUM[] = "Minimum";
constexpr char EQUAL[] = "Equal";
constexpr char NOT_EQUAL[] = "NotEqual";
constexpr char LOGICALNOT[] = "LogicalNot";


@@ -93,6 +93,7 @@ std::vector<std::string> splittable_op_ = {MATMUL,
SIGMOID,
POW,
MAXIMUM,
MINIMUM,
EQUAL,
NOT_EQUAL,
LOGICALNOT,
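
Taken together, the four hunks above wire the distributed Minimum operator into the parallel framework: MinimumInfo is registered with the operator-info factory, it inherits the same elementwise sharding semantics as Maximum through ArithmeticBase, its primitive name MINIMUM is defined, and the operator is added to the list the auto-parallel strategy search may split. The sketch below condenses the test_matmul_minimum case added further down; it assumes the set_strategy API used throughout this test file and omits the test utilities (GradWrap, NetWithLoss, _executor.compile) that the real test wraps around the net.

```python
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P

class MatMulMin(nn.Cell):
    """MatMul followed by the newly supported distributed Minimum."""
    def __init__(self, strategy1, strategy2):
        super().__init__()
        self.matmul = P.MatMul().set_strategy(strategy1)    # shard the MatMul inputs
        self.minimum = P.Minimum().set_strategy(strategy2)  # shard the elementwise Minimum

    def construct(self, x, y, b):
        out = self.matmul(x, y)
        return self.minimum(out, b)

# 8 devices, explicit strategies checked by the parallel pass.
context.set_auto_parallel_context(device_num=8, global_rank=0,
                                  parallel_mode="semi_auto_parallel")
net = MatMulMin(strategy1=((2, 2), (2, 2)), strategy2=((4, 2), (4, 2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
# The unit test compiles this graph via _executor.compile(net, x, y, b).
```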


@@ -54,11 +54,10 @@ def test_matmul_equal():
out = self.equal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -78,11 +77,10 @@ def test_matmul_not_equal():
out = self.notequal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -102,11 +100,10 @@ def test_matmul_not_equal_repeated_calculation():
out = self.notequal(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 1), (4, 1))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -126,11 +123,10 @@ def test_matmul_maximum():
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -150,11 +146,10 @@ def test_matmul_maximum_broadcast():
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (2, ))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -174,13 +169,102 @@ def test_matmul_maximum_broadcast2():
out = self.maximum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), (4, 1))
strategy2 = ((4, 1), (1, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
_executor.compile(net, x, y, b)

def test_matmul_minimum():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().set_strategy(strategy1)
self.minimum = P.Minimum().set_strategy(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (4, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
_executor.compile(net, x, y, b)

def test_matmul_minimum_broadcast():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().set_strategy(strategy1)
self.minimum = P.Minimum().set_strategy(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 2), (2, 2))
strategy2 = ((4, 2), (2, ))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64]), dtype=ms.float32)
_executor.compile(net, x, y, b)

def test_matmul_minimum_broadcast2():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2):
super().__init__()
self.matmul = P.MatMul().set_strategy(strategy1)
self.minimum = P.Minimum().set_strategy(strategy2)
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
strategy1 = ((2, 4), (4, 1))
strategy2 = ((4, 1), (1, 2))
net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
_executor.compile(net, x, y, b)

def test_matmul_minimum_auto_parallel():
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.matmul = P.MatMul()
self.minimum = P.Minimum()
def construct(self, x, y, b):
out = self.matmul(x, y)
out = self.minimum(out, b)
return out
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
net = GradWrap(NetWithLoss(Net()))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 1]), dtype=ms.float32)
b = Tensor(np.ones([1, 64]), dtype=ms.float32)
_executor.compile(net, x, y, b)
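
A note on the strategy tuples used in these tests: each inner tuple gives the number of slices per dimension of the corresponding input, and the product of each tuple must divide the device number (8 here); when the product is smaller, the computation is repeated across device groups, which is what test_matmul_not_equal_repeated_calculation exercises. The helper below is purely illustrative (not a MindSpore API) and just shows the per-device slice shapes implied by the Minimum strategies above.

```python
# Hypothetical helper, not part of MindSpore: map a tensor shape and a
# strategy tuple to the slice each device would hold.
def slice_shape(shape, strategy):
    assert len(shape) == len(strategy), "one cut count per tensor dimension"
    assert all(dim % cuts == 0 for dim, cuts in zip(shape, strategy))
    return tuple(dim // cuts for dim, cuts in zip(shape, strategy))

# test_matmul_minimum: strategy2 = ((4, 2), (4, 2)) on 8 devices
print(slice_shape((64, 64), (4, 2)))  # (16, 32) slice of the MatMul output
print(slice_shape((64, 64), (4, 2)))  # (16, 32) matching slice of b
# test_matmul_minimum_broadcast: strategy2 = ((4, 2), (2,)); b has shape [64]
print(slice_shape((64,), (2,)))       # (32,) -- b is broadcast along the row dimension
```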