diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h
index 1650ff0b2151336e3cedf5981124363937482d7e..145a8a0840e3726c140adc857b3d0b1fcb5d3c28 100644
--- a/mindspore/ccsrc/parallel/dynamic_creator.h
+++ b/mindspore/ccsrc/parallel/dynamic_creator.h
@@ -114,6 +114,7 @@ REGISTER(DropoutDoMaskInfo);
 REGISTER(ReshapeInfo);
 REGISTER(FloorDivInfo);
 REGISTER(MaximumInfo);
+REGISTER(MinimumInfo);
 REGISTER(CastInfo);
 REGISTER(GreaterInfo);
 REGISTER(SparseSoftmaxCrossEntropyWithLogitsInfo);
diff --git a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h
index 5f51f1d0a91afd0ba5903ec24d3dabddba7fc146..00cc431463dd5d92347f1f562f3f05084eaae529 100644
--- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h
+++ b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h
@@ -50,6 +50,14 @@ class MaximumInfo : public ArithmeticBase {
       : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
   ~MaximumInfo() override = default;
 };
+
+class MinimumInfo : public ArithmeticBase {
+ public:
+  MinimumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape,
+              const PrimitiveAttrs& attrs)
+      : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {}
+  ~MinimumInfo() override = default;
+};
 }  // namespace parallel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h
index befd26e3181f5b91413c603efe5393150a3dbc25..b0a9fb3a3c2cc9fa900168920eb78ea425f1ac24 100644
--- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h
+++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h
@@ -186,6 +186,7 @@ constexpr char LOG[] = "Log";
 constexpr char SIGMOID[] = "Sigmoid";
 constexpr char POW[] = "Pow";
 constexpr char MAXIMUM[] = "Maximum";
+constexpr char MINIMUM[] = "Minimum";
 constexpr char EQUAL[] = "Equal";
 constexpr char NOT_EQUAL[] = "NotEqual";
 constexpr char LOGICALNOT[] = "LogicalNot";
diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc
index fe6be575eea5ef73ae56f86a129e0b616f583bee..1eb881b798d6a4b1bf2e4ec6e02cd574c4c05e41 100644
--- a/mindspore/ccsrc/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc
@@ -93,6 +93,7 @@ std::vector<std::string> splittable_op_ = {MATMUL,
                                            SIGMOID,
                                            POW,
                                            MAXIMUM,
+                                           MINIMUM,
                                            EQUAL,
                                            NOT_EQUAL,
                                            LOGICALNOT,
diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py
index 74de04f1df7bd025a568f12df0a7e0662fc0efa0..93ec5e598154865755b533bea54eaf6de46777ba 100644
--- a/tests/ut/python/parallel/test_comparison_function_info.py
+++ b/tests/ut/python/parallel/test_comparison_function_info.py
@@ -54,11 +54,10 @@ def test_matmul_equal():
             out = self.equal(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
     strategy1 = ((2, 2), (2, 2))
     strategy2 = ((4, 2), (4, 2))
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
@@ -78,11 +77,10 @@ def test_matmul_not_equal():
             out = self.notequal(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -102,11 +100,10 @@ def test_matmul_not_equal_repeated_calculation(): out = self.notequal(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 1), (4, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -126,11 +123,10 @@ def test_matmul_maximum(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -150,11 +146,10 @@ def test_matmul_maximum_broadcast(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (2, )) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -174,13 +169,102 @@ def test_matmul_maximum_broadcast2(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 4), (4, 1)) strategy2 = ((4, 1), (1, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 1]), dtype=ms.float32) b = Tensor(np.ones([1, 64]), dtype=ms.float32) - _executor.compile(net, x, y, b) \ No newline at end of file + _executor.compile(net, x, y, b) + + +def test_matmul_minimum(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2): + super().__init__() + self.matmul = P.MatMul().set_strategy(strategy1) + self.minimum = P.Minimum().set_strategy(strategy2) + + def construct(self, x, y, b): + out = self.matmul(x, y) + out = self.minimum(out, b) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") + strategy1 = ((2, 2), (2, 2)) + strategy2 = ((4, 2), (4, 2)) + net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) + + x = Tensor(np.ones([64, 32]), dtype=ms.float32) + y = Tensor(np.ones([32, 64]), dtype=ms.float32) + b = Tensor(np.ones([64, 64]), dtype=ms.float32) + _executor.compile(net, x, y, b) + + +def test_matmul_minimum_broadcast(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2): + super().__init__() + self.matmul = P.MatMul().set_strategy(strategy1) 
+            self.minimum = P.Minimum().set_strategy(strategy2)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
+    strategy1 = ((2, 2), (2, 2))
+    strategy2 = ((4, 2), (2, ))
+    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
+    b = Tensor(np.ones([64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum_broadcast2():
+    class Net(nn.Cell):
+        def __init__(self, strategy1, strategy2):
+            super().__init__()
+            self.matmul = P.MatMul().set_strategy(strategy1)
+            self.minimum = P.Minimum().set_strategy(strategy2)
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
+    strategy1 = ((2, 4), (4, 1))
+    strategy2 = ((4, 1), (1, 2))
+    net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 1]), dtype=ms.float32)
+    b = Tensor(np.ones([1, 64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
+
+
+def test_matmul_minimum_auto_parallel():
+    class Net(nn.Cell):
+        def __init__(self):
+            super().__init__()
+            self.matmul = P.MatMul()
+            self.minimum = P.Minimum()
+
+        def construct(self, x, y, b):
+            out = self.matmul(x, y)
+            out = self.minimum(out, b)
+            return out
+
+    context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel")
+    net = GradWrap(NetWithLoss(Net()))
+
+    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
+    y = Tensor(np.ones([32, 1]), dtype=ms.float32)
+    b = Tensor(np.ones([1, 64]), dtype=ms.float32)
+    _executor.compile(net, x, y, b)
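
For readers skimming the strategy tuples in the tests: a minimal standalone sketch of sharding the newly supported Minimum primitive, assuming the same era MindSpore APIs the tests above use (P.Minimum().set_strategy, context.set_auto_parallel_context, _executor.compile). The MinimumNet name, shapes, and strategy are illustrative only, not part of the patch.

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common.api import _executor
from mindspore.ops import operations as P


class MinimumNet(nn.Cell):  # hypothetical example cell, not from the patch
    def __init__(self, strategy):
        super().__init__()
        # Split each input 4-way on rows and 2-way on columns; the product
        # of the per-input strategy (4 * 2 = 8) must equal device_num.
        self.minimum = P.Minimum().set_strategy(strategy)

    def construct(self, x, y):
        return self.minimum(x, y)


context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
net = MinimumNet(((4, 2), (4, 2)))
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64]), dtype=ms.float32)
_executor.compile(net, x, y)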