diff --git a/paddle/fluid/operators/collective/c_reduce_max_op_mlu.cc b/paddle/fluid/operators/collective/c_reduce_max_op_mlu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..79c0766b84960e019eccbb3ba6886895625b1aa6
--- /dev/null
+++ b/paddle/fluid/operators/collective/c_reduce_max_op_mlu.cc
@@ -0,0 +1,26 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/collective/c_reduce_op.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_MLU_KERNEL(c_reduce_max,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, float>,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, plat::float16>,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, int>,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, int16_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, int8_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedMax, uint8_t>)
diff --git a/paddle/fluid/operators/collective/c_reduce_min_op_mlu.cc b/paddle/fluid/operators/collective/c_reduce_min_op_mlu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..2c02803e759a96cde5ab2e4ab364528c99c5c5f3
--- /dev/null
+++ b/paddle/fluid/operators/collective/c_reduce_min_op_mlu.cc
@@ -0,0 +1,26 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/collective/c_reduce_op.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_MLU_KERNEL(c_reduce_min,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, float>,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, plat::float16>,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, int>,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, int16_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, int8_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedMin, uint8_t>)
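Each new `c_reduce_*_op_mlu.cc` file (max and min above, prod and sum further down) is a thin registration unit: it instantiates the shared `CReduceOpMLUKernel` template from `c_reduce_op.h` with a compile-time reduction tag, once per element type. Summarized as data below; the dtype list is an inference from the unit tests added later in this diff, not a statement of CNCL's full type support.

```python
# Illustrative summary only; the real registrations are the
# REGISTER_OP_MLU_KERNEL calls in this diff.
MLU_REDUCE_OPS = {
    "c_reduce_sum": "kRedSum",
    "c_reduce_max": "kRedMax",
    "c_reduce_min": "kRedMin",
    "c_reduce_prod": "kRedProd",
}
# One kernel instantiation per (op, dtype) pair: 4 x 6 = 24 kernels.
MLU_REDUCE_DTYPES = ["float32", "float16", "int32", "int16", "int8", "uint8"]
```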
diff --git a/paddle/fluid/operators/collective/c_reduce_op.h b/paddle/fluid/operators/collective/c_reduce_op.h
index ddd0052c5a823e549abb1cddbbf45c768c298c7f..4e9edb53730c220657f3518e910f0786b6592782 100644
--- a/paddle/fluid/operators/collective/c_reduce_op.h
+++ b/paddle/fluid/operators/collective/c_reduce_op.h
@@ -47,6 +47,10 @@ limitations under the License. */
 #include "paddle/fluid/platform/device/npu/hccl_helper.h"
 #endif
 
+#if defined(PADDLE_WITH_CNCL)
+#include "paddle/fluid/platform/device/mlu/cncl_helper.h"
+#endif
+
 namespace paddle {
 namespace operators {
 
@@ -331,6 +335,68 @@ class CReduceOpCUDAKernel : public framework::OpKernel<T> {
   }
 };
 
+template <ReduceType red_type, typename T>
+class CReduceOpMLUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+#if defined(PADDLE_WITH_CNCL)
+    auto in = ctx.Input<framework::LoDTensor>("X");
+    auto out = ctx.Output<framework::LoDTensor>("Out");
+    auto place = ctx.GetPlace();
+    cnclDataType_t dtype =
+        platform::ToCNCLDataType(framework::TransToProtoVarType(in->dtype()));
+    int64_t numel = in->numel();
+
+    const void* sendbuff = in->data();
+    out->Resize(in->dims());
+    void* recvbuff = out->mutable_data<T>(place);
+
+    int rid = ctx.Attr<int>("ring_id");
+    int root = ctx.Attr<int>("root_id");
+    auto comm = paddle::platform::CNCLCommContext::Instance().Get(rid, place);
+
+    mluStream stream = nullptr;
+    if (ctx.Attr<bool>("use_calc_stream")) {
+      auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
+      stream = static_cast<platform::MLUDeviceContext*>(dev_ctx)->stream();
+    } else {
+      stream = comm->stream();
+    }
+
+    cnclReduceOp_t cncl_red_type = cnclSum;
+    switch (red_type) {
+      case kRedSum:
+        cncl_red_type = cnclSum;
+        break;
+
+      case kRedMax:
+        cncl_red_type = cnclMax;
+        break;
+
+      case kRedMin:
+        cncl_red_type = cnclMin;
+        break;
+
+      case kRedProd:
+        cncl_red_type = cnclProd;
+        break;
+
+      default:
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Invalid reduce type: %d", red_type));
+    }
+
+    PADDLE_ENFORCE_MLU_SUCCESS(cnclReduce(sendbuff, recvbuff, numel, dtype,
+                                          cncl_red_type, root, comm->comm(),
+                                          stream));
+
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "PaddlePaddle should compile with MLU."));
+#endif
+  }
+};
+
 class CReduceOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() {
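The kernel resolves the CNCL dtype from the input tensor, picks either the computation stream (`use_calc_stream`) or the communicator's own stream, maps the compile-time `red_type` tag to a `cnclReduceOp_t`, and issues a single `cnclReduce`. Semantically, every rank contributes `X` element-wise and only the root rank's `Out` is guaranteed to hold the reduced result. A minimal numpy sketch of that contract (the semantics, not the CNCL implementation):

```python
import numpy as np

def reduce_reference(rank_inputs, red_type, root=0):
    """Element-wise reduction across ranks; only `root` keeps the result."""
    ops = {
        "sum": np.add,
        "prod": np.multiply,
        "max": np.maximum,
        "min": np.minimum,
    }
    out = rank_inputs[0].copy()
    for t in rank_inputs[1:]:
        out = ops[red_type](out, t)
    return {root: out}  # non-root outputs are undefined in the collective

x0 = np.float32([[1, 5], [3, 2]])
x1 = np.float32([[4, 2], [3, 6]])
assert np.array_equal(reduce_reference([x0, x1], "max", root=1)[1],
                      np.maximum(x0, x1))
```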
diff --git a/paddle/fluid/operators/collective/c_reduce_prod_op_mlu.cc b/paddle/fluid/operators/collective/c_reduce_prod_op_mlu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..e1b18ea295599e1713186ea16c8e9ff545d4fb5b
--- /dev/null
+++ b/paddle/fluid/operators/collective/c_reduce_prod_op_mlu.cc
@@ -0,0 +1,26 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/collective/c_reduce_op.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_MLU_KERNEL(c_reduce_prod,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, float>,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, plat::float16>,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, int>,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, int16_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, int8_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedProd, uint8_t>)
diff --git a/paddle/fluid/operators/collective/c_reduce_sum_op_mlu.cc b/paddle/fluid/operators/collective/c_reduce_sum_op_mlu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..302d3cfd98e8572cd911f45bc266738315e88aee
--- /dev/null
+++ b/paddle/fluid/operators/collective/c_reduce_sum_op_mlu.cc
@@ -0,0 +1,26 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/collective/c_reduce_op.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_MLU_KERNEL(c_reduce_sum,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, float>,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, plat::float16>,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, int>,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, int16_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, int8_t>,
+                       ops::CReduceOpMLUKernel<ops::kRedSum, uint8_t>)
diff --git a/python/paddle/fluid/tests/unittests/mlu/CMakeLists.txt b/python/paddle/fluid/tests/unittests/mlu/CMakeLists.txt
index 51fc8b3307bd16e709cac6a843113bcef79448d8..1da2fb8b14f75f10e852ef1783866f1d9ceca834 100644
--- a/python/paddle/fluid/tests/unittests/mlu/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/mlu/CMakeLists.txt
@@ -26,6 +26,10 @@ if (WITH_MLU)
     set_tests_properties(test_collective_allreduce_min PROPERTIES TIMEOUT 120)
     set_tests_properties(test_collective_allreduce_prod PROPERTIES TIMEOUT 120)
     set_tests_properties(test_collective_allgather PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_collective_reduce_sum PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_collective_reduce_max PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_collective_reduce_min PROPERTIES TIMEOUT 120)
+    set_tests_properties(test_collective_reduce_prod PROPERTIES TIMEOUT 120)
     set_tests_properties(test_collective_broadcast_api_mlu PROPERTIES TIMEOUT 120)
     set_tests_properties(test_collective_allreduce_api_mlu PROPERTIES TIMEOUT 120)
     set_tests_properties(test_collective_allgather_api_mlu PROPERTIES TIMEOUT 120)
diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..f987a71abda72255de887e0f07dff39541ece370
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import argparse
+import os
+import sys
+import signal
+import time
+import socket
+from contextlib import closing
+from six import string_types
+import math
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.profiler as profiler
+import paddle.fluid.unique_name as nameGen
+from paddle.fluid import core
+import unittest
+from multiprocessing import Process
+import paddle.fluid.layers as layers
+from functools import reduce
+from test_collective_api_base_mlu import TestCollectiveAPIRunnerBase, runtime_main
+
+paddle.enable_static()
+
+
+class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase):
+    def __init__(self):
+        self.global_ring_id = 0
+
+    def get_model(self, main_prog, startup_program, rank):
+        with fluid.program_guard(main_prog, startup_program):
+            tindata = layers.data(
+                name="tindata", shape=[10, 1000], dtype='float32')
+            paddle.distributed.reduce(tindata, dst=0)
+            return [tindata]
+
+
+if __name__ == "__main__":
+    runtime_main(TestCollectiveReduceAPI, "reduce")
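`collective_reduce_api.py` builds the static-graph path: `paddle.distributed.reduce` with the default `ReduceOp.SUM` and `dst=0` lowers to the `c_reduce_sum` op with `root_id = 0`. For comparison, a dynamic-graph sketch of the same call; it assumes two MLU worker processes started via `paddle.distributed.launch`:

```python
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
# Each rank contributes (rank + 1) * ones; reduce sums them onto rank 0.
data = paddle.ones([10, 1000], dtype="float32") * (dist.get_rank() + 1)
dist.reduce(data, dst=0)
if dist.get_rank() == 0:
    print(float(data[0][0]))  # 3.0 with two ranks: 1.0 + 2.0
```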
diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..05fc17a5c7da32a72024f479e816df742746ad05
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import argparse
+import os
+import sys
+import signal
+import time
+import socket
+from contextlib import closing
+from six import string_types
+import math
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.profiler as profiler
+import paddle.fluid.unique_name as nameGen
+from paddle.fluid import core
+import unittest
+from multiprocessing import Process
+import paddle.fluid.layers as layers
+from functools import reduce
+from test_collective_base_mlu import TestCollectiveRunnerBase, runtime_main
+
+paddle.enable_static()
+
+
+class TestCollectiveReduce(TestCollectiveRunnerBase):
+    def __init__(self):
+        self.global_ring_id = 0
+
+    def get_model(self, main_prog, startup_program, col_type):
+        ring_id = 0
+        rootid = 1
+        with fluid.program_guard(main_prog, startup_program):
+            tindata = layers.data(
+                name="tindata", shape=[10, 1000], dtype='float32')
+            toutdata = main_prog.current_block().create_var(
+                name="outof" + col_type,
+                dtype='float32',
+                type=core.VarDesc.VarType.LOD_TENSOR,
+                persistable=False,
+                stop_gradient=False)
+            main_prog.global_block().append_op(
+                type="c_" + col_type,
+                inputs={'X': tindata},
+                attrs={'ring_id': ring_id,
+                       'root_id': rootid},
+                outputs={'Out': toutdata})
+            main_prog.global_block().append_op(
+                type="c_sync_comm_stream",
+                inputs={'X': toutdata},
+                outputs={'Out': toutdata},
+                attrs={'ring_id': ring_id})
+            return toutdata
+
+
+if __name__ == "__main__":
+    runtime_main(TestCollectiveReduce)
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
index 3c1cf7d2d1b2ba010511f2056759bae72328c16f..9fae73a2540f4784f180aa1915b8593807b2a0b6 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py
@@ -219,6 +219,9 @@ class TestDistBase(unittest.TestCase):
             self.assertTrue(
                 np.allclose(
                     tr1_out, need_result, rtol=1e-05, atol=1e-05))
+        elif col_type == "reduce":
+            need_result = input1 + input2
+            self.assertTrue(np.allclose(tr0_out, need_result))
         elif col_type == "allgather":
             need_result = np.vstack((input1, input2))
             tr_out0 = np.vstack((tr0_out[0], tr0_out[1]))
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
index 52952968977b022a5bf01adbc6bd3c7bda209a6f..f63daaf66ac2142a4ad85585bf6ca63e9c47a033 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
@@ -295,6 +295,18 @@ class TestDistBase(unittest.TestCase):
             self.assertTrue(
                 np.allclose(
                     tr1_out, need_result, rtol=1e-05, atol=1e-05))
+        elif col_type == "reduce_sum":
+            need_result = input1 + input2
+            self.assertTrue(np.allclose(tr1_out, need_result))
+        elif col_type == "reduce_prod":
+            need_result = input1 * input2
+            self.assertTrue(np.allclose(tr1_out, need_result))
+        elif col_type == "reduce_max":
+            need_result = np.maximum(input1, input2)
+            self.assertTrue(np.allclose(tr1_out, need_result))
+        elif col_type == "reduce_min":
+            need_result = np.minimum(input1, input2)
+            self.assertTrue(np.allclose(tr1_out, need_result))
         elif col_type == "allgather":
             need_result = np.vstack((input1, input2))
             self.assertTrue(np.allclose(tr0_out, need_result))
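Note which rank's output each new branch asserts on: `collective_reduce_op.py` above sets `root_id = 1`, so the four `reduce_*` branches in `test_collective_base_mlu.py` compare `tr1_out`, while the API path reduces to `dst = 0`, so the `reduce` branch in `test_collective_api_base_mlu.py` compares `tr0_out`. Restated as a table (a summary sketch, not code from the PR):

```python
# Rank whose output is checked against need_result, per col_type.
CHECKED_RANK = {
    "reduce": 0,       # API test: dst=0, assert on tr0_out
    "reduce_sum": 1,   # raw-op tests: root_id=1, assert on tr1_out
    "reduce_prod": 1,
    "reduce_max": 1,
    "reduce_min": 1,
}
```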
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc4b09933068480b7d09ca32a7fb33dded8175d7
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+import numpy as np
+import paddle
+
+from test_collective_api_base_mlu import TestDistBase
+
+paddle.enable_static()
+
+
+class TestCollectiveReduceAPI(TestDistBase):
+    def _setup_config(self):
+        pass
+
+    def test_reduce_cncl_fp16(self):
+        self.check_with_place("collective_reduce_api.py", "reduce", "float16")
+
+    def test_reduce_cncl_fp32(self):
+        self.check_with_place("collective_reduce_api.py", "reduce", "float32")
+
+    def test_reduce_cncl_int32(self):
+        self.check_with_place("collective_reduce_api.py", "reduce", "int32")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py
new file mode 100644
index 0000000000000000000000000000000000000000..5da899c581f0b77790846c92ebb8204d19800ffe
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+import unittest
+import numpy as np
+import paddle
+
+from test_collective_base_mlu import TestDistBase
+
+paddle.enable_static()
+
+
+class TestCReduceOp(TestDistBase):
+    def _setup_config(self):
+        pass
+
+    def test_reduce_max_fp32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max",
+                              "float32")
+
+    def test_reduce_max_fp16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max",
+                              "float16")
+
+    def test_reduce_max_int32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max", "int32")
+
+    def test_reduce_max_int16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max", "int16")
+
+    def test_reduce_max_int8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max", "int8")
+
+    def test_reduce_max_uint8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_max", "uint8")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py
new file mode 100644
index 0000000000000000000000000000000000000000..21fea55eff7db4940be1241ddb1fda906c372e51
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+import unittest
+import numpy as np
+import paddle
+
+from test_collective_base_mlu import TestDistBase
+
+paddle.enable_static()
+
+
+class TestCReduceOp(TestDistBase):
+    def _setup_config(self):
+        pass
+
+    def test_reduce_min_fp32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min",
+                              "float32")
+
+    def test_reduce_min_fp16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min",
+                              "float16")
+
+    def test_reduce_min_int32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min", "int32")
+
+    def test_reduce_min_int16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min", "int16")
+
+    def test_reduce_min_int8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min", "int8")
+
+    def test_reduce_min_uint8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_min", "uint8")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py
new file mode 100644
index 0000000000000000000000000000000000000000..86d52a8c3262758fe5695b33165b3ae87f1730b1
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+import unittest
+import numpy as np
+import paddle
+
+from test_collective_base_mlu import TestDistBase
+
+paddle.enable_static()
+
+
+class TestCReduceOp(TestDistBase):
+    def _setup_config(self):
+        pass
+
+    def test_reduce_prod_fp32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod",
+                              "float32")
+
+    def test_reduce_prod_fp16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod",
+                              "float16")
+
+    def test_reduce_prod_int32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod", "int32")
+
+    def test_reduce_prod_int16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod", "int16")
+
+    def test_reduce_prod_int8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod", "int8")
+
+    def test_reduce_prod_uint8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_prod", "uint8")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py
new file mode 100644
index 0000000000000000000000000000000000000000..7028a0f29e84973f0b27dbdb5fd078a9ad3628f0
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+import unittest
+import numpy as np
+import paddle
+
+from test_collective_base_mlu import TestDistBase
+
+paddle.enable_static()
+
+
+class TestCReduceOp(TestDistBase):
+    def _setup_config(self):
+        pass
+
+    def test_reduce_sum_fp32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum",
+                              "float32")
+
+    def test_reduce_sum_fp16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum",
+                              "float16")
+
+    def test_reduce_sum_int32(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum", "int32")
+
+    def test_reduce_sum_int16(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum", "int16")
+
+    def test_reduce_sum_int8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum", "int8")
+
+    def test_reduce_sum_uint8(self):
+        self.check_with_place("collective_reduce_op.py", "reduce_sum", "uint8")
+
+
+if __name__ == '__main__':
+    unittest.main()
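The four `test_collective_reduce_*.py` files are intentionally explicit copies that differ only in the `col_type` string, each covering the same six dtypes the kernels register. A table-driven variant would collapse them to one file (a hypothetical sketch, not part of this PR; it only runs inside this test directory, where `test_collective_base_mlu` is importable):

```python
import unittest

from test_collective_base_mlu import TestDistBase


class TestCReduceOp(TestDistBase):
    def _setup_config(self):
        pass


# Generate the 24 methods; names mirror the explicit ones above,
# e.g. test_reduce_max_fp32. Default args pin the loop variables.
_SHORT = {"float32": "fp32", "float16": "fp16", "int32": "int32",
          "int16": "int16", "int8": "int8", "uint8": "uint8"}
for _col in ("reduce_sum", "reduce_max", "reduce_min", "reduce_prod"):
    for _dtype, _suffix in _SHORT.items():
        def _case(self, col=_col, dtype=_dtype):
            self.check_with_place("collective_reduce_op.py", col, dtype)
        setattr(TestCReduceOp, "test_%s_%s" % (_col, _suffix), _case)

if __name__ == "__main__":
    unittest.main()
```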