diff --git a/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc b/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6d1f8701c3d3daac05a4413fda59fcb396689e6b
--- /dev/null
+++ b/paddle/phi/kernels/legacy/cpu/elementwise_kernel.cc
@@ -0,0 +1,146 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/common/bfloat16.h"
+#include "paddle/phi/common/complex.h"
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/cpu/elementwise.h"
+#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void MaximumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  // allocate memory for out
+  dev_ctx.template Alloc<T>(out);
+  funcs::ElementwiseCompute<funcs::MaximumFunctor<T>, T>(
+      dev_ctx, x, y, axis, funcs::MaximumFunctor<T>(), out);
+}
+
+template <typename T, typename Context>
+void MinimumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  // allocate memory for out
+  dev_ctx.template Alloc<T>(out);
+  funcs::ElementwiseCompute<funcs::MinimumFunctor<T>, T>(
+      dev_ctx, x, y, axis, funcs::MinimumFunctor<T>(), out);
+}
+
+template <typename T, typename Context>
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out) {
+  // allocate memory for out
+  dev_ctx.template Alloc<T>(out);
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  if (x_dims.size() >= y_dims.size()) {
+    funcs::ElementwiseCompute<funcs::RemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::RemainderFunctor<T>(), out);
+  } else {
+    funcs::ElementwiseCompute<funcs::InverseRemainderFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::InverseRemainderFunctor<T>(), out);
+  }
+}
+
+template <typename T, typename Context>
+void FloorDivideRawKernel(const Context& dev_ctx,
+                          const DenseTensor& x,
+                          const DenseTensor& y,
+                          int axis,
+                          DenseTensor* out) {
+  // allocate memory for out
+  dev_ctx.template Alloc<T>(out);
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  if (x_dims.size() >= y_dims.size()) {
+    funcs::ElementwiseCompute<funcs::FloorDivideFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::FloorDivideFunctor<T>(), out);
+  } else {
+    funcs::ElementwiseCompute<funcs::InverseFloorDivideFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::InverseFloorDivideFunctor<T>(), out);
+  }
+}
+
+template <typename T, typename Context>
+void ElementwisePowRawKernel(const Context& dev_ctx,
+                             const DenseTensor& x,
+                             const DenseTensor& y,
+                             int axis,
+                             DenseTensor* out) {
+  // allocate memory for out
+  dev_ctx.template Alloc<T>(out);
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  if (x_dims.size() >= y_dims.size()) {
+    funcs::ElementwiseCompute<funcs::ElementwisePowFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::ElementwisePowFunctor<T>(), out);
+  } else {
+    funcs::ElementwiseCompute<funcs::ElementwiseInversePowFunctor<T>, T>(
+        dev_ctx, x, y, axis, funcs::ElementwiseInversePowFunctor<T>(), out);
+  }
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(maximum_raw,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::MaximumRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::bfloat16) {}
+PD_REGISTER_KERNEL(minimum_raw,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::MinimumRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::bfloat16) {}
+PD_REGISTER_KERNEL(remainder_raw,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::RemainderRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
+PD_REGISTER_KERNEL(floor_divide_raw,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::FloorDivideRawKernel,
+                   int,
+                   int64_t) {}
+PD_REGISTER_KERNEL(elementwise_pow_raw,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::ElementwisePowRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t) {}
diff --git a/paddle/phi/kernels/legacy/kps/elementwise_kernel.cu b/paddle/phi/kernels/legacy/kps/elementwise_kernel.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ec856ffa53b094367b45f2c6a0c838aec992dee8
--- /dev/null
+++ b/paddle/phi/kernels/legacy/kps/elementwise_kernel.cu
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/backends/gpu/gpu_context.h"
+#ifndef PADDLE_WITH_XPU_KP
+#include "paddle/phi/common/complex.h"
+#include "paddle/phi/common/float16.h"
+#endif
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void MaximumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, axis, funcs::MaximumFunctor<T>());
+}
+
+template <typename T, typename Context>
+void MinimumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, axis, funcs::MinimumFunctor<T>());
+}
+
+template <typename T, typename Context>
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, axis, funcs::RemainderFunctor<T>());
+}
+
+template <typename T, typename Context>
+void FloorDivideRawKernel(const Context& dev_ctx,
+                          const DenseTensor& x,
+                          const DenseTensor& y,
+                          int axis,
+                          DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, axis, funcs::FloorDivideFunctor<T>());
+}
+
+template <typename T, typename Context>
+void ElementwisePowRawKernel(const Context& dev_ctx,
+                             const DenseTensor& x,
+                             const DenseTensor& y,
+                             int axis,
+                             DenseTensor* out) {
+  std::vector<const DenseTensor*> inputs;
+  inputs.reserve(2);
+  std::vector<DenseTensor*> outputs;
+  outputs.reserve(1);
+  inputs.emplace_back(&x);
+  inputs.emplace_back(&y);
+  outputs.emplace_back(out);
+  dev_ctx.template Alloc<T>(out);
+  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
+      dev_ctx, inputs, &outputs, axis, funcs::ElementwisePowFunctor<T>());
+}
+
+}  // namespace phi
+
+#ifdef PADDLE_WITH_XPU_KP
+PD_REGISTER_KERNEL(maximum_raw, KPS, ALL_LAYOUT, phi::MaximumRawKernel, float) {
+}
+PD_REGISTER_KERNEL(minimum_raw, KPS, ALL_LAYOUT, phi::MinimumRawKernel, float) {
+}
+PD_REGISTER_KERNEL(
+    floor_divide_raw, KPS, ALL_LAYOUT, phi::FloorDivideRawKernel, int) {}
+PD_REGISTER_KERNEL(
+    elementwise_pow_raw, KPS, ALL_LAYOUT, phi::ElementwisePowRawKernel, float) {
+}
+
+#else
+using float16 = phi::dtype::float16;
+using bfloat16 = phi::dtype::bfloat16;
+
+PD_REGISTER_KERNEL(maximum_raw,
+                   KPS,
+                   ALL_LAYOUT,
+                   phi::MaximumRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   float16,
+                   bfloat16) {}
+PD_REGISTER_KERNEL(minimum_raw,
+                   KPS,
+                   ALL_LAYOUT,
+                   phi::MinimumRawKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   float16,
+                   bfloat16) {}
+PD_REGISTER_KERNEL(remainder_raw,
+                   KPS,
+                   ALL_LAYOUT,
+                   phi::RemainderRawKernel,
+                   float,
+                   double,
+                   int,
+                   float16,
+                   int64_t) {}
+PD_REGISTER_KERNEL(floor_divide_raw,
+                   KPS,
+                   ALL_LAYOUT,
+                   phi::FloorDivideRawKernel,
+                   int,
+                   int64_t) {}
+PD_REGISTER_KERNEL(elementwise_pow_raw,
+                   KPS,
+                   ALL_LAYOUT,
+                   phi::ElementwisePowRawKernel,
+                   float,
+                   double,
+                   int,
+                   float16,
+                   int64_t) {}
+#endif
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..00aee2d41b15373ec1528e45e0b0a57bc613bdb2
--- /dev/null
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/backends/xpu/xpu_context.h"
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"
+#include "paddle/phi/kernels/xpu/elementwise.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void MaximumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  auto f = [](xpu::Context* ctx,
+              const XPUType* x,
+              const XPUType* y,
+              XPUType* z,
+              const std::vector<int>& xshape,
+              const std::vector<int>& yshape) {
+    return xpu::broadcast_max<XPUType>(ctx, x, y, z, xshape, yshape);
+  };
+
+  XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
+}
+
+template <typename T, typename Context>
+void MinimumRawKernel(const Context& dev_ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      int axis,
+                      DenseTensor* out) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  auto f = [](xpu::Context* ctx,
+              const XPUType* x,
+              const XPUType* y,
+              XPUType* z,
+              const std::vector<int>& xshape,
+              const std::vector<int>& yshape) {
+    return xpu::broadcast_min<XPUType>(ctx, x, y, z, xshape, yshape);
+  };
+
+  XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
+}
+
+template <typename T, typename Context>
+void RemainderRawKernel(const Context& dev_ctx,
+                        const DenseTensor& x,
+                        const DenseTensor& y,
+                        int axis,
+                        DenseTensor* out) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  auto f = [](xpu::Context* ctx,
+              const XPUType* x,
+              const XPUType* y,
+              XPUType* z,
+              const std::vector<int>& xshape,
+              const std::vector<int>& yshape) {
+    return xpu::broadcast_mod<XPUType>(ctx, x, y, z, xshape, yshape);
+  };
+
+  XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
+}
+
+template <typename T, typename Context>
+void FloorDivideRawKernel(const Context& dev_ctx,
+                          const DenseTensor& x,
+                          const DenseTensor& y,
+                          int axis,
+                          DenseTensor* out) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  auto f = [](xpu::Context* ctx,
+              const XPUType* x,
+              const XPUType* y,
+              XPUType* z,
+              const std::vector<int>& xshape,
+              const std::vector<int>& yshape) {
+    return xpu::broadcast_floordiv<XPUType>(ctx, x, y, z, xshape, yshape);
+  };
+
+  XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
+}
+
+template <typename T, typename Context>
+void ElementwisePowRawKernel(const Context& dev_ctx,
+                             const DenseTensor& x,
+                             const DenseTensor& y,
+                             int axis,
+                             DenseTensor* out) {
+  using XPUType = typename XPUTypeTrait<T>::Type;
+  auto f = [](xpu::Context* ctx,
+              const XPUType* x,
+              const XPUType* y,
+              XPUType* z,
+              const std::vector<int>& xshape,
+              const std::vector<int>& yshape) {
+    return xpu::broadcast_pow<XPUType>(ctx, x, y, z, xshape, yshape);
+  };
+
+  XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(floor_divide_raw,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::FloorDivideRawKernel,
+                   float,
+                   phi::dtype::float16) {}
+PD_REGISTER_KERNEL(maximum_raw,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::MaximumRawKernel,
+                   float,
+                   phi::dtype::float16) {}
+PD_REGISTER_KERNEL(minimum_raw,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::MinimumRawKernel,
+                   float,
+                   phi::dtype::float16) {}
+PD_REGISTER_KERNEL(remainder_raw,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::RemainderRawKernel,
+                   float,
+                   phi::dtype::float16,
+                   int32_t,
+                   int64_t) {}
+PD_REGISTER_KERNEL(elementwise_pow_raw,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::ElementwisePowRawKernel,
+                   float,
+                   phi::dtype::float16) {}
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 27ca1cb73007ff21694d43db29fe9e901d47645e..fdd53551be919b38486b5af67beb678faf64ccdb 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -269,90 +269,6 @@ class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
         )
 
 
-class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-            )
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-            )
-        }
-
-
-class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-            )
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -393,48 +309,6 @@ class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
         }
 
 
-class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
-            )
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.public_python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
index 9657f0d5e9f213888b202c1f7806cbdc3c109852..788471b3a27e110c7d2f9e890f253896ec72523c 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
@@ -115,44 +115,6 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_min"
-        self.python_api = broadcast_wrapper(shape=[100, 1, 1])
-        x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': np.minimum(
-                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-            )
-        }
-
-
-class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_min"
-        self.python_api = broadcast_wrapper(shape=[1, 100, 1])
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.minimum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-            )
-        }
-
-
 class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
@@ -171,25 +133,6 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
         }
 
 
-class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_min"
-        self.python_api = broadcast_wrapper(shape=[1, 25, 4, 1])
-        x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
-            np.float64
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.minimum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)
-            )
-        }
-
-
 class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
@@ -246,10 +189,7 @@ class TestElementwiseMinOpFP16(unittest.TestCase):
         self.check_main((13, 17), (13, 17))
         self.check_main((10, 3, 4), (1,))
         self.check_main((100,), (100,))
-        self.check_main((100, 3, 2), (100,), 0)
-        self.check_main((2, 100, 3), (100,), 1)
         self.check_main((2, 3, 100), (100,))
-        self.check_main((2, 25, 4, 1), (25, 4), 1)
         self.check_main((2, 10, 2, 5), (2, 10, 1, 5))
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py
index ac0e8c6c142097caa344fe81e04a6d0b594c95eb..9eba287b2206d6bb9d8653439ef1bedfa4fda141 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py
@@ -172,73 +172,6 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
-    def setUp(self):
-        self.op_type = "elementwise_pow"
-        self.python_api = paddle.pow
-
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [100]).astype("float64"),
-        }
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
-        }
-
-    def test_check_grad_normal(self):
-        if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
-        else:
-            self.check_grad(['X', 'Y'], 'Out')
-
-
-class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
-    def setUp(self):
-        self.op_type = "elementwise_pow"
-        self.python_api = paddle.pow
-
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [100]).astype("float64"),
-        }
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': np.power(
-                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-            )
-        }
-
-    def test_check_grad_normal(self):
-        if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
-        else:
-            self.check_grad(['X', 'Y'], 'Out')
-
-
-class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
-    def setUp(self):
-        self.op_type = "elementwise_pow"
-        self.python_api = paddle.pow
-
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64"),
-        }
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.power(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1)
-            )
-        }
-
-    def test_check_grad_normal(self):
-        if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)
-        else:
-            self.check_grad(['X', 'Y'], 'Out')
-
-
 class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py
index 49f49a3a97e25992bc128cd100bd34e02eaf4fd9..66982e9a2c5e53348f2ea25fab117ec2e1005633 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py
@@ -108,38 +108,6 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
                 'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
             }
 
-    class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
-            y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-                self.dtype
-            )
-            self.inputs = {'X': x, 'Y': y}
-
-            self.attrs = {'axis': 0}
-            self.outputs = {
-                'Out': np.maximum(
-                    self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-                )
-            }
-
-    class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
-            y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-                self.dtype
-            )
-            self.inputs = {'X': x, 'Y': y}
-
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': np.maximum(
-                    self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-                )
-            }
-
     class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
         def init_input_output(self):
             x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
@@ -155,22 +123,6 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
                 )
             }
 
-    class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
-            y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-                self.dtype
-            )
-            self.inputs = {'X': x, 'Y': y}
-
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': np.maximum(
-                    self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
-                )
-            }
-
     class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
         def init_input_output(self):
             x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py
index 64a10d435cc3bb8d826c140e45e3717bfbd3d7e8..c79cc9b8e130c1344e069f81bfa05b9b42aaa50b 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py
@@ -107,36 +107,6 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper):
                 'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
             }
 
-    class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
-            y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-                self.dtype
-            )
-            self.attrs = {'axis': 0}
-            self.inputs = {'X': x, 'Y': y}
-            self.outputs = {
-                'Out': np.minimum(
-                    self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-                )
-            }
-
-    class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype)
-            y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-                self.dtype
-            )
-            self.attrs = {'axis': 1}
-            self.inputs = {'X': x, 'Y': y}
-            self.outputs = {
-                'Out': np.minimum(
-                    self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-                )
-            }
-
     class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
         def init_input_output(self):
             x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
@@ -151,21 +121,6 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper):
                 )
             }
 
-    class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
-        def init_input_output(self):
-            x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
-            sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
-            y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype(
-                self.dtype
-            )
-            self.attrs = {'axis': 1}
-            self.inputs = {'X': x, 'Y': y}
-            self.outputs = {
-                'Out': np.minimum(
-                    self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)
-                )
-            }
-
     class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
         def init_input_output(self):
             x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py
index 3197c5a0484c1ff6cf2aef4a4477fbf90677de95..c00ea8db5c859f91cb705018a192ee1e06ab1286 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py
@@ -72,26 +72,6 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper):
             place = paddle.XPUPlace(0)
             self.check_output_with_place(place)
 
-    class TestElementwiseModOp_broadcast_1(ElementwiseModOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(2, 100, 3).astype(self.dtype),
-                'Y': np.random.rand(2, 100, 3).astype(self.dtype),
-            }
-
-            self.attrs = {'axis': 1}
-            self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']}
-
-    class TestElementwiseModOp_broadcast_2(ElementwiseModOp):
-        def init_input_output(self):
-            self.inputs = {
-                'X': np.random.rand(22, 128, 3).astype(self.dtype),
-                'Y': np.random.rand(22, 128, 3).astype(self.dtype),
-            }
-
-            self.attrs = {'axis': 1}
-            self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']}
-
     class TestRemainderOp(unittest.TestCase):
         def test_dygraph(self):
             with fluid.dygraph.guard():
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py
index eb082c95dc6f57ade5e80129e8ec8b72c3c08a48..431ca838c1ab792ce6c434cdca78496cae471672 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py
@@ -98,47 +98,6 @@ class XPUTestElementwisePowOp(XPUOpTestWrapper):
             }
             self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
 
-    class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
-        def compute_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
-                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-            }
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': np.power(
-                    self.inputs['X'], self.inputs['Y'].reshape(100, 1)
-                )
-            }
-
-    class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
-        def compute_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
-                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
-            }
-            self.attrs = {'axis': 0}
-            self.outputs = {
-                'Out': np.power(
-                    self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-                )
-            }
-
-    class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
-        def compute_input_output(self):
-            self.inputs = {
-                'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(
-                    self.dtype
-                ),
-                'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype),
-            }
-            self.attrs = {'axis': 1}
-            self.outputs = {
-                'Out': np.power(
-                    self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1)
-                )
-            }
-
     class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
         def compute_input_output(self):
             self.inputs = {