diff --git a/paddle/fluid/operators/elementwise/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
index ead7b122be2b4c479263e9ac0e9833186d8f3cfa..ab3c44da4a16d9b47b216af5bbccfa6eb11c6601 100644
--- a/paddle/fluid/operators/elementwise/elementwise_max_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc
@@ -87,6 +87,13 @@ class ElementwiseMaxCompositeGradOpMaker
     auto* dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
     VLOG(6) << "Running maximum_grad composite func";
+    int axis = static_cast<int>(this->Attr<int>("axis"));
+    PADDLE_ENFORCE_EQ(
+        axis,
+        -1,
+        phi::errors::InvalidArgument(
+            "We only support axis = -1 in composite maximum_grad but we got: ",
+            axis));
     prim::maximum_grad<prim::DescTensor>(x, y, out_grad, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
     this->RecoverOutputName(dy, dy_name);
diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc
index cfc93e791fda4be85df48d51b472c858eb436e6d..fad0e3008ec5e77d46f8f3d1a08eaa42ee34e970 100644
--- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc
@@ -15,6 +15,9 @@ limitations under the License. */
 #include <string>
 
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
+#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
+#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
+#include "paddle/fluid/prim/utils/static/desc_tensor.h"
 
 namespace paddle {
 namespace framework {
@@ -68,6 +71,35 @@ class ElementwiseFMinOpMaker : public ElementwiseOpMaker {
   }
 };
 
+class ElementwiseMinCompositeGradOpMaker
+    : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+
+ public:
+  void Apply() override {
+    paddle::Tensor x = this->GetSingleForwardInput("X");
+    paddle::Tensor y = this->GetSingleForwardInput("Y");
+    paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
+    paddle::Tensor dx = this->GetSingleInputGrad("X");
+    auto* dx_ptr = this->GetOutputPtr(&dx);
+    std::string dx_name = this->GetOutputName(dx);
+    paddle::Tensor dy = this->GetSingleInputGrad("Y");
+    auto* dy_ptr = this->GetOutputPtr(&dy);
+    std::string dy_name = this->GetOutputName(dy);
+    VLOG(6) << "Running minimum_grad composite func";
+    int axis = static_cast<int>(this->Attr<int>("axis"));
+    PADDLE_ENFORCE_EQ(
+        axis,
+        -1,
+        phi::errors::InvalidArgument(
+            "We only support axis = -1 in composite minimum_grad but we got: ",
+            axis));
+    prim::minimum_grad<prim::DescTensor>(x, y, out_grad, dx_ptr, dy_ptr);
+    this->RecoverOutputName(dx, dx_name);
+    this->RecoverOutputName(dy, dy_name);
+  }
+};
+
 template <typename T>
 class ElementwiseMinGradOpMaker : public framework::SingleGradOpMaker {
  public:
@@ -112,7 +144,8 @@ REGISTER_OPERATOR(elementwise_min,
                   ops::ElementwiseMinOpMaker,
                   ops::ElementwiseOpInferVarType,
                   ops::ElementwiseMinGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ElementwiseMinGradOpMaker<paddle::imperative::OpBase>);
+                  ops::ElementwiseMinGradOpMaker<paddle::imperative::OpBase>,
+                  ops::ElementwiseMinCompositeGradOpMaker);
 
 REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad);
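Note: the composite makers above reject axis != -1 because the decomposition added below (in composite_backward_api.h) assumes ordinary broadcasting. For orientation, minimum's subgradient convention gives x the gradient where x < y and gives y the gradient where x >= y, so ties route the whole gradient to y. A quick dygraph sanity check, independent of prim; the expected values follow the less_than / greater_equal masks used in the decomposition:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
    y = paddle.to_tensor([2.0, 2.0, 2.0], stop_gradient=False)
    paddle.minimum(x, y).sum().backward()

    # x is selected only where x < y; the tie at index 1 goes to y.
    print(x.grad.numpy())  # [1. 0. 0.]
    print(y.grad.numpy())  # [0. 1. 1.]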
diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index c5d56dc82b5c4e6cf565ef7b55c149c4069445a8..45f62db4c70e48dc4d689cb40ccec2a4e4dded5e 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -1571,6 +1571,51 @@ void gelu_grad(const Tensor& x,
   }
 }
 
+template <typename T>
+void minimum_grad(const Tensor& x,
+                  const Tensor& y,
+                  const Tensor& out_grad,
+                  Tensor* x_grad,
+                  Tensor* y_grad) {
+  if (x_grad) {
+    auto x_tmp = cast<T>(less_than<T>(x, y), out_grad.dtype());
+    auto dx_res = out_grad * x_tmp;
+    if (y.dims() != x.dims()) {
+      // Maybe need reduce here
+      auto reduce_dim = get_reduce_dims(x.dims(), y.dims());
+      if (!reduce_dim.size()) {
+        set_output<T>(dx_res, x_grad);
+      } else {
+        auto dx_reduce_res =
+            dx_res.sum(phi::vectorize(reduce_dim), x.dtype(), false);
+        auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
+        set_output<T>(dx_tmp, x_grad);
+      }
+    } else {
+      set_output<T>(dx_res, x_grad);
+    }
+  }
+
+  if (y_grad) {
+    auto y_tmp = cast<T>(greater_equal<T>(x, y), out_grad.dtype());
+    auto dy_res = out_grad * y_tmp;
+    if (x.dims() != y.dims()) {
+      // Maybe need reduce here
+      phi::DDim reduce_dim = get_reduce_dims(y.dims(), x.dims());
+      if (!reduce_dim.size()) {
+        set_output<T>(dy_res, y_grad);
+      } else {
+        auto dy_reduce_res =
+            dy_res.sum(phi::vectorize(reduce_dim), y.dtype(), false);
+        auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
+        set_output<T>(dy_tmp, y_grad);
+      }
+    } else {
+      set_output<T>(dy_res, y_grad);
+    }
+  }
+}
+
 template <typename T>
 void tile_grad(const Tensor& x,
                const Tensor& out_grad,
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 60e5354dd5354f624f590990fdc4d0b584f614af..42175a73bc86d2a2c122d03b5380ef14be8a7ca4 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -546,6 +546,7 @@
     param: [x, y]
   kernel :
     func : minimum_grad
+  composite : minimum_grad(x, y, out_grad, axis, x_grad, y_grad)
 
 - backward_op : mish_grad
   forward : mish (Tensor x, float threshold) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index a9e7504d671d027f76073db81fd55c10d6486214..257e3d1a6b72cce73d579da15c495f1296ba3ca0 100755
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -1115,7 +1115,8 @@ set(TEST_CINN_OPS
     test_group_norm_op
     test_tile_op
     test_roll_op
-    test_sum_op)
+    test_sum_op
+    test_elementwise_min_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
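Note: in minimum_grad above, when x and y have different shapes the masked gradient comes out in the broadcast (output) shape, so it must be summed over the expanded axes and reshaped back before it can be assigned; that is what the get_reduce_dims / sum / reshape sequence does. A NumPy sketch of the same un-broadcast step (the helper name here is illustrative, not Paddle API):

    import numpy as np

    def reduce_to_shape(grad, shape):
        """Sum `grad` over the axes broadcasting expanded, then reshape.

        Mirrors the reduce_dim/sum/reshape sequence in minimum_grad above.
        """
        # Leading axes of `grad` with no counterpart in `shape`.
        lead = grad.ndim - len(shape)
        axes = list(range(lead))
        # Axes where the target had size 1 but `grad` is larger were broadcast.
        for i, dim in enumerate(shape):
            if dim == 1 and grad.shape[lead + i] != 1:
                axes.append(lead + i)
        if axes:
            grad = grad.sum(axis=tuple(axes))
        return grad.reshape(shape)

    x = np.random.rand(2, 3, 4)
    y = np.random.rand(3, 1)  # broadcast against x
    dout = np.ones_like(np.minimum(x, y))
    dy = reduce_to_shape(dout * (x >= y), y.shape)
    assert dy.shape == y.shape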
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
index 15075991efb16b99989fd3747de8149cd9f9e4da..fb03a6831ad492d233ec872f905a0073584b24a7 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py
@@ -34,6 +34,9 @@ class TestElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         # If x and y have the same value, the min() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
@@ -47,23 +50,60 @@ class TestElementwiseOp(OpTest):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        if hasattr(self, 'attrs'):
+            if self.attrs['axis'] == -1:
+                self.check_grad(['X', 'Y'], 'Out', check_prim=True)
+            else:
+                self.check_grad(['X', 'Y'], 'Out')
+        else:
+            self.check_grad(['X', 'Y'], 'Out', check_prim=True)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
-        )
+        if hasattr(self, 'attrs') and self.attrs['axis'] != -1:
+            self.check_grad(
+                ['Y'],
+                'Out',
+                max_relative_error=0.005,
+                no_grad_set=set("X"),
+            )
+        else:
+            self.check_grad(
+                ['Y'],
+                'Out',
+                max_relative_error=0.005,
+                no_grad_set=set("X"),
+                check_prim=True,
+            )
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')
-        )
+        if hasattr(self, 'attrs') and self.attrs['axis'] != -1:
+            self.check_grad(
+                ['X'],
+                'Out',
+                max_relative_error=0.005,
+                no_grad_set=set('Y'),
+                check_dygraph=False,
+            )
+        else:
+            self.check_grad(
+                ['X'],
+                'Out',
+                max_relative_error=0.005,
+                no_grad_set=set('Y'),
+                check_prim=True,
+            )
+
+    def if_enable_cinn(self):
+        pass
 
 
 class TestElementwiseFP16Op(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         self.dtype = np.float16
         # If x and y have the same value, the min() is not differentiable.
         # So we generate test data by the following method
@@ -74,66 +114,81 @@ class TestElementwiseFP16Op(TestElementwiseOp):
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
-
-    def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
-
-    def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
-
 
 class TestElementwiseMinOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.1, 1, []).astype("float64")
         y = np.random.uniform(0.1, 1, []).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMinFP16Op_ZeroDim1(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, []).astype(np.float16)
         self.y = np.random.uniform(0.1, 1, []).astype(np.float16)
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMinOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         y = np.random.uniform(0.1, 1, []).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMinFP16Op_ZeroDim2(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
         self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMinOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.1, 1, []).astype("float64")
         y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 class TestElementwiseMinFP16Op_ZeroDim3(TestElementwiseFP16Op):
     def init_data(self):
         self.x = np.random.uniform(0.1, 1, []).astype("float16")
         self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
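Note: the ZeroDim cases above override the new if_enable_cinn hook to set enable_cinn = False, presumably because CINN did not yet handle 0-D tensors at this point. For a 0-D y broadcast against a full tensor, the composite rule reduces the gradient over every axis, so y_grad stays 0-D; a dygraph illustration (tie convention as noted earlier, the asserted values are an assumption of that convention):

    import paddle

    x = paddle.rand([13, 17])
    x.stop_gradient = False
    y = paddle.to_tensor(0.5, stop_gradient=False)  # 0-D tensor
    paddle.minimum(x, y).sum().backward()

    print(y.grad.shape)  # [] -- still 0-D after the reduce/reshape
    # y collects the gradient from every element where x >= y.
    print(float(y.grad) == float((x >= y).astype('float32').sum()))  # True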
@@ -142,6 +197,9 @@ class TestElementwiseMinOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float64")
         y = np.array([0.5]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
@@ -155,6 +213,9 @@ class TestElementwiseMinFP16Op_scalar(TestElementwiseFP16Op):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(np.float16)
         y = np.array([0.5]).astype(np.float16)
         self.inputs = {'X': x, 'Y': y}
@@ -165,6 +226,9 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.random((100,)).astype("float64")
         sgn = np.random.choice([-1, 1], (100,)).astype("float64")
         y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64")
@@ -176,6 +240,9 @@ class TestElementwiseMinFP16Op_Vector(TestElementwiseFP16Op):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.random((100,)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
         y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype(np.float16)
@@ -187,6 +254,9 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = broadcast_wrapper(shape=[1, 1, 100])
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float64)
         y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -205,6 +275,9 @@ class TestElementwiseMinFP16Op_broadcast_2(TestElementwiseFP16Op):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = broadcast_wrapper(shape=[1, 1, 100])
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
         y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
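Note: broadcast_wrapper, used by the broadcast_2 cases above, is defined near the top of the test file (outside this diff); it reshapes y before calling paddle.minimum so the legacy op's axis-style broadcast has a plain-broadcast Python counterpart. A sketch consistent with its call sites here; the exact body may differ:

    import paddle

    def broadcast_wrapper(shape=[1, 1, 100]):
        # Reshape y so ordinary broadcasting reproduces the legacy
        # elementwise_min `axis` broadcast (sketch only; the real helper
        # lives in test_elementwise_min_op.py).
        def min_wrapper(x, y, axis=-1):
            return paddle.minimum(x, y.reshape(shape))

        return min_wrapper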
@@ -223,6 +296,9 @@ class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.public_python_api = paddle.minimum
+        self.if_enable_cinn()
         x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float64)
         sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float64)
         y = x + sgn * np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float64)
@@ -235,6 +311,9 @@ class TestElementwiseMinFP16Op_broadcast_4(TestElementwiseFP16Op):
     def setUp(self):
         self.op_type = "elementwise_min"
         self.python_api = paddle.minimum
+        self.public_python_api = paddle.minimum
+        self.prim_op_type = "prim"
+        self.if_enable_cinn()
         x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float16)
         y = x + sgn * np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float16)
@@ -268,7 +347,7 @@ class TestElementwiseBF16Op(OpTest):
         self.python_api = paddle.minimum
         self.public_python_api = paddle.minimum
         self.prim_op_type = "prim"
-        self.enable_cinn = False
+        self.if_enable_cinn()
         self.dtype = np.uint16
         self.inputs = {
             'X': convert_float_to_uint16(self.x),
@@ -282,17 +361,82 @@ class TestElementwiseBF16Op(OpTest):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', numeric_grad_delta=0.05)
+        places = self._get_places()
+        for place in places:
+            if type(place) is paddle.fluid.libpaddle.CPUPlace:
+                check_prim = False
+            else:
+                check_prim = True
+
+            self.check_grad_with_place(
+                place,
+                inputs_to_check=['X', 'Y'],
+                output_names='Out',
+                no_grad_set=None,
+                numeric_grad_delta=0.05,
+                in_place=False,
+                max_relative_error=0.005,
+                user_defined_grads=None,
+                user_defined_grad_outputs=None,
+                check_dygraph=True,
+                check_prim=check_prim,
+                only_check_prim=False,
+                atol=1e-5,
+                check_cinn=False,
+            )
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
-        )
+        places = self._get_places()
+        for place in places:
+            if type(place) is paddle.fluid.libpaddle.CPUPlace:
+                check_prim = False
+            else:
+                check_prim = True
+
+            self.check_grad_with_place(
+                place,
+                inputs_to_check=['Y'],
+                output_names='Out',
+                no_grad_set=set("X"),
+                numeric_grad_delta=0.05,
+                in_place=False,
+                max_relative_error=0.005,
+                user_defined_grads=None,
+                user_defined_grad_outputs=None,
+                check_dygraph=True,
+                check_prim=check_prim,
+                only_check_prim=False,
+                atol=1e-5,
+                check_cinn=False,
+            )
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
-        )
+        places = self._get_places()
+        for place in places:
+            if type(place) is paddle.fluid.libpaddle.CPUPlace:
+                check_prim = False
+            else:
+                check_prim = True
+
+            self.check_grad_with_place(
+                place,
+                inputs_to_check=['X'],
+                output_names='Out',
+                no_grad_set=set('Y'),
+                numeric_grad_delta=0.05,
+                in_place=False,
+                max_relative_error=0.005,
+                user_defined_grads=None,
+                user_defined_grad_outputs=None,
+                check_dygraph=True,
+                check_prim=check_prim,
+                only_check_prim=False,
+                atol=1e-5,
+                check_cinn=False,
+            )
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
 
 
 class TestElementwiseMinBF16Op_ZeroDim1(TestElementwiseBF16Op):
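Note: the BF16 tests above skip check_prim on CPUPlace because some of the primitive ops in the decomposition lack bfloat16 CPU kernels. To watch the composite path work outside OpTest, the static-graph prim switch used by the test harness can be flipped directly. A minimal sketch, assuming core._set_prim_backward_enabled keeps the name used by OpTest in this era; with the flag on, the backward of elementwise_min should be built from primitive ops (cast, less_than, greater_equal, multiply, ...) rather than a single elementwise_min_grad:

    import paddle
    from paddle.fluid import core

    paddle.enable_static()
    core._set_prim_backward_enabled(True)  # route grads through composite rules

    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data('x', [2, 3], 'float32')
        y = paddle.static.data('y', [2, 3], 'float32')
        x.stop_gradient = False
        y.stop_gradient = False
        out = paddle.minimum(x, y)
        paddle.static.gradients([out], [x, y])

    # Inspect which grad ops were actually inserted into the program.
    print([op.type for op in main.block(0).ops])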