diff --git a/paddle/fluid/operators/expand_v2_op.cc b/paddle/fluid/operators/expand_v2_op.cc
index 981cd110351292d59cb84674101c92c2328d5be0..292f706cb186b09fb4e358fd317b5d4248d5c3a3 100644
--- a/paddle/fluid/operators/expand_v2_op.cc
+++ b/paddle/fluid/operators/expand_v2_op.cc
@@ -16,7 +16,11 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <vector>
+
+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/unary.h"
 
 #define MAX_RANK_SUPPORTED 6
 
@@ -29,70 +33,6 @@ class ExpandV2Op : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
- protected:
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandV2");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandV2");
-    auto x_dims = ctx->GetInputDim("X");
-    auto expand_shape = ctx->Attrs().Get<std::vector<int>>("shape");
-
-    if (expand_shape.size() == 0) {
-      expand_shape = std::vector<int>(x_dims.size(), -1);
-    }
-
-    PADDLE_ENFORCE_GE(
-        expand_shape.size(), static_cast<size_t>(x_dims.size()),
-        platform::errors::InvalidArgument(
-            "The number of elements (%d) of 'shape' for "
-            "expand_v2 op must be greater than or equal to the rank "
-            "(%d) of the input.",
-            expand_shape.size(), static_cast<size_t>(x_dims.size())));
-    PADDLE_ENFORCE_LE(expand_shape.size(), MAX_RANK_SUPPORTED,
-                      platform::errors::InvalidArgument(
-                          "The number of elements (%d) of 'shape' for "
-                          "must not be greater than %d.",
-                          expand_shape.size(), MAX_RANK_SUPPORTED));
-    PADDLE_ENFORCE_GE(expand_shape.size(), 1,
-                      platform::errors::InvalidArgument(
-                          "The number of elements (%d) of 'shape' for "
-                          "must be a positive integer.",
-                          expand_shape.size()));
-
-    auto out_rank =
-        std::max(static_cast<size_t>(x_dims.size()), expand_shape.size());
-    std::vector<int64_t> out_shape(out_rank);
-    auto x_dim_vec = phi::vectorize(x_dims);
-    auto diff = expand_shape.size() - x_dim_vec.size();
-    x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
-    for (size_t i = 0; i < expand_shape.size(); ++i) {
-      if (x_dims[i] == -1) {
-        out_shape[i] = -1;
-      } else if (expand_shape[i] == -1) {
-        if (static_cast<size_t>(x_dims.size()) > i) {
-          out_shape[i] = x_dims[i];
-        } else {
-          out_shape[i] = -1;
-        }
-      } else if (expand_shape[i] == -2) {
-        // We use -2 to represent the element in expand_shape is a var.
-        out_shape[i] = -1;
-      } else {
-        PADDLE_ENFORCE_GT(
-            expand_shape[i], 0,
-            platform::errors::InvalidArgument(
-                "The %uth element of 'shape' for expand_v2 op must be "
-                "greater than 0, but the value given is %d.",
-                i, expand_shape[i]));
-        out_shape[i] = expand_shape[i];
-      }
-    }
-
-    ctx->SetOutputDim("Out", phi::make_ddim(out_shape));
-    if (out_shape[0] == x_dims[0]) {
-      ctx->ShareLoD("X", "Out");
-    }
-  }
-
 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
@@ -291,10 +231,14 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(ExpandV2GradNoNeedBufVarsInferer, "X");
 }  // namespace operators
 }  // namespace paddle
 
+DECLARE_INFER_SHAPE_FUNCTOR(expand_v2, ExpandInferShapeFunctor,
+                            PD_INFER_META(phi::ExpandInferMeta));
+
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(expand_v2, ops::ExpandV2Op, ops::ExpandV2OpMaker,
                   ops::ExpandV2GradOpMaker<paddle::framework::OpDesc>,
-                  ops::ExpandV2GradOpMaker<paddle::imperative::OpBase>);
+                  ops::ExpandV2GradOpMaker<paddle::imperative::OpBase>,
+                  ExpandInferShapeFunctor);
 REGISTER_OPERATOR(expand_v2_grad, ops::ExpandV2GradOp,
                   ops::ExpandV2DoubleGradOpMaker<paddle::framework::OpDesc>,
                   ops::ExpandV2DoubleGradOpMaker<paddle::imperative::OpBase>,
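Note: the deleted InferShape above and the new phi::ExpandInferMeta below implement the same shape rules: `shape` must have at least as many elements as the input's rank (and at most 6), a `-1` entry keeps the corresponding input dimension, and `-2` marks an entry whose value lives in a Tensor and is only known at run time. A minimal sketch of the user-visible `-1` behavior (illustrative only, not part of this patch):

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0]])  # shape [1, 3]
out = paddle.expand(x, shape=[2, -1])    # -1 keeps the input dim (3)
print(out.shape)                         # [2, 3]
```
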
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index c6e2cb761911e0c8d6856f9438099593a1d53d90..24d13bcc4b2767a311ebfabcd27230cea841ee72 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -405,6 +405,78 @@ void EighInferMeta(const MetaTensor& x,
   out_v->set_dims(input_dim);
 }
 
+void ExpandInferMeta(const MetaTensor& x,
+                     const IntArray& shape,
+                     MetaTensor* out) {
+#define MAX_RANK_SUPPORTED 6
+  auto x_dims = x.dims();
+  auto expand_shape = shape.GetData();
+
+  if (expand_shape.size() == 0) {
+    expand_shape = std::vector<int64_t>(x_dims.size(), -1);
+  }
+
+  PADDLE_ENFORCE_GE(
+      expand_shape.size(),
+      static_cast<size_t>(x_dims.size()),
+      phi::errors::InvalidArgument(
+          "The number of elements (%d) of 'shape' for "
+          "expand_v2 op must be greater than or equal to the rank "
+          "(%d) of the input.",
+          expand_shape.size(),
+          static_cast<size_t>(x_dims.size())));
+  PADDLE_ENFORCE_LE(
+      expand_shape.size(),
+      MAX_RANK_SUPPORTED,
+      phi::errors::InvalidArgument("The number of elements (%d) of 'shape' "
+                                   "for expand_v2 op must not be greater "
+                                   "than %d.",
+                                   expand_shape.size(),
+                                   MAX_RANK_SUPPORTED));
+  PADDLE_ENFORCE_GE(
+      expand_shape.size(),
+      1,
+      phi::errors::InvalidArgument("The number of elements (%d) of 'shape' "
+                                   "for expand_v2 op must be a positive "
+                                   "integer.",
+                                   expand_shape.size()));
+
+  auto out_rank =
+      std::max(static_cast<size_t>(x_dims.size()), expand_shape.size());
+  std::vector<int64_t> out_shape(out_rank);
+  auto x_dim_vec = phi::vectorize(x_dims);
+  auto diff = expand_shape.size() - x_dim_vec.size();
+  x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
+  for (size_t i = 0; i < expand_shape.size(); ++i) {
+    if (x_dims[i] == -1) {
+      out_shape[i] = -1;
+    } else if (expand_shape[i] == -1) {
+      if (static_cast<size_t>(x_dims.size()) > i) {
+        out_shape[i] = x_dims[i];
+      } else {
+        out_shape[i] = -1;
+      }
+    } else if (expand_shape[i] == -2) {
+      // We use -2 to represent that the element in expand_shape is a var.
+      out_shape[i] = -1;
+    } else {
+      PADDLE_ENFORCE_GT(
+          expand_shape[i],
+          0,
+          phi::errors::InvalidArgument(
+              "The %uth element of 'shape' for expand_v2 op must be "
+              "greater than 0, but the value given is %d.",
+              i,
+              expand_shape[i]));
+      out_shape[i] = expand_shape[i];
+    }
+  }
+
+  out->set_dims(make_ddim(out_shape));
+  out->set_dtype(x.dtype());
+  if (out_shape[0] == x_dims[0]) {
+    out->share_lod(x);
+  }
+}
+
 void FlattenInferMeta(const MetaTensor& x,
                       int start_axis,
                       int stop_axis,
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index c49e4c88dd89910e41d94e1cf4d5a5d2cd368bcd..ac5040388b3341b322fb9d42b4c3f9323a454c42 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -85,6 +85,10 @@ void EighInferMeta(const MetaTensor& x,
                    MetaTensor* out_w,
                    MetaTensor* out_v);
 
+void ExpandInferMeta(const MetaTensor& x,
+                     const IntArray& shape,
+                     MetaTensor* out);
+
 void FlattenInferMeta(const MetaTensor& x,
                       int start_axis,
                       int stop_axis,
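Note: the `-2` sentinel above covers the static-graph case where an element of `shape` is itself a Tensor; the corresponding output dim is then unknown (`-1`) until run time. A sketch of how that surfaces through `paddle.expand` (the printed shape is my expectation of the inferred dims, not verified output from this patch):

```python
import paddle

paddle.enable_static()
x = paddle.static.data(name='x', shape=[1, 3], dtype='float32')
n = paddle.full([1], 2, dtype='int32')  # a shape element carried in a Tensor
out = paddle.expand(x, shape=[n, 3])    # this dim takes the -2 sentinel path
print(out.shape)                        # expected (-1, 3): unknown until run time
```
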
diff --git a/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h b/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
index 4e82cccac3422e882625b92afdf84895ae43a716..17b6d7516e070311b8c05f236ceffd88ced067f6 100644
--- a/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/poisson_grad_kernel_impl.h
@@ -20,7 +20,9 @@ namespace phi {
 
 template <typename T, typename Context>
-void PoissonGradKernel(const Context& ctx, DenseTensor* x_grad) {
+void PoissonGradKernel(const Context& ctx,
+                       const DenseTensor& out_grad,
+                       DenseTensor* x_grad) {
   ctx.template Alloc<T>(x_grad);
   phi::funcs::SetConstant<Context, T> functor;
   functor(ctx, x_grad, static_cast<T>(0));
diff --git a/paddle/phi/kernels/poisson_grad_kernel.h b/paddle/phi/kernels/poisson_grad_kernel.h
index 21720474f4a12b2cd624c97cefaf0fe395a458f4..3ef60d7a5167682383a01079c8f809cc1ea32fe9 100644
--- a/paddle/phi/kernels/poisson_grad_kernel.h
+++ b/paddle/phi/kernels/poisson_grad_kernel.h
@@ -20,6 +20,8 @@ namespace phi {
 
 template <typename T, typename Context>
-void PoissonGradKernel(const Context& ctx, DenseTensor* x_grad);
+void PoissonGradKernel(const Context& ctx,
+                       const DenseTensor& out_grad,
+                       DenseTensor* x_grad);
 
 }  // namespace phi
diff --git a/paddle/phi/ops/compat/poisson_sig.cc b/paddle/phi/ops/compat/poisson_sig.cc
index cb6ae28804669327af6a99b7732ef9c73d30bbe4..e45640c11b6ee97a626552f30eec10d0bc083b80 100644
--- a/paddle/phi/ops/compat/poisson_sig.cc
+++ b/paddle/phi/ops/compat/poisson_sig.cc
@@ -18,7 +18,8 @@ namespace phi {
 
 KernelSignature PoissonGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
-  return KernelSignature("poisson_grad", {}, {}, {GradVarName("X")});
+  return KernelSignature(
+      "poisson_grad", {GradVarName("Out")}, {}, {GradVarName("X")});
 }
 
 }  // namespace phi
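Note: even with `out_grad` now plumbed through the signature, the kernel intentionally ignores it; Poisson sampling is non-differentiable, so `x_grad` is defined as all zeros. A quick dygraph check of that contract (a sketch mirroring the unit test further down):

```python
import paddle

x = paddle.rand([4, 4])
x.stop_gradient = False
y = paddle.poisson(x)
y.backward()                 # drives the poisson_grad kernel above
print((x.grad == 0).all())   # True: the gradient is all zeros by definition
```
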
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index b1c1d1b9f2b9304c5d174d201ac25bd67ee56e69..58d8610ee352d90836d1da1fa677884fe209dba6 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -1024,6 +1024,7 @@ class TestSqrtBF16(OpTest):
 class TestRsqrt(TestActivation):
     def setUp(self):
         self.op_type = "rsqrt"
+        self.python_api = paddle.rsqrt
         self.init_dtype()
 
         np.random.seed(1024)
@@ -1036,7 +1037,8 @@ class TestRsqrt(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.0005)
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.0005, check_eager=True)
 
 
 class TestAbs(TestActivation):
diff --git a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py
index 2da5b770d052ccca07f2ca3f9f7f3d2002248525..29e3436948e98c4b3dda57b28fea7301394455ff 100644
--- a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py
@@ -28,12 +28,13 @@ def create_test_not_equal_class(op_type, typename, callback):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10, 7)).astype(typename)
             z = callback(x, y)
+            self.python_api = paddle.tensor.equal_all
             self.inputs = {'X': x, 'Y': y}
             self.outputs = {'Out': z}
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_eager=True)
 
     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'not_equal_all')
     Cls.__name__ = cls_name
@@ -46,12 +47,13 @@ def create_test_not_shape_equal_class(op_type, typename, callback):
             x = np.random.random(size=(10, 7)).astype(typename)
             y = np.random.random(size=(10)).astype(typename)
             z = callback(x, y)
+            self.python_api = paddle.tensor.equal_all
            self.inputs = {'X': x, 'Y': y}
             self.outputs = {'Out': z}
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_eager=True)
 
     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'not_shape_equal_all')
     Cls.__name__ = cls_name
@@ -63,12 +65,13 @@ def create_test_equal_class(op_type, typename, callback):
         def setUp(self):
             x = y = np.random.random(size=(10, 7)).astype(typename)
             z = callback(x, y)
+            self.python_api = paddle.tensor.equal_all
             self.inputs = {'X': x, 'Y': y}
             self.outputs = {'Out': z}
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_eager=True)
 
     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
@@ -82,12 +85,13 @@ def create_test_dim1_class(op_type, typename, callback):
             x = y = np.array([True, False, True]).astype(typename)
             x = np.array([False, False, True]).astype(typename)
             z = callback(x, y)
+            self.python_api = paddle.tensor.equal_all
             self.inputs = {'X': x, 'Y': y}
             self.outputs = {'Out': z}
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_eager=True)
 
     cls_name = "{0}_{1}_{2}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index 70b3fda79b50fbb330f7969f0f3afa45cb16cda1..fd46b41c5f07e2b1481ba657451bd8545fc8478b 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -40,10 +40,10 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
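Note: every test change in this patch follows the same two-line recipe: record the Python-level API in `self.python_api` and pass `check_eager=True`, so `OpTest` exercises the new eager (final-state) path alongside the static-graph op. Condensed into one sketch (hypothetical test class, for illustration only):

```python
import numpy as np
import paddle
from op_test import OpTest


class TestExpandV2Sketch(OpTest):  # hypothetical name, not a test in this patch
    def setUp(self):
        self.op_type = "expand_v2"        # static-graph op under test
        self.python_api = paddle.expand   # eager API used when check_eager=True
        x = np.random.random((2, 3)).astype("float64")
        self.inputs = {'X': x}
        self.attrs = {'shape': [2, 2, 3]}
        self.outputs = {'Out': np.tile(x, (2, 1, 1))}

    def test_check_output(self):
        self.check_output(check_eager=True)  # runs both static and eager paths
```
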
diff --git a/python/paddle/fluid/tests/unittests/test_poisson_op.py b/python/paddle/fluid/tests/unittests/test_poisson_op.py
index 2123d4e0e7e35984f01b39633b76cb2c6337bb50..812770fac3ae4ba436d85e5af28e1bacd8728641 100644
--- a/python/paddle/fluid/tests/unittests/test_poisson_op.py
+++ b/python/paddle/fluid/tests/unittests/test_poisson_op.py
@@ -18,6 +18,7 @@ import numpy as np
 from op_test import OpTest
 import math
 import os
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 paddle.seed(100)
@@ -96,11 +97,18 @@ class TestPoissonAPI(unittest.TestCase):
         self.assertTrue(np.min(y_np) >= 0)
 
     def test_dygraph(self):
-        paddle.disable_static()
-        x = paddle.randn([10, 10], dtype='float32')
-        y = paddle.poisson(x)
-        self.assertTrue(np.min(y.numpy()) >= 0)
-        paddle.enable_static()
+        with paddle.fluid.dygraph.base.guard():
+            x = paddle.randn([10, 10], dtype='float32')
+            y = paddle.poisson(x)
+            self.assertTrue(np.min(y.numpy()) >= 0)
+
+        with _test_eager_guard():
+            x = paddle.randn([10, 10], dtype='float32')
+            x.stop_gradient = False
+            y = paddle.poisson(x)
+            y.backward()
+            self.assertTrue(np.min(y.numpy()) >= 0)
+            self.assertTrue(np.array_equal(np.zeros_like(x), x.gradient()))
 
     def test_fixed_random_number(self):
         # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
diff --git a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py
index 45e88d681d8e095bdfe732de2f66eb0720cb7346..4e79e8dca138e2fff37ea9ca72a04f227a0103bd 100644
--- a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py
+++ b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py
@@ -47,6 +47,7 @@ class TestTriangularSolveOp(OpTest):
 
     def setUp(self):
         self.op_type = "triangular_solve"
+        self.python_api = paddle.tensor.linalg.triangular_solve
         self.config()
 
         self.inputs = {
@@ -62,10 +63,10 @@ class TestTriangularSolveOp(OpTest):
         self.outputs = {'Out': self.output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)
 
 
 # 2D(broadcast) + 3D, test 'transpose'
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index f09fd3c02a978c06eaea13fc34a0265327e22da4..b84f59e9e15bd949a1fd66ee62f64a97f07d5a30 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -2752,6 +2752,10 @@ def triangular_solve(x,
             print(out)
             # [7, -2, -5]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_triangular_solve(x, y, upper, transpose,
+                                                   unitriangular)
+
     if paddle.in_dynamic_mode():
         return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose',
                                        transpose, 'unitriangular',
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index b5868c186f2317d932ee82cae1727fcff034b09f..a51861a67aa7b9a664c953e0401427572e9ce719 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -63,6 +63,9 @@ def equal_all(x, y, name=None):
           result2 = paddle.equal_all(x, z)
           print(result2) # result2 = [False ]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_equal_all(x, y)
+
     if paddle.in_dynamic_mode():
         return _C_ops.equal_all(x, y)
 
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 389b5dbd7dbec7433d03724f68737beaa6ad058e..3a79abd2dc06e67507275dc4ab35adb06b293b03 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2000,6 +2000,9 @@ def expand(x, shape, name=None):
             print(out)
             # [[1, 2, 3], [1, 2, 3]]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_expand(x, shape)
+
     if paddle.in_dynamic_mode():
         return _C_ops.expand_v2(x, 'shape', shape)
 
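Note: the three Python-side changes above share one dispatch order: the new eager (final-state) op first, the legacy dygraph op second, and the static-graph branch last. Abstractly (`foo` is a stand-in name, not a real op):

```python
def foo(x, *args):
    if in_dygraph_mode():             # new eager mode: generated final-state op
        return _C_ops.final_state_foo(x, *args)
    if paddle.in_dynamic_mode():      # legacy dygraph op
        return _C_ops.foo(x, *args)
    # static graph: fall through to the existing append_op(...) branch
    ...
```
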
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 5ab85905ba61d058e7c73d961dcf76441bd843cd..0808eb6c69d5a7c3c8048a3070fdb722cc2ea044 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -597,6 +597,14 @@
   kernel :
     func : equal
 
+- api : equal_all
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : CompareAllInferMeta
+  kernel :
+    func : equal_all
+
 # erf
 - api : erf
   args : (Tensor x)
@@ -627,6 +635,16 @@
     func : exp
   backward : exp_grad
 
+# expand
+- api : expand
+  args : (Tensor x, IntArray shape)
+  output : Tensor
+  infer_meta :
+    func : ExpandInferMeta
+  kernel :
+    func : expand
+  backward : expand_grad
+
 # expand_as
 - api : expand_as
   args : (Tensor x, Tensor y, int[] target_shape)
@@ -1494,7 +1512,7 @@
     func : pixel_shuffle
   backward : pixel_shuffle_grad
 
-# poisson  // no need grad
+# poisson
 - api : poisson
   args : (Tensor x)
   output : Tensor
@@ -1502,6 +1520,7 @@
     func : UnchangedInferMeta
   kernel :
     func : poisson
+  backward : poisson_grad
 
 - api : pool2d
   args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
@@ -1684,6 +1703,16 @@
     func : round
   backward : round_grad
 
+- api : rsqrt
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : rsqrt
+  inplace : (x -> out)
+  backward : rsqrt_grad
+
 - api : scale
   args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
   output : Tensor
@@ -2037,7 +2066,7 @@
     func : TriangularSolveInferMeta
   kernel :
     func : triangular_solve
-  # backward : triangular_solve_grad
+  backward : triangular_solve_grad
 
 - api : tril_triu
   args : (Tensor x, int diagonal, bool lower)
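Note: the new `rsqrt` entry generates the final-state op for rsqrt(x) = 1/sqrt(x); the `inplace : (x -> out)` line additionally produces the in-place variant. A quick usage sketch:

```python
import paddle

x = paddle.to_tensor([0.25, 1.0, 4.0])
print(paddle.rsqrt(x))  # [2.0, 1.0, 0.5], i.e. 1 / sqrt(x)
```
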
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index b12d242149acb417683891721170ee84e807f532..7ceea240d1814201bdf193c3156e3b2326701b30 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -488,6 +488,16 @@
     func : expand_as_grad
   no_need_buffer : x
 
+- backward_api : expand_grad
+  forward : expand (Tensor x, IntArray shape) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, IntArray shape)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : expand_grad
+
 - backward_api : expm1_grad
   forward : expm1 (Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
@@ -1144,6 +1154,16 @@
   kernel :
     func : pixel_shuffle_grad
 
+- backward_api : poisson_grad
+  forward : poisson (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : poisson_grad
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
@@ -1697,6 +1717,16 @@
   kernel :
     func : transpose_grad
   backward : transpose_double_grad
 
+- backward_api : triangular_solve_grad
+  forward : triangular_solve (Tensor x, Tensor y, bool upper, bool transpose, bool unitriangular) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper, bool transpose, bool unitriangular)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, y]
+  kernel :
+    func : triangular_solve_grad
+
 - backward_api : tril_triu_grad
   forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
   args : (Tensor out_grad, int diagonal, bool lower)
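Note: with `triangular_solve_grad` declared here (and the `backward :` line un-commented in api.yaml), gradients now flow through the solve in eager mode. A sketch of the end-to-end behavior (illustrative values, assuming the eager path above):

```python
import paddle

x = paddle.to_tensor([[1.0, 0.0], [2.0, 1.0]])  # lower-triangular system
x.stop_gradient = False
y = paddle.to_tensor([[1.0], [3.0]])
out = paddle.linalg.triangular_solve(x, y, upper=False)  # solves x @ out = y
out.backward()
print(x.grad.shape)  # [2, 2]: gradients reach the matrix operand
```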