diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 5573ecf33687b4a8aba54ee8d8bcc6f2d5485890..04e37a9b0379aa35743242721092d26ad2f334ef 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -2326,7 +2326,7 @@ class TestPow(TestActivation):
     def setUp(self):
         self.op_type = "pow"
         self.python_api = paddle.pow
-        self.check_eager = False
+        self.check_eager = True
         self.init_dtype()
 
         np.random.seed(1024)
@@ -2337,6 +2337,9 @@ class TestPow(TestActivation):
         self.attrs = {'factor': 3.0}
         self.outputs = {'Out': out}
 
+    def test_check_output(self):
+        self.check_output(check_eager=self.check_eager)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
index 2a8ca51693ecfad55f2239d7619e355c6dd7f3f8..c6973255f2644549907b856a1eadd3019fc2de20 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
@@ -29,6 +29,7 @@ class TestElementwiseModOp(OpTest):
 
     def setUp(self):
         self.op_type = "elementwise_mod"
+        self.python_api = paddle.remainder
         self.axis = -1
         self.init_dtype()
         self.init_input_output()
@@ -43,7 +44,10 @@ class TestElementwiseModOp(OpTest):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -76,7 +80,10 @@ class TestElementwiseModOpFloat(TestElementwiseModOp):
         self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)
 
     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)
 
 
 class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py
index 9ec2d1acdb5f3975331656afe323ca43e88d93ee..3d7dc2da052f35ae213ebdb65e4864a7f89d81c9 100644
--- a/python/paddle/fluid/tests/unittests/test_gather_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gather_op.py
@@ -43,10 +43,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def config(self):
         """
@@ -136,10 +136,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)
 
     def config(self):
         """
@@ -165,10 +165,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def config(self):
         """
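The test changes above flip check_eager to True so that OpTest re-runs each case under the new eager (final-state) dygraph mode in addition to the legacy paths. A minimal sketch of the eager-mode calls these tests now exercise (the tensor values here are illustrative, not taken from the test fixtures):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.uniform(1, 2, [10, 10]).astype('float64'))
y = paddle.to_tensor(np.random.uniform(1, 2, [10, 10]).astype('float64'))
index = paddle.to_tensor([1, 3, 5], dtype='int64')

paddle.pow(x, 3.0)       # routed to _C_ops.final_state_pow in eager mode
paddle.remainder(x, y)   # routed via OP_NAMEMAPPING to final_state_modulo
paddle.gather(x, index)  # routed to _C_ops.final_state_gather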
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index f6bbadf98726f73fd45adf1009ac5cd89726cb0a..30e559151ed9e8f12df74adddeb5c75388308e19 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1391,9 +1391,9 @@ def gather(x, index, axis=None, name=None):
     if axis is None:
         axis = 0
 
-    #if in_dygraph_mode():
-        #return _C_ops.final_state_gather(x, index, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_gather(x, index, axis)
+    if _in_legacy_dygraph():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
         return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
 
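The gather() change above follows the standard three-way dispatch that this patch moves ops onto: the new eager mode calls the generated final-state kernel, legacy dygraph keeps the old _C_ops call with string attributes, and static graph falls through to the LayerHelper path. Condensed as a sketch (names as in the diff; the static-graph branch is elided):

def gather_dispatch_sketch(x, index, axis=0):
    if in_dygraph_mode():
        # new eager mode: generated final-state kernel, positional args
        return _C_ops.final_state_gather(x, index, axis)
    if _in_legacy_dygraph():
        # legacy dygraph: old C++ op, attributes passed as name/value pairs
        return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
    # otherwise: static graph, append a gather op via LayerHelper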
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ccd5efbd580af83387571afd26ac49b099cab8d4..adca732dfdaa0b409ee1675ee5a7464d5e41f703 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -150,41 +150,38 @@ def pow(x, y, name=None):
     """
 
     # in dynamic graph mode
-    #if in_dygraph_mode():
-        #if isinstance(y, (int, float)):
-            #return _C_ops.final_state_pow(x, y)
-        #elif isinstance(y, (paddle.Tensor, Variable)):
-            #return _elementwise_op_in_dygraph(
-                #x, y, axis=-1, act=None, op_name='elementwise_pow')
-        #else:
-            #raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
-
-    #if _in_legacy_dygraph():
-    if _non_static_mode():
+    if in_dygraph_mode():
         if isinstance(y, (int, float)):
-            return _C_ops.pow(x, 'factor', y)
+            return _C_ops.final_state_pow(x, y)
         elif isinstance(y, (paddle.Tensor, Variable)):
             return _elementwise_op_in_dygraph(
                 x, y, axis=-1, act=None, op_name='elementwise_pow')
         else:
             raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
-    # in static graph mode
-    else:
+    if _in_legacy_dygraph():
         if isinstance(y, (int, float)):
-            helper = LayerHelper('pow', **locals())
-            inputs = {'X': x}
-            attrs = {'factor': y}
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(
-                type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
-            return out
+            return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
-            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
-            helper = LayerHelper('elementwise_pow', **locals())
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+            return _elementwise_op_in_dygraph(
+                x, y, axis=-1, act=None, op_name='elementwise_pow')
         else:
-            raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
+            raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
+    # in static graph mode
+    if isinstance(y, (int, float)):
+        helper = LayerHelper('pow', **locals())
+        inputs = {'X': x}
+        attrs = {'factor': y}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
+        return out
+    elif isinstance(y, (paddle.Tensor, Variable)):
+        # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
+        helper = LayerHelper('elementwise_pow', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+    else:
+        raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
 
 
 OP_NAMEMAPPING = {
@@ -192,6 +189,7 @@ OP_NAMEMAPPING = {
     'elementwise_min': 'final_state_minimum',
     'elementwise_pow': 'final_state_elementwise_pow',
     'elementwise_floordiv': 'final_state_floor_divide',
+    'elementwise_mod': 'final_state_modulo',
 }
 
 @dygraph_only
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 0b855b0f967ba2f5ea93399b60803ec8b1061d31..139eb3556b05871d3932de6545faee24b4396b2a 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -632,6 +632,16 @@
     data_type : dtype > x
     backend : place > x
 
+- api : gather
+  args : (Tensor x, Tensor index, Scalar axis=0)
+  output : Tensor(out)
+  infer_meta :
+    func : GatherInferMeta
+  kernel :
+    func : gather
+    data_type: x
+  backward : gather_grad
+
 - api : gather_nd
   args : (Tensor x, Tensor index)
   output : Tensor
@@ -1220,6 +1230,16 @@
     func : pool3d
   backward : pool3d_grad
 
+- api : pow
+  args : (Tensor x, Scalar s)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow
+  backward : pow_grad
+
 - api : prelu
   args : (Tensor x, Tensor alpha, str data_format, str mode)
   output : Tensor(out)
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index d3d589d00f7f21e1a6ce9cadba8051db9aaa9109..6ce0ae1b78a852e489d23b4b0bf3f25ac441e882 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -178,7 +178,7 @@
   output : Tensor(x_grad), Tensor(filter_grad)
   infer_meta :
     func : ConvTransposeGradInferMeta
-  kernel : 
+  kernel :
     func : conv2d_transpose_grad
 
 - backward_api : conv3d_transpose_grad
@@ -389,6 +389,17 @@
   kernel :
     func : frobenius_norm_grad
 
+- backward_api : gather_grad
+  forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    data_type: x
+    func : gather_grad
+
 - backward_api : gather_nd_grad
   forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad)
@@ -803,6 +814,16 @@
   kernel :
     func : pool3d_grad
 
+- backward_api : pow_grad
+  forward : pow(Tensor x, Scalar s) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar s=-1)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow_grad
+
 - backward_api : prelu_grad
   forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
   args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
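The api.yaml and backward.yaml entries above are what Paddle's code generator expands into the final_state_gather and final_state_pow entry points and their gradient kernels. As a reference for what those backward kernels compute, a NumPy sketch (my own reference formulas for the axis=0 case, not code generated from the YAML):

import numpy as np

def pow_grad_reference(x, out_grad, s):
    # power rule: d(x**s)/dx = s * x**(s-1), chained with the upstream gradient
    return s * np.power(x, s - 1) * out_grad

def gather_grad_reference(x, index, out_grad):
    # gather's gradient scatter-adds the incoming rows back to their source positions
    x_grad = np.zeros_like(x)
    np.add.at(x_grad, index, out_grad)
    return x_grad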