diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc index 82d2e741e9de852823726f91a6f2d7370c8d0b0e..4fd429fbd333405628d2ac011bf6ba4fba17481a 100644 --- a/paddle/phi/api/lib/data_transform.cc +++ b/paddle/phi/api/lib/data_transform.cc @@ -37,11 +37,17 @@ inline bool NeedTransformDataType(const DataType& input, inline bool NeedTransformPlace(const paddle::platform::Place& input, const Backend& target, const TransformFlag& transform_flag) { - bool ret = - input.GetType() == AllocationType::GPUPINNED || - (transform_flag.need_trans_backend() && target != Backend::ALL_BACKEND && - phi::TransToPhiBackend(input) != - (target != Backend::GPUDNN ? target : Backend::GPU)); + // NOTE(dev): The default value of TransformFlag is True, if it is set with + // False + // somewhere such as api.yaml or backward.yaml that means we should skip data + // transform. Because "stop_transform_" has highest priority. + if (!transform_flag.need_trans_backend()) { + return false; + } + bool ret = input.GetType() == AllocationType::GPUPINNED || + (target != Backend::ALL_BACKEND && + phi::TransToPhiBackend(input) != + (target != Backend::GPUDNN ? target : Backend::GPU)); return ret; } diff --git a/paddle/phi/ops/compat/expand_sig.cc b/paddle/phi/ops/compat/expand_sig.cc index 3b2e468267da03ba97917a4899508f1fa3b9b283..9b0a1f5ab7df4aad1eec5deafc4203a8b1116399 100644 --- a/paddle/phi/ops/compat/expand_sig.cc +++ b/paddle/phi/ops/compat/expand_sig.cc @@ -17,6 +17,11 @@ namespace phi { KernelSignature ExpandOpArgumentMapping(const ArgumentMappingContext& ctx) { + const auto& shape = paddle::any_cast<std::vector<int>>(ctx.Attr("shape")); + // Infer output shape by Attr("shape") in CompileTime if it is specified. 
+ if (!ctx.IsRuntime() && !shape.empty()) { + return KernelSignature("expand", {"X"}, {"shape"}, {"Out"}); + } if (ctx.HasInput("Shape")) { return KernelSignature("expand", {"X"}, {"Shape"}, {"Out"}); } else if (ctx.InputSize("expand_shapes_tensor") > 0) { @@ -27,6 +32,12 @@ KernelSignature ExpandOpArgumentMapping(const ArgumentMappingContext& ctx) { } KernelSignature ExpandGradOpArgumentMapping(const ArgumentMappingContext& ctx) { + const auto& shape = paddle::any_cast<std::vector<int>>(ctx.Attr("shape")); + // Infer output shape by Attr("shape") in CompileTime if it is specified. + if (!ctx.IsRuntime() && !shape.empty()) { + return KernelSignature( + "expand_grad", {"X", "Out@GRAD"}, {"shape"}, {"X@GRAD"}); + } if (ctx.HasInput("Shape")) { return KernelSignature("expand_grad", {"X", GradVarName("Out")}, diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index fd46b41c5f07e2b1481ba657451bd8545fc8478b..592a635ddcccc587cba766e00525fc9c8f3c6639 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -231,6 +231,18 @@ class TestExpandV2API(unittest.TestCase): assert np.array_equal(res_3, np.tile(input, (1, 1))) +class TestExpandInferShape(unittest.TestCase): + def test_shape_with_var(self): + with program_guard(Program(), Program()): + x = paddle.static.data(shape=[-1, 1, 3], name='x') + fake_var = paddle.randn([2, 3]) + target_shape = [ + -1, paddle.shape(fake_var)[0], paddle.shape(fake_var)[1] + ] + out = paddle.expand(x, shape=target_shape) + self.assertListEqual(list(out.shape), [-1, -1, -1]) + + if __name__ == "__main__": paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index 05a310a9c50332cb133d064ed2c9dec560631664..d3fea677a47d92e5762796e0f8f0d723638084c4 100644 --- 
a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -22,6 +22,7 @@ import unittest import numpy as np from op_test import OpTest from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.fluid.framework import _test_eager_guard class TestFullOp(unittest.TestCase): @@ -133,5 +134,19 @@ class TestFullLikeOp3(TestFullLikeOp1): self.dtype = np.int64 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFullLikeOp4(unittest.TestCase): + def test_skip_data_transform(self): + paddle.disable_static() + with _test_eager_guard(): + x = paddle.to_tensor( + [1., 2., 3., 4.], place=paddle.CUDAPinnedPlace()) + out = paddle.full_like(x, 1.) + self.assertTrue( + (out.numpy() == np.ones([4]).astype(np.float32)).all(), True) + paddle.enable_static() + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 0808eb6c69d5a7c3c8048a3070fdb722cc2ea044..4779750b5b44078976a0aac81a38128a500c7c67 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -448,7 +448,7 @@ - api : deformable_conv args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) output : Tensor(out) - infer_meta : + infer_meta : func : DeformableConvInferMeta kernel : func : deformable_conv @@ -781,6 +781,8 @@ param : [x, value, dtype] data_type : dtype > x backend : place > x + data_transform : + skip_transform : x - api : gather args : (Tensor x, Tensor index, Scalar axis=0)