From 96c6dde120f9963702bf61417f316f03517e20f0 Mon Sep 17 00:00:00 2001
From: feifei-111 <2364819892@qq.com>
Date: Wed, 3 Aug 2022 14:49:51 +0800
Subject: [PATCH] [ Dy2Static ] Fix dy2static: cpu, cuda, assign([Var, Var, ])
 (#44731)

* fix dy2static: cpu, cuda, assign([Var, Var, ])

* fix1

* fix2

Co-authored-by: xiongkun
---
 python/paddle/fluid/layers/math_op_patch.py  | 20 ++++
 .../test_cpu_cuda_to_tensor.py               | 91 +++++++++++++++++++
 python/paddle/tensor/creation.py             | 25 ++++-
 3 files changed, 134 insertions(+), 2 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py

diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index f6810fc0630..af9b4b368dd 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -128,6 +128,24 @@ def monkey_patch_variable():
         var.stop_gradient = True
         return var
 
+    @static_only
+    def cpu(self):
+        """
+        Variable should not expose cpu() and cuda() interfaces, but
+        providing them as no-ops greatly facilitates dy2static, so we
+        do nothing here.
+        """
+        return self
+
+    @static_only
+    def cuda(self):
+        """
+        Variable should not expose cpu() and cuda() interfaces, but
+        providing them as no-ops greatly facilitates dy2static, so we
+        do nothing here.
+        """
+        return self
+
     def astype(self, dtype):
         """
         **Notes**:
@@ -368,6 +386,8 @@ def monkey_patch_variable():
         # b=-a
         ('__neg__', _neg_),
         ('astype', astype),
+        ('cpu', cpu),
+        ('cuda', cuda),
         ('append', append),
         ('dim', lambda x: len(x.shape)),
         ('ndimension', lambda x: len(x.shape)),
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py
new file mode 100644
index 00000000000..7fc7002aca3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+import paddle
+from paddle import fluid
+import numpy as np
+
+
+class TestCpuCuda(unittest.TestCase):
+
+    def test_cpu_cuda(self):
+
+        def func(x):
+            x = paddle.to_tensor([1, 2, 3, 4])
+            x = x.cuda()
+            x = x.cpu()
+            return x
+
+        x = paddle.to_tensor([3])
+        print(paddle.jit.to_static(func).code)
+        print(paddle.jit.to_static(func)(x))
+
+
+class TestToTensor(unittest.TestCase):
+
+    def test_to_tensor_with_variable_list(self):
+
+        def func(x):
+            ones = paddle.to_tensor([1])
+            twos = paddle.to_tensor([2])
+            x = paddle.to_tensor([ones, twos, 3, 4])
+            return x
+
+        x = paddle.to_tensor([3])
+        print(paddle.jit.to_static(func).code)
+        self.assertTrue(
+            np.allclose(
+                paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4])))
+
+
+class TestToTensor1(unittest.TestCase):
+
+    def test_to_tensor_with_variable_list(self):
+
+        def func(x):
+            ones = paddle.to_tensor([1])
+            twos = paddle.to_tensor([2])
+            """ The [3] and [4] are first assigned to a variable and regarded as scalars.
+            TODO: handle this case once 0-dim tensors are supported.
+            """
+            x = paddle.to_tensor([ones, twos, [3], [4]])
+            return x
+
+        x = paddle.to_tensor([3])
+        print(paddle.jit.to_static(func).code)
+        self.assertTrue(
+            np.allclose(
+                paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4])))
+
+
+class TestToTensor2(unittest.TestCase):
+
+    def test_to_tensor_with_variable_list(self):
+
+        def func(x):
+            x = paddle.to_tensor([[1], [2], [3], [4]])
+            return x
+
+        x = paddle.to_tensor([3])
+        print(paddle.jit.to_static(func).code)
+        self.assertTrue(
+            np.allclose(
+                paddle.jit.to_static(func)(x).numpy(),
+                np.array([[1], [2], [3], [4]])))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 12d722281f3..ef492620956 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -1535,11 +1535,32 @@ def assign(x, output=None):
                          inputs={'X': [input]},
                          outputs={'Out': [output]})
     elif isinstance(input, np.ndarray):
-        # Not support [var, var, ...] currently.
+        # We now support the form [Var, Var, ...] if each Var.shape == (1,).
         if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
+            # We only handle lists nested one level deep: convert every scalar into a Variable, then stack them; the element types must be consistent.
+            if not all(
+                [x.shape == (1, ) for x in input if isinstance(x, Variable)]):
+                raise TypeError(
+                    "Unsupported: paddle.assign([Variable, Variable, ...]) with a non-scalar Variable."
+                )
+
+            def convert_scalar(x):
+                if not isinstance(x, Variable):
+                    return assign(x)
+                return x
+
+            to_stack_list = list(map(convert_scalar, input))
+            ret = paddle.stack(to_stack_list)
+            ret = paddle.squeeze(ret, -1)
+            return ret
+
+        if input.dtype == 'object':
+            """ The input may have the form [[Var], [Var], [3], [4]]; we reject it.
+            """
             raise TypeError(
-                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
+                "The received input has dtype `object`, which cannot be converted to a tensor, e.g. [[Var], [Var], [3], [4]]."
             )
+
         dtype = convert_np_dtype_to_dtype_(input.dtype)
         if dtype == core.VarDesc.VarType.FP64:
             # Setting FP64 numpy data is not supported in Paddle, so we
-- 
GitLab
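For context, a minimal sketch of the behavior this patch enables, mirroring the new unit tests above; the helper name `func` and the expected output are illustrative assumptions, not part of the patch:

import paddle


def func(x):
    # Under dy2static, x.cuda() / x.cpu() now resolve to the no-op Variable
    # methods patched in math_op_patch.py instead of failing.
    x = x.cuda()
    x = x.cpu()
    # A list mixing shape-(1,) Variables and Python scalars is now handled by
    # assign(): scalars become Variables, which are stacked and then squeezed.
    ones = paddle.to_tensor([1])
    return paddle.to_tensor([ones, 2, 3])


print(paddle.jit.to_static(func)(paddle.to_tensor([3])))
# Expected: a Tensor with values [1, 2, 3].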