From 162b4d6c13f6f38a234423bc984fb41710796475 Mon Sep 17 00:00:00 2001
From: Zhou Wei <52485244+zhouwei25@users.noreply.github.com>
Date: Sun, 27 Sep 2020 11:47:36 +0800
Subject: [PATCH] remove to_variable from 2.0 (#27528)

---
 python/paddle/__init__.py | 1 -
 python/paddle/fluid/dygraph/nn.py | 7 +---
 .../tests/unittests/test_activation_op.py | 8 ++--
 .../fluid/tests/unittests/test_adamax_api.py | 2 +-
 .../fluid/tests/unittests/test_adamw_op.py | 4 +-
 .../unittests/test_adaptive_avg_pool2d.py | 4 +-
 .../unittests/test_adaptive_avg_pool3d.py | 4 +-
 .../unittests/test_adaptive_max_pool2d.py | 4 +-
 .../unittests/test_adaptive_max_pool3d.py | 4 +-
 .../fluid/tests/unittests/test_addmm_op.py | 6 +--
 .../fluid/tests/unittests/test_arange.py | 6 +--
 .../fluid/tests/unittests/test_cholesky_op.py | 2 +-
 .../fluid/tests/unittests/test_clip_op.py | 6 +--
 .../fluid/tests/unittests/test_concat_op.py | 6 +--
 .../unittests/test_cosine_similarity_api.py | 16 ++++----
 .../fluid/tests/unittests/test_cumsum_op.py | 3 +-
 .../tests/unittests/test_default_dtype.py | 1 -
 .../unittests/test_directory_migration.py | 2 +-
 .../test_flatten_contiguous_range_op.py | 2 +-
 .../tests/unittests/test_imperative_basic.py | 4 +-
 .../test_imperative_selected_rows.py | 2 +-
 .../tests/unittests/test_isfinite_v2_op.py | 2 +-
 .../tests/unittests/test_jit_save_load.py | 14 +++----
 .../tests/unittests/test_kldiv_loss_op.py | 2 +-
 .../fluid/tests/unittests/test_l1_loss.py | 8 ++--
 .../fluid/tests/unittests/test_log_softmax.py | 4 +-
 .../fluid/tests/unittests/test_logsumexp.py | 4 +-
 .../fluid/tests/unittests/test_max_op.py | 2 +-
 .../fluid/tests/unittests/test_maximum_op.py | 8 ++--
 .../fluid/tests/unittests/test_mean_op.py | 2 +-
 .../fluid/tests/unittests/test_min_op.py | 2 +-
 .../fluid/tests/unittests/test_randn_op.py | 2 +-
 .../tests/unittests/test_retain_graph.py | 4 +-
 .../tests/unittests/test_transformer_api.py | 40 +++++++++----------
 .../tests/unittests/test_zeros_like_op.py | 2 +-
 python/paddle/tensor/linalg.py | 18 +++------
 python/paddle/tensor/math.py | 3 +-
 tools/wlist.json | 5 ++-
 38 files changed, 102 insertions(+), 114 deletions(-)

diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index e749cf88b6a..40275a2ce71 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -230,7 +230,6 @@ from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
 from .framework import CUDAPinnedPlace #DEFINE_ALIAS
-from .framework import to_variable #DEFINE_ALIAS
 from .framework import grad #DEFINE_ALIAS
 from .framework import no_grad #DEFINE_ALIAS
 from .framework import save #DEFINE_ALIAS
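For downstream code, the removal is mechanical: every call spelled `paddle.to_variable(...)` becomes `paddle.to_tensor(...)`, as the test updates below show. A minimal before/after sketch (hedged: it assumes the 2.0-beta dygraph setup used throughout this patch, where `paddle.disable_static()` is still called explicitly):

.. code-block:: python

    import numpy as np
    import paddle

    # 2.0-beta still requires switching dygraph mode on explicitly
    paddle.disable_static()

    data = np.arange(6).reshape(2, 3).astype("float32")

    # before this patch: x = paddle.to_variable(data)
    x = paddle.to_tensor(data)  # the 2.0 replacement
    print(x.numpy().sum())      # 15.0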
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index a14c3a81c12..05269028acc 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -3230,14 +3230,11 @@ class Flatten(layers.Layer):
         .. code-block:: python

             import paddle
-            from paddle import to_variable
             import numpy as np
+            paddle.disable_static()

             inp_np = np.ones([5, 2, 3, 4]).astype('float32')
-
-            paddle.disable_static()
-
-            inp_np = to_variable(inp_np)
+            inp_np = paddle.to_tensor(inp_np)
             flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
             flatten_res = flatten(inp_np)
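The rewritten `Flatten` docstring example runs as-is; a sketch of the shapes involved (the expected output shape is inferred from the arguments, not stated in the patch):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    inp_np = np.ones([5, 2, 3, 4]).astype('float32')
    inp = paddle.to_tensor(inp_np)

    # flattening axes 1..2 merges dims 2 and 3: [5, 2, 3, 4] -> [5, 6, 4]
    flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
    flatten_res = flatten(inp)
    print(flatten_res.numpy().shape)  # (5, 6, 4)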
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 791f1ee2dfa..ad7539e76e4 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -228,7 +228,7 @@ class TestTanhAPI(unittest.TestCase):

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.tanh(x)
         out2 = paddle.tanh(x)
         th = paddle.nn.Tanh()
@@ -573,7 +573,7 @@ class TestHardShrinkAPI(unittest.TestCase):

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.hardshrink(x)
         hd = paddle.nn.Hardshrink()
         out2 = hd(x)
@@ -639,7 +639,7 @@ class TestHardtanhAPI(unittest.TestCase):

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.hardtanh(x)
         m = paddle.nn.Hardtanh()
         out2 = m(x)
@@ -1063,7 +1063,7 @@ class TestLeakyReluAPI(unittest.TestCase):

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.leaky_relu(x)
         m = paddle.nn.LeakyReLU()
         out2 = m(x)
diff --git a/python/paddle/fluid/tests/unittests/test_adamax_api.py b/python/paddle/fluid/tests/unittests/test_adamax_api.py
index 5a33e11d286..6d2ec0eefbb 100644
--- a/python/paddle/fluid/tests/unittests/test_adamax_api.py
+++ b/python/paddle/fluid/tests/unittests/test_adamax_api.py
@@ -25,7 +25,7 @@ class TestAdamaxAPI(unittest.TestCase):
     def test_adamax_api_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.Adamax(
             learning_rate=0.01,
diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py
index cce24b57d2c..b799508f6b8 100644
--- a/python/paddle/fluid/tests/unittests/test_adamw_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py
@@ -22,7 +22,7 @@ class TestAdamWOp(unittest.TestCase):
     def test_adamw_op_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.01,
@@ -37,7 +37,7 @@ class TestAdamWOp(unittest.TestCase):
     def test_adamw_op_coverage(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.0,
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py
index e3c70884ebc..b8c5bd29491 100644
--- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py
@@ -147,7 +147,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             out_1 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3])
@@ -245,7 +245,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
             out_1 = adaptive_avg_pool(x=x)
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py
index a3c9dd91a69..bb36aaebf08 100755
--- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py
@@ -162,7 +162,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             out_1 = paddle.nn.functional.adaptive_avg_pool3d(
                 x=x, output_size=[3, 3, 3])
@@ -262,7 +262,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(
                 output_size=[3, 3, 3])
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py
index d78788eb1e7..dfa6f3226c8 100644
--- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py
@@ -147,7 +147,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             out_1 = paddle.nn.functional.adaptive_max_pool2d(
                 x=x, return_indices=False, output_size=[3, 3])
@@ -240,7 +240,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
             out_1 = adaptive_max_pool(x=x)
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py
index a7de0a5c6a7..1fa703688cd 100755
--- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py
@@ -162,7 +162,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             out_1 = paddle.nn.functional.adaptive_max_pool3d(
                 x=x, output_size=[3, 3, 3])
@@ -257,7 +257,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)

             adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                 output_size=[3, 3, 3])
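All eight pooling hunks follow one dygraph pattern; condensed into a single sketch (the NCHW input shape here is an assumption chosen to match the `[3, 3]` target size in the tests):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.random.rand(2, 3, 32, 32).astype("float32"))

    # functional form, as in TestAdaptiveAvgPool2dAPI
    out_1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=[3, 3])

    # layer form, as in TestAdaptiveAvgPool2dClassAPI
    adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
    out_2 = adaptive_avg_pool(x=x)

    print(out_1.shape, out_2.shape)  # [2, 3, 3, 3] [2, 3, 3, 3]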
diff --git a/python/paddle/fluid/tests/unittests/test_addmm_op.py b/python/paddle/fluid/tests/unittests/test_addmm_op.py
index 6e66c0c0029..6238d7dd4a1 100644
--- a/python/paddle/fluid/tests/unittests/test_addmm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_addmm_op.py
@@ -244,9 +244,9 @@ class TestAddMMAPI(unittest.TestCase):

         def test_error1():
             data_x_wrong = np.ones((2, 3)).astype(np.float32)
-            x = paddle.to_variable(data_x_wrong)
-            y = paddle.to_variable(data_y)
-            input = paddle.to_variable(data_input)
+            x = paddle.to_tensor(data_x_wrong)
+            y = paddle.to_tensor(data_y)
+            input = paddle.to_tensor(data_input)
             out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
         self.assertRaises(ValueError, test_error1)
 '''
diff --git a/python/paddle/fluid/tests/unittests/test_arange.py b/python/paddle/fluid/tests/unittests/test_arange.py
index 29003d28e44..d62c08b072b 100644
--- a/python/paddle/fluid/tests/unittests/test_arange.py
+++ b/python/paddle/fluid/tests/unittests/test_arange.py
@@ -98,9 +98,9 @@ class TestArangeImperative(unittest.TestCase):
         x2 = paddle.tensor.arange(5)
         x3 = paddle.tensor.creation.arange(5)

-        start = paddle.to_variable(np.array([0], 'float32'))
-        end = paddle.to_variable(np.array([5], 'float32'))
-        step = paddle.to_variable(np.array([1], 'float32'))
+        start = paddle.to_tensor(np.array([0], 'float32'))
+        end = paddle.to_tensor(np.array([5], 'float32'))
+        step = paddle.to_tensor(np.array([1], 'float32'))
         x4 = paddle.arange(start, end, step, 'int64')
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_op.py
index ab08a0aacbf..2fcec657c14 100644
--- a/python/paddle/fluid/tests/unittests/test_cholesky_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cholesky_op.py
@@ -96,7 +96,7 @@ class TestDygraph(unittest.TestCase):
         a = np.random.rand(3, 3)
         a_t = np.transpose(a, [1, 0])
         x_data = np.matmul(a, a_t) + 1e-03
-        x = paddle.to_variable(x_data)
+        x = paddle.to_tensor(x_data)
         out = paddle.cholesky(x, upper=False)
diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index b56d9f6668e..2946798a82f 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -168,9 +168,9 @@ class TestClipAPI(unittest.TestCase):
         paddle.disable_static(place)
         data_shape = [1, 9, 9, 4]
         data = np.random.random(data_shape).astype('float32')
-        images = paddle.to_variable(data, dtype='float32')
-        v_min = paddle.to_variable(np.array([0.2], dtype=np.float32))
-        v_max = paddle.to_variable(np.array([0.8], dtype=np.float32))
+        images = paddle.to_tensor(data, dtype='float32')
+        v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
+        v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))

         out_1 = paddle.clip(images, min=0.2, max=0.8)
         out_2 = paddle.clip(images, min=0.2, max=0.9)
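The clip hunk keeps both styles of bounds alive across the rename; a sketch of the pattern (passing the one-element tensors as bounds mirrors what the surrounding test builds `v_min`/`v_max` for, though the visible hunk only shows the scalar calls):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    images = paddle.to_tensor(
        np.random.random([1, 9, 9, 4]).astype('float32'), dtype='float32')
    v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
    v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))

    out_1 = paddle.clip(images, min=0.2, max=0.8)      # scalar bounds
    out_2 = paddle.clip(images, min=v_min, max=v_max)  # tensor bounds
    assert float(out_1.numpy().max()) <= 0.8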
diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py
index b4dbba7eead..14c10e7aa20 100644
--- a/python/paddle/fluid/tests/unittests/test_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -285,9 +285,9 @@ class TestConcatAPI(unittest.TestCase):
         in2 = np.array([[11, 12, 13], [14, 15, 16]])
         in3 = np.array([[21, 22], [23, 24]])
         paddle.disable_static()
-        x1 = paddle.to_variable(in1)
-        x2 = paddle.to_variable(in2)
-        x3 = paddle.to_variable(in3)
+        x1 = paddle.to_tensor(in1)
+        x2 = paddle.to_tensor(in2)
+        x3 = paddle.to_tensor(in3)
         out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
         out2 = paddle.concat(x=[x1, x2], axis=0)
         np_out1 = np.concatenate([in1, in2, in3], axis=-1)
diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
index 1e25613fa63..a8899d9f022 100644
--- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
+++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
@@ -75,8 +75,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)

         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -92,8 +92,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)

         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -110,8 +110,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape2).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)

         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -129,8 +129,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

         cos_sim_func = nn.CosineSimilarity(axis=axis, eps=eps)
-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = cos_sim_func(tesnor_x1, tesnor_x2)

         self.assertTrue(np.allclose(y.numpy(), np_out))
diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index ad121fac8cc..818e15bb319 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -21,13 +21,12 @@ import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-from paddle import to_variable


 class TestCumsumOp(unittest.TestCase):
     def run_cases(self):
         data_np = np.arange(12).reshape(3, 4)
-        data = to_variable(data_np)
+        data = paddle.to_tensor(data_np)

         y = paddle.cumsum(data)
         z = np.cumsum(data_np)
diff --git a/python/paddle/fluid/tests/unittests/test_default_dtype.py b/python/paddle/fluid/tests/unittests/test_default_dtype.py
index 057933fc7a7..29ca9a93985 100644
--- a/python/paddle/fluid/tests/unittests/test_default_dtype.py
+++ b/python/paddle/fluid/tests/unittests/test_default_dtype.py
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import Linear
 import paddle.fluid.core as core
-from paddle import to_variable


 class TestDefaultType(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_directory_migration.py b/python/paddle/fluid/tests/unittests/test_directory_migration.py
index 529fff158c5..2f35b45aa67 100644
--- a/python/paddle/fluid/tests/unittests/test_directory_migration.py
+++ b/python/paddle/fluid/tests/unittests/test_directory_migration.py
@@ -36,7 +36,7 @@ class TestDirectory(unittest.TestCase):
     def test_new_directory(self):
         new_directory = [
             'paddle.enable_static', 'paddle.disable_static',
-            'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
+            'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
             'paddle.no_grad', 'paddle.save', 'paddle.load',
             'paddle.static.save', 'paddle.static.load',
             'paddle.distributed.ParallelEnv',
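`test_directory_migration` pins down the public 2.0 surface, so the observable contract of this patch can be stated directly (a check sketch, not part of the patch):

.. code-block:: python

    import paddle

    # 'paddle.to_tensor' replaces 'paddle.to_variable' in the 2.0 namespace
    assert hasattr(paddle, "to_tensor")
    assert not hasattr(paddle, "to_variable")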
diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
index 642044bb4b1..e0e487eff11 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
@@ -195,7 +195,7 @@ class TestFlattenPython(unittest.TestCase):

         def test_Negative():
             paddle.disable_static()
-            img = paddle.to_variable(x)
+            img = paddle.to_tensor(x)
             out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
             return out.numpy().shape
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
index 22f16287c33..7378975aa37 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -211,7 +211,7 @@ class TestImperative(unittest.TestCase):
         paddle.disable_static()
         self.assertTrue(paddle.in_dynamic_mode())
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        var_inp = paddle.to_variable(np_inp)
+        var_inp = paddle.to_tensor(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out1 = out.numpy()
@@ -221,7 +221,7 @@ class TestImperative(unittest.TestCase):
         self.assertFalse(paddle.in_dynamic_mode())
         paddle.disable_static()
         self.assertTrue(paddle.in_dynamic_mode())
-        var_inp = paddle.to_variable(np_inp)
+        var_inp = paddle.to_tensor(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out2 = out.numpy()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
index 59ddb365e53..97f7162e997 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py
@@ -54,7 +54,7 @@ class TestSimpleNet(unittest.TestCase):
                 # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)
                 input_word = np.array([[1, 2], [2, 1]]).astype('int64')
-                input = paddle.to_variable(input_word)
+                input = paddle.to_tensor(input_word)

                 simplenet = SimpleNet(20, 32, dtype)
                 adam = SGDOptimizer(
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
index 8a868e751f0..281dc7caded 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
@@ -41,7 +41,7 @@ def run_dygraph(x_np, op_str, use_gpu=True):
     if use_gpu and fluid.core.is_compiled_with_cuda():
         place = paddle.CUDAPlace(0)
     paddle.disable_static(place)
-    x = paddle.to_variable(x_np)
+    x = paddle.to_tensor(x_np)
     dygraph_result = getattr(paddle.tensor, op_str)(x)
     return dygraph_result
diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py
index 7e6ca8076de..99404246185 100644
--- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py
@@ -543,9 +543,9 @@ class TestJitSaveMultiCases(unittest.TestCase):
         loaded_layer = paddle.jit.load(model_path)
         loaded_layer.eval()
         # inference & compare
-        x = paddle.to_variable(np.random.random((1, 784)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
         if with_label:
-            y = paddle.to_variable(np.random.random((1, 1)).astype('int64'))
+            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
             pred, _ = layer(x, y)
             pred = pred.numpy()
         else:
@@ -677,7 +677,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
         model_path = "test_not_prune_output_spec_name_warning"
         configs = paddle.SaveLoadConfig()
-        out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
+        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
         configs.output_spec = [out]
         paddle.jit.save(layer, model_path, configs=configs)
@@ -709,7 +709,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
         model_path = "test_prune_to_static_after_train"
         configs = paddle.SaveLoadConfig()
-        out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
+        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
         configs.output_spec = [out]
         with self.assertRaises(ValueError):
             paddle.jit.save(
@@ -730,7 +730,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
     def test_save_load_empty_layer(self):
         layer = EmptyLayer()
-        x = paddle.to_variable(np.random.random((10)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((10)).astype('float32'))
         out = layer(x)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
@@ -746,8 +746,8 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
     def test_save_load_no_param_layer(self):
         layer = NoParamLayer()
-        x = paddle.to_variable(np.random.random((5)).astype('float32'))
-        y = paddle.to_variable(np.random.random((5)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((5)).astype('float32'))
+        y = paddle.to_tensor(np.random.random((5)).astype('float32'))
         out = layer(x, y)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
index 041fe4e9043..3a3b7071e04 100644
--- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
@@ -90,7 +90,7 @@ class TestKLDivLossDygraph(unittest.TestCase):
         with paddle.fluid.dygraph.guard():
             kldiv_criterion = paddle.nn.KLDivLoss(reduction)
             pred_loss = kldiv_criterion(
-                paddle.to_variable(x), paddle.to_variable(target))
+                paddle.to_tensor(x), paddle.to_tensor(target))
             self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))

     def test_kl_loss_batchmean(self):
diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py
index 6a15fe49477..3c37397cae1 100644
--- a/python/paddle/fluid/tests/unittests/test_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py
@@ -26,8 +26,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
         self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)

     def run_imperative(self):
-        input = paddle.to_variable(self.input_np)
-        label = paddle.to_variable(self.label_np)
+        input = paddle.to_tensor(self.input_np)
+        label = paddle.to_tensor(self.label_np)
         dy_result = paddle.nn.functional.l1_loss(input, label)
         expected = np.mean(np.abs(self.input_np - self.label_np))
         self.assertTrue(np.allclose(dy_result.numpy(), expected))
@@ -106,8 +106,8 @@ class TestClassL1Loss(unittest.TestCase):
         self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)

     def run_imperative(self):
-        input = paddle.to_variable(self.input_np)
-        label = paddle.to_variable(self.label_np)
+        input = paddle.to_tensor(self.input_np)
+        label = paddle.to_tensor(self.label_np)
         l1_loss = paddle.nn.loss.L1Loss()
         dy_result = l1_loss(input, label)
         expected = np.mean(np.abs(self.input_np - self.label_np))
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index e3d7003eced..9ac4895f499 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -96,7 +96,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):

         # test dygrapg api
         paddle.disable_static()
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         y = logsoftmax(x)
         self.assertTrue(np.allclose(y.numpy(), ref_out))
         paddle.enable_static()
@@ -127,7 +127,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
         self.assertTrue(np.allclose(out[0], ref_out))

         paddle.disable_static()
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         y = F.log_softmax(x, axis, dtype)
         self.assertTrue(np.allclose(y.numpy(), ref_out), True)
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py
index cf9203dffcb..9032293070a 100644
--- a/python/paddle/fluid/tests/unittests/test_logsumexp.py
+++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py
@@ -111,7 +111,7 @@ class TestLogsumexpAPI(unittest.TestCase):
             self.assertTrue(np.allclose(res[0], out_ref))

         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         out = paddle.logsumexp(x, axis, keepdim)
         self.assertTrue(np.allclose(out.numpy(), out_ref))
         paddle.enable_static()
@@ -126,7 +126,7 @@ class TestLogsumexpAPI(unittest.TestCase):

     def test_alias(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         out1 = paddle.logsumexp(x)
         out2 = paddle.tensor.logsumexp(x)
         out3 = paddle.tensor.math.logsumexp(x)
diff --git a/python/paddle/fluid/tests/unittests/test_max_op.py b/python/paddle/fluid/tests/unittests/test_max_op.py
index c9afc4bec66..4786d790b14 100644
--- a/python/paddle/fluid/tests/unittests/test_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_max_op.py
@@ -80,7 +80,7 @@ class ApiMaxTest(unittest.TestCase):
     def test_imperative_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(np_x)
+        x = paddle.to_tensor(np_x)
         z = paddle.max(x, axis=0)
         np_z = z.numpy()
         z_expected = np.array(np.max(np_x, axis=0))
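The reduction tests keep their numpy cross-checks unchanged; the `paddle.max` case, condensed:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    np_x = np.array([10, 10]).astype('float64')
    x = paddle.to_tensor(np_x)
    z = paddle.max(x, axis=0)
    assert np.allclose(z.numpy(), np.max(np_x, axis=0))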
diff --git a/python/paddle/fluid/tests/unittests/test_maximum_op.py b/python/paddle/fluid/tests/unittests/test_maximum_op.py
index 5645597007a..54657d7900e 100644
--- a/python/paddle/fluid/tests/unittests/test_maximum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_maximum_op.py
@@ -61,8 +61,8 @@ class ApiMaximumTest(unittest.TestCase):
     def test_dynamic_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         z = paddle.maximum(x, y)
         np_z = z.numpy()
         z_expected = np.array(np.maximum(self.input_x, self.input_y))
@@ -73,8 +73,8 @@ class ApiMaximumTest(unittest.TestCase):

         np_x = np.random.rand(5, 4, 3, 2).astype("float64")
         np_y = np.random.rand(4, 3).astype("float64")
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         result_1 = paddle.maximum(x, y, axis=1)
         result_2 = paddle.maximum(x, y, axis=-2)
         self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index 29e79b096cf..f0094e703cd 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -204,7 +204,7 @@ class TestMeanAPI(unittest.TestCase):
         paddle.disable_static(self.place)

         def test_case(x, axis=None, keepdim=False):
-            x_tensor = paddle.to_variable(x)
+            x_tensor = paddle.to_tensor(x)
             out = paddle.mean(x_tensor, axis, keepdim)
             if isinstance(axis, list):
                 axis = tuple(axis)
diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py
index b9eff05c5ea..9c15d721635 100644
--- a/python/paddle/fluid/tests/unittests/test_min_op.py
+++ b/python/paddle/fluid/tests/unittests/test_min_op.py
@@ -80,7 +80,7 @@ class ApiMinTest(unittest.TestCase):
     def test_imperative_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(np_x)
+        x = paddle.to_tensor(np_x)
         z = paddle.min(x, axis=0)
         np_z = z.numpy()
         z_expected = np.array(np.min(np_x, axis=0))
diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py
index 9d2c03f3bba..4ddd98a8a73 100644
--- a/python/paddle/fluid/tests/unittests/test_randn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randn_op.py
@@ -63,7 +63,7 @@ class TestRandnOpForDygraph(unittest.TestCase):
         dim_2 = paddle.fill_constant([1], "int32", 50)
         x3 = paddle.randn(shape=[dim_1, dim_2, 784])

-        var_shape = paddle.to_variable(np.array(shape))
+        var_shape = paddle.to_tensor(np.array(shape))
         x4 = paddle.randn(var_shape)

         for out in [x1, x2, x3, x4]:
diff --git a/python/paddle/fluid/tests/unittests/test_retain_graph.py b/python/paddle/fluid/tests/unittests/test_retain_graph.py
index 9abbee17385..98c7e3800c2 100644
--- a/python/paddle/fluid/tests/unittests/test_retain_graph.py
+++ b/python/paddle/fluid/tests/unittests/test_retain_graph.py
@@ -105,8 +105,8 @@ class TestRetainGraph(unittest.TestCase):
         A = np.random.rand(2, 3, 32, 32).astype('float32')
         B = np.random.rand(2, 3, 32, 32).astype('float32')

-        realA = paddle.to_variable(A)
-        realB = paddle.to_variable(B)
+        realA = paddle.to_tensor(A)
+        realB = paddle.to_tensor(B)
         fakeB = g(realA)

         optim_d.clear_gradients()
diff --git a/python/paddle/fluid/tests/unittests/test_transformer_api.py b/python/paddle/fluid/tests/unittests/test_transformer_api.py
index 7c7a71a3be1..067d1ea5f73 100644
--- a/python/paddle/fluid/tests/unittests/test_transformer_api.py
+++ b/python/paddle/fluid/tests/unittests/test_transformer_api.py
@@ -487,24 +487,24 @@ class TestTransformer(unittest.TestCase):
                 dropout=dropout,
                 weight_attr=[None],
                 bias_attr=[False])
-            src = paddle.to_variable(
+            src = paddle.to_tensor(
                 np.random.rand(batch_size, source_length, d_model).astype(
                     "float32"))
-            tgt = paddle.to_variable(
+            tgt = paddle.to_tensor(
                 np.random.rand(batch_size, target_length, d_model).astype(
                     "float32"))
             src_mask = np.zeros((batch_size, n_head, source_length,
                                  source_length)).astype("float32")
             src_mask[0][0][0][0] = -np.inf
-            src_mask = paddle.to_variable(src_mask)
+            src_mask = paddle.to_tensor(src_mask)
             tgt_mask = np.zeros((batch_size, n_head, target_length,
                                  target_length)).astype("float32")
             tgt_mask[0][0][0][0] = -1e9
             memory_mask = np.zeros((batch_size, n_head, target_length,
                                     source_length)).astype("float32")
             memory_mask[0][0][0][0] = -1e9
-            tgt_mask, memory_mask = paddle.to_variable(
-                tgt_mask), paddle.to_variable(memory_mask)
+            tgt_mask, memory_mask = paddle.to_tensor(
+                tgt_mask), paddle.to_tensor(memory_mask)
             trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                        memory_mask)
@@ -521,24 +521,24 @@ class TestTransformer(unittest.TestCase):
                 dropout=dropout,
                 weight_attr=[None, None],
                 bias_attr=[False, False])
-            src = paddle.to_variable(
+            src = paddle.to_tensor(
                 np.random.rand(batch_size, source_length, d_model).astype(
                     "float32"))
-            tgt = paddle.to_variable(
+            tgt = paddle.to_tensor(
                 np.random.rand(batch_size, target_length, d_model).astype(
                     "float32"))
             src_mask = np.zeros((batch_size, n_head, source_length,
                                  source_length)).astype("float32")
             src_mask[0][0][0][0] = -np.inf
-            src_mask = paddle.to_variable(src_mask)
+            src_mask = paddle.to_tensor(src_mask)
             tgt_mask = np.zeros((batch_size, n_head, target_length,
                                  target_length)).astype("float32")
             tgt_mask[0][0][0][0] = -1e9
             memory_mask = np.zeros((batch_size, n_head, target_length,
                                     source_length)).astype("float32")
             memory_mask[0][0][0][0] = -1e9
-            tgt_mask, memory_mask = paddle.to_variable(
-                tgt_mask), paddle.to_variable(memory_mask)
+            tgt_mask, memory_mask = paddle.to_tensor(
+                tgt_mask), paddle.to_tensor(memory_mask)
             trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                        memory_mask)
@@ -555,24 +555,24 @@ class TestTransformer(unittest.TestCase):
                 dropout=dropout,
                 weight_attr=[None, None, None],
                 bias_attr=[False, False, True])
-            src = paddle.to_variable(
+            src = paddle.to_tensor(
                 np.random.rand(batch_size, source_length, d_model).astype(
                     "float32"))
-            tgt = paddle.to_variable(
+            tgt = paddle.to_tensor(
                 np.random.rand(batch_size, target_length, d_model).astype(
                     "float32"))
             src_mask = np.zeros((batch_size, n_head, source_length,
                                  source_length)).astype("float32")
             src_mask[0][0][0][0] = -np.inf
-            src_mask = paddle.to_variable(src_mask)
+            src_mask = paddle.to_tensor(src_mask)
             tgt_mask = np.zeros((batch_size, n_head, target_length,
                                  target_length)).astype("float32")
             tgt_mask[0][0][0][0] = -1e9
             memory_mask = np.zeros((batch_size, n_head, target_length,
                                     source_length)).astype("float32")
             memory_mask[0][0][0][0] = -1e9
-            tgt_mask, memory_mask = paddle.to_variable(
-                tgt_mask), paddle.to_variable(memory_mask)
+            tgt_mask, memory_mask = paddle.to_tensor(
+                tgt_mask), paddle.to_tensor(memory_mask)
             trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                        memory_mask)
@@ -588,24 +588,24 @@ class TestTransformer(unittest.TestCase):
                 dim_feedforward=dim_feedforward,
                 dropout=dropout,
                 bias_attr=False)
-            src = paddle.to_variable(
+            src = paddle.to_tensor(
                 np.random.rand(batch_size, source_length, d_model).astype(
                     "float32"))
-            tgt = paddle.to_variable(
+            tgt = paddle.to_tensor(
                 np.random.rand(batch_size, target_length, d_model).astype(
                     "float32"))
             src_mask = np.zeros((batch_size, n_head, source_length,
                                  source_length)).astype("float32")
             src_mask[0][0][0][0] = -np.inf
-            src_mask = paddle.to_variable(src_mask)
+            src_mask = paddle.to_tensor(src_mask)
             tgt_mask = np.zeros((batch_size, n_head, target_length,
                                  target_length)).astype("float32")
             tgt_mask[0][0][0][0] = -1e9
             memory_mask = np.zeros((batch_size, n_head, target_length,
                                     source_length)).astype("float32")
             memory_mask[0][0][0][0] = -1e9
-            tgt_mask, memory_mask = paddle.to_variable(
-                tgt_mask), paddle.to_variable(memory_mask)
+            tgt_mask, memory_mask = paddle.to_tensor(
+                tgt_mask), paddle.to_tensor(memory_mask)
             trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                        memory_mask)
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
index 21e618a4620..2cea3072809 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
@@ -63,7 +63,7 @@ class TestZerosLikeImpeartive(unittest.TestCase):
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
         paddle.disable_static(place)
-        x = paddle.to_variable(np.ones(shape))
+        x = paddle.to_tensor(np.ones(shape))
         for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
             out = zeros_like(x, dtype)
             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 26624d3b5ff..15580b6618e 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -707,20 +707,14 @@ def cross(x, y, axis=None, name=None):
     Examples:
         .. code-block:: python

            import paddle
-           from paddle import to_variable
-           import numpy as np
-
            paddle.disable_static()
-           data_x = np.array([[1.0, 1.0, 1.0],
-                              [2.0, 2.0, 2.0],
-                              [3.0, 3.0, 3.0]])
-           data_y = np.array([[1.0, 1.0, 1.0],
-                              [1.0, 1.0, 1.0],
-                              [1.0, 1.0, 1.0]])
-           x = to_variable(data_x)
-           y = to_variable(data_y)
-
+           x = paddle.to_tensor([[1.0, 1.0, 1.0],
+                                 [2.0, 2.0, 2.0],
+                                 [3.0, 3.0, 3.0]])
+           y = paddle.to_tensor([[1.0, 1.0, 1.0],
+                                 [1.0, 1.0, 1.0],
+                                 [1.0, 1.0, 1.0]])
            z1 = paddle.cross(x, y)
            print(z1.numpy())
            # [[-1. -1. -1.]
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 966544c7abb..ce32fb76f5c 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1650,12 +1650,11 @@ def cumsum(x, axis=None, dtype=None, name=None):
         .. code-block:: python

             import paddle
-            from paddle import to_variable
             import numpy as np

             paddle.disable_static()
             data_np = np.arange(12).reshape(3, 4)
-            data = to_variable(data_np)
+            data = paddle.to_tensor(data_np)

             y = paddle.cumsum(data)
             print(y.numpy())
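Both rewritten docstring examples run as shown; combined here with the outputs the docstrings claim:

.. code-block:: python

    import numpy as np
    import paddle

    paddle.disable_static()

    x = paddle.to_tensor([[1.0, 1.0, 1.0],
                          [2.0, 2.0, 2.0],
                          [3.0, 3.0, 3.0]])
    y = paddle.to_tensor([[1.0, 1.0, 1.0],
                          [1.0, 1.0, 1.0],
                          [1.0, 1.0, 1.0]])
    z1 = paddle.cross(x, y)   # cross product along the first length-3 axis
    print(z1.numpy())
    # [[-1. -1. -1.]
    #  [ 2.  2.  2.]
    #  [-1. -1. -1.]]

    data = paddle.to_tensor(np.arange(12).reshape(3, 4))
    y = paddle.cumsum(data)   # flattens by default, then accumulates
    print(y.numpy())          # [ 0  1  3  6 10 15 21 28 36 45 55 66]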
diff --git a/tools/wlist.json b/tools/wlist.json
index 0ed0b4e4069..9b36ac6adc7 100644
--- a/tools/wlist.json
+++ b/tools/wlist.json
@@ -251,9 +251,10 @@
         "BilinearTensorProduct",
         "GroupNorm",
         "SpectralNorm",
-        "TreeConv",
+        "TreeConv"
+    ],
+    "wlist_temp":[
         "prroi_pool",
-        "to_tensor",
         "ChunkEvaluator",
         "EditDistance",
         "ErrorClipByValue",
--
GitLab