# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid.core as core
import paddle
import numpy as np

from paddle.fluid.framework import EagerParamBase, _current_expected_place, _disable_legacy_dygraph, _test_eager_guard, in_dygraph_mode

import unittest

import copy


class EagerScaleTestCase(unittest.TestCase):
    """Tests for ``core.eager.scale`` and eager backward under ``_test_eager_guard``."""

    def test_scale_base(self):
        """Repeated scale() calls keep shape and stop_gradient unchanged."""
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
            print(tensor)
            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            for i in range(0, 100):
                tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            print(tensor)
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            self.assertEqual(tensor.stop_gradient, True)

    def test_retain_grad_and_run_backward(self):
        """retain_grads() makes the gradient observable after backward()."""
        with _test_eager_guard():
            paddle.set_device("cpu")

            input_data = np.ones([4, 16, 16, 32]).astype('float32')
            data_eager = paddle.to_tensor(input_data, 'float32',
                                          core.CPUPlace(), False)

            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())

            data_eager.retain_grads()

            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
            # No backward pass has run yet, so no grad is accumulated.
            self.assertIsNone(data_eager.grad)
            out_eager.backward(grad_eager, False)
            self.assertIsNotNone(data_eager.grad)
            # scale factor is 1.0, so the accumulated grad equals the seed grad.
            np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)

    def test_retain_grad_and_run_backward_raises(self):
        """backward() validates the type and the shape of the supplied grad."""
        with _test_eager_guard():
            paddle.set_device("cpu")

            input_data = np.ones([4, 16, 16, 32]).astype('float32')
            data_eager = paddle.to_tensor(input_data, 'float32',
                                          core.CPUPlace(), False)

            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
            grad_data2 = np.ones([4, 16]).astype('float32')
            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
            grad_eager2 = paddle.to_tensor(grad_data2, 'float32',
                                           core.CPUPlace())

            data_eager.retain_grads()

            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
            self.assertIsNone(data_eager.grad)
            # assertRaisesRegexp is deprecated (removed in Python 3.12);
            # use assertRaisesRegex instead.
            with self.assertRaisesRegex(
                    AssertionError,
                    "The type of grad_tensor must be paddle.Tensor"):
                out_eager.backward(grad_data, False)

            # The original pattern ended in a stray "/*"; ".*" is the
            # intended "match anything after the prefix" form.
            with self.assertRaisesRegex(
                    AssertionError,
                    "Tensor shape not match, Tensor of grad_tensor .*"):
                out_eager.backward(grad_eager2, False)

class EagerDtypeTestCase(unittest.TestCase):
    """Checks that paddle.to_tensor preserves every supported numpy dtype."""

    def check_to_tensor_and_numpy(self, dtype, proto_dtype):
        """Round-trip a random array through to_tensor; verify dtype and data.

        Args:
            dtype: numpy dtype name used to build the input array.
            proto_dtype: expected ``core.VarDesc.VarType`` of the result.
        """
        with _test_eager_guard():
            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
            tensor = paddle.to_tensor(arr, dtype)
            self.assertEqual(tensor.dtype, proto_dtype)
            np.testing.assert_array_equal(arr, tensor.numpy())

    # Backward-compatible alias: the helper was originally misspelled
    # "check_to_tesnsor_and_numpy"; keep the old name for any external caller.
    check_to_tesnsor_and_numpy = check_to_tensor_and_numpy

    def test_dtype_base(self):
        print("Test_dtype")
        self.check_to_tensor_and_numpy('bool', core.VarDesc.VarType.BOOL)
        self.check_to_tensor_and_numpy('int8', core.VarDesc.VarType.INT8)
        self.check_to_tensor_and_numpy('uint8', core.VarDesc.VarType.UINT8)
        self.check_to_tensor_and_numpy('int16', core.VarDesc.VarType.INT16)
        self.check_to_tensor_and_numpy('int32', core.VarDesc.VarType.INT32)
        self.check_to_tensor_and_numpy('int64', core.VarDesc.VarType.INT64)
        self.check_to_tensor_and_numpy('float16', core.VarDesc.VarType.FP16)
        self.check_to_tensor_and_numpy('float32', core.VarDesc.VarType.FP32)
        self.check_to_tensor_and_numpy('float64', core.VarDesc.VarType.FP64)
        self.check_to_tensor_and_numpy('complex64',
                                       core.VarDesc.VarType.COMPLEX64)
        self.check_to_tensor_and_numpy('complex128',
                                       core.VarDesc.VarType.COMPLEX128)

class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
    def constructor(self, place):
        """Exercise the positional overloads of core.eager.Tensor on ``place``.

        Covers construction from nothing, explicit metadata, numpy arrays,
        other eager tensors and framework (LoD) tensors, plus EagerParamBase
        validation of shape, dtype and trainable.
        """
        # Default constructor: empty FP32 tensor with a generated name.
        egr_tensor = core.eager.Tensor()
        self.assertEqual(egr_tensor.persistable, False)
        self.assertTrue("generated" in egr_tensor.name)
        self.assertEqual(egr_tensor.shape, [0])
        self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor.stop_gradient, True)

        # Explicit dtype / dims / name / type / persistable.
        egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32,
                                        [4, 16, 16, 32], "test_eager_tensor",
                                        core.VarDesc.VarType.LOD_TENSOR, True)
        self.assertEqual(egr_tensor0.persistable, True)
        self.assertEqual(egr_tensor0.name, "test_eager_tensor")
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)

        # From numpy array: (value, place, persistable, zero_copy, name,
        # stop_gradient) all positional.
        arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor1 = core.eager.Tensor(arr0, place, True, False,
                                        "numpy_tensor1", False)
        self.assertEqual(egr_tensor1.persistable, True)
        self.assertEqual(egr_tensor1.name, "numpy_tensor1")
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, False)
        self.assertTrue(egr_tensor1.place._equals(place))
        np.testing.assert_array_equal(egr_tensor1.numpy(), arr0)

        # Integer numpy array keeps the INT64 dtype.
        arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
        egr_tensor2 = core.eager.Tensor(arr1, place, False, True,
                                        "numpy_tensor2", True)
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertEqual(egr_tensor2.name, "numpy_tensor2")
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)
        self.assertEqual(egr_tensor2.stop_gradient, True)
        self.assertTrue(egr_tensor2.place._equals(place))
        np.testing.assert_array_equal(egr_tensor2.numpy(), arr1)

        # Array only: tensor lands on the current expected place.
        arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
        egr_tensor3 = core.eager.Tensor(arr2)
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)
        self.assertTrue(
            egr_tensor3.place._equals(
                paddle.fluid.framework._current_expected_place()))
        np.testing.assert_array_equal(egr_tensor3.numpy(), arr2)

        # Copy-construct from another eager tensor; note the source's
        # stop_gradient=False is not inherited (copy reports True).
        egr_tensor3.stop_gradient = False
        egr_tensor4 = core.eager.Tensor(egr_tensor3)
        self.assertEqual(egr_tensor4.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
        self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)
        self.assertEqual(egr_tensor4.stop_gradient, True)
        self.assertTrue(
            egr_tensor4.place._equals(
                paddle.fluid.framework._current_expected_place()))
        np.testing.assert_array_equal(egr_tensor4.numpy(), egr_tensor3.numpy())

        arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor5 = core.eager.Tensor(arr4, place)
        self.assertEqual(egr_tensor5.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)
        self.assertTrue(egr_tensor5.place._equals(place))
        np.testing.assert_array_equal(egr_tensor5.numpy(), arr4)

        # Eager tensor + explicit place: data follows to the new place.
        egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
        self.assertEqual(egr_tensor6.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)
        self.assertEqual(egr_tensor6.place.is_cpu_place(), True)
        np.testing.assert_array_equal(egr_tensor6.numpy(), egr_tensor5.numpy())

        egr_tensor7 = core.eager.Tensor(arr4, place, True)
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)
        self.assertTrue(egr_tensor7.place._equals(place))
        np.testing.assert_array_equal(egr_tensor7.numpy(), arr4)

        egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
        self.assertEqual(egr_tensor8.persistable, False)
        self.assertEqual(egr_tensor8.name, "egr_tensor8")
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, True)
        self.assertTrue(egr_tensor8.place._equals(place))
        np.testing.assert_array_equal(egr_tensor8.numpy(), egr_tensor5.numpy())

        egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, True)
        self.assertTrue(egr_tensor9.place._equals(place))
        np.testing.assert_array_equal(egr_tensor9.numpy(), arr4)

        # From a framework (LoD) tensor.
        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
        egr_tensor10 = core.eager.Tensor(t, place)
        self.assertEqual(egr_tensor10.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [3, 3])
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, True)
        self.assertTrue(egr_tensor10.place._equals(place))
        np.testing.assert_array_equal(egr_tensor10.numpy(), x)

        egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
        self.assertEqual(egr_tensor11.persistable, False)
        self.assertTrue("framework_constructed" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [3, 3])
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, True)
        self.assertTrue(egr_tensor11.place._equals(place))
        np.testing.assert_array_equal(egr_tensor11.numpy(), x)

        # Framework tensor without a place: keeps its own (CPU) place.
        egr_tensor12 = core.eager.Tensor(t)
        self.assertEqual(egr_tensor12.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [3, 3])
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, True)
        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
        np.testing.assert_array_equal(egr_tensor12.numpy(), x)

        # EagerParamBase: a zero-dim (scalar) parameter is allowed.
        zero_dim_param = EagerParamBase(shape=[], dtype="float32")
        self.assertEqual(zero_dim_param.shape, [])

        # NOTE: assertRaisesRegexp is deprecated (removed in Python 3.12);
        # assertRaisesRegex is used instead, and the stray "/*" at the end
        # of the original patterns is fixed to the intended ".*".
        with self.assertRaisesRegex(
                ValueError, "The shape of Parameter should not be None"):
            eager_param = EagerParamBase(shape=None, dtype="float32")

        with self.assertRaisesRegex(
                ValueError, "The dtype of Parameter should not be None"):
            eager_param = EagerParamBase(shape=[1, 1], dtype=None)

        with self.assertRaisesRegex(
                ValueError,
                "Each dimension of shape for Parameter must be greater than 0, but received .*"
        ):
            eager_param = EagerParamBase(shape=[-1], dtype="float32")

        eager_param = EagerParamBase(shape=[1, 1], dtype="float32")
        self.assertTrue(eager_param.trainable)
        eager_param.trainable = False
        self.assertFalse(eager_param.trainable)
        with self.assertRaisesRegex(
                ValueError,
                "The type of trainable MUST be bool, but the type is .*"):
            eager_param.trainable = "False"

        # shape may also come from a tensor-valued paddle.shape() result.
        eager_param_2 = EagerParamBase(shape=paddle.shape(
            paddle.to_tensor([1, 2, 3, 4])),
                                       dtype="float32")
        self.assertTrue(eager_param_2.trainable)
        eager_param_2.trainable = False
        self.assertFalse(eager_param_2.trainable)
        with self.assertRaisesRegex(
                ValueError,
                "The type of trainable MUST be bool, but the type is .*"):
            eager_param_2.trainable = "False"

    def test_constructor(self):
        """Run the positional-constructor checks on every available place."""
        print("Test_constructor")
        paddle.set_device("cpu")
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places = places + [core.CUDAPlace(0)]
        with _test_eager_guard():
            for target_place in places:
                self.constructor(target_place)

    def constructor_with_kwargs(self, place):
300
        # init Tensor by Python array
301 302
        arr = np.random.rand(4, 16, 16, 32).astype('float32')

303
        egr_tensor0 = core.eager.Tensor(value=arr)
304 305 306 307 308 309 310 311 312
        self.assertEqual(egr_tensor0.persistable, False)
        self.assertTrue("generated" in egr_tensor0.name)
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertTrue(
            egr_tensor0.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor0.stop_gradient, True)

313
        egr_tensor1 = core.eager.Tensor(value=arr, place=place)
314 315 316 317 318 319 320
        self.assertEqual(egr_tensor1.persistable, False)
        self.assertTrue("generated" in egr_tensor1.name)
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor1.place._equals(place))
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, True)

321
        egr_tensor2 = core.eager.Tensor(arr, place=place)
322 323 324 325 326 327 328
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertTrue("generated" in egr_tensor2.name)
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor2.place._equals(place))
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor2.stop_gradient, True)

329 330 331
        egr_tensor3 = core.eager.Tensor(arr,
                                        place=place,
                                        name="new_eager_tensor")
332 333 334 335 336 337 338
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("new_eager_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor3.place._equals(place))
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)

339 340 341 342
        egr_tensor4 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor")
343 344 345 346 347 348 349
        self.assertEqual(egr_tensor4.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor4.place._equals(place))
        self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor4.stop_gradient, True)

350 351 352 353 354
        egr_tensor5 = core.eager.Tensor(arr,
                                        core.CPUPlace(),
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
355 356 357 358 359 360 361
        self.assertEqual(egr_tensor5.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor5.place.is_cpu_place())
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)

362 363 364 365 366
        egr_tensor6 = core.eager.Tensor(arr,
                                        place=core.CPUPlace(),
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
367 368 369 370 371 372 373
        self.assertEqual(egr_tensor6.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor6.place.is_cpu_place())
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)

374 375 376 377 378
        egr_tensor7 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
379 380 381 382 383 384 385
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor7.place._equals(place))
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)

386 387 388 389 390 391
        egr_tensor8 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True,
                                        stop_gradient=False)
392 393 394 395 396 397 398
        self.assertEqual(egr_tensor8.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor8.name)
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor8.place._equals(place))
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, False)

399 400 401 402 403 404
        egr_tensor9 = core.eager.Tensor(arr,
                                        place,
                                        True,
                                        True,
                                        "new_eager_tensor",
                                        stop_gradient=False)
405 406 407 408 409 410 411
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor9.place._equals(place))
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, False)

412 413 414 415 416 417
        egr_tensor10 = core.eager.Tensor(arr,
                                         place,
                                         True,
                                         True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
418 419 420 421 422 423 424
        self.assertEqual(egr_tensor10.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor10.place._equals(place))
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, False)

425 426 427 428 429 430
        egr_tensor11 = core.eager.Tensor(arr,
                                         place,
                                         True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
431 432 433 434 435 436 437
        self.assertEqual(egr_tensor11.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor11.place._equals(place))
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, False)

438 439 440 441 442 443
        egr_tensor12 = core.eager.Tensor(arr,
                                         place,
                                         persistable=True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
444 445 446 447 448 449 450
        self.assertEqual(egr_tensor12.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor12.place._equals(place))
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, False)

451 452 453 454 455 456
        egr_tensor13 = core.eager.Tensor(value=arr,
                                         place=place,
                                         persistable=True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
457 458 459 460 461 462 463 464
        self.assertEqual(egr_tensor13.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor13.name)
        self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor13.place._equals(place))
        self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor13.stop_gradient, False)

        # special case
465 466 467 468 469
        egr_tensor14 = core.eager.Tensor(dtype=core.VarDesc.VarType.FP32,
                                         dims=[4, 16, 16, 32],
                                         name="special_eager_tensor",
                                         type=core.VarDesc.VarType.LOD_TENSOR,
                                         persistable=True)
470 471 472 473 474
        self.assertEqual(egr_tensor14.persistable, True)
        self.assertEqual(egr_tensor14.name, "special_eager_tensor")
        self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)

475 476
        # init Tensor by Tensor
        egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
477 478 479 480 481 482 483 484
        self.assertEqual(egr_tensor15.persistable, True)
        self.assertTrue("generated" in egr_tensor15.name)
        self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor15.stop_gradient, True)
        self.assertTrue(
            egr_tensor15.place._equals(
                paddle.fluid.framework._current_expected_place()))
485
        np.testing.assert_array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())
486

487 488
        egr_tensor16 = core.eager.Tensor(value=egr_tensor4,
                                         name="new_eager_tensor")
489 490 491 492 493 494 495 496
        self.assertEqual(egr_tensor16.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor16.name)
        self.assertEqual(egr_tensor16.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor16.stop_gradient, True)
        self.assertTrue(
            egr_tensor16.place._equals(
                paddle.fluid.framework._current_expected_place()))
497
        np.testing.assert_array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())
498

499
        egr_tensor17 = core.eager.Tensor(
500 501
            value=egr_tensor4,
            place=place,
502 503
            name="new_eager_tensor",
        )
504 505 506 507 508 509
        self.assertEqual(egr_tensor17.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor17.name)
        self.assertEqual(egr_tensor17.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor17.stop_gradient, True)
        self.assertTrue(egr_tensor17.place._equals(place))
510
        np.testing.assert_array_equal(egr_tensor17.numpy(), egr_tensor4.numpy())
511

512
        egr_tensor18 = core.eager.Tensor(
513 514
            egr_tensor4,
            place=place,
515 516
            name="new_eager_tensor",
        )
517 518 519 520 521 522
        self.assertEqual(egr_tensor18.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor18.name)
        self.assertEqual(egr_tensor18.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor18.stop_gradient, True)
        self.assertTrue(egr_tensor18.place._equals(place))
523
        np.testing.assert_array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())
524

525
        egr_tensor19 = core.eager.Tensor(
526 527
            egr_tensor4,
            place,
528 529
            name="new_eager_tensor",
        )
530 531 532 533 534 535
        self.assertEqual(egr_tensor19.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor19.name)
        self.assertEqual(egr_tensor19.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor19.stop_gradient, True)
        self.assertTrue(egr_tensor19.place._equals(place))
536
        np.testing.assert_array_equal(egr_tensor19.numpy(), egr_tensor4.numpy())
537 538 539 540 541

        # init eager tensor by framework tensor
        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
542
        egr_tensor20 = core.eager.Tensor(value=t)
543 544 545 546 547 548 549 550
        self.assertEqual(egr_tensor20.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor20.name)
        self.assertEqual(egr_tensor20.shape, [3, 3])
        self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor20.stop_gradient, True)
        self.assertTrue(
            egr_tensor20.place._equals(
                paddle.fluid.framework._current_expected_place()))
551
        np.testing.assert_array_equal(egr_tensor20.numpy(), x)
552

553
        egr_tensor21 = core.eager.Tensor(value=t, place=place)
554 555 556 557 558 559
        self.assertEqual(egr_tensor21.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor21.name)
        self.assertEqual(egr_tensor21.shape, [3, 3])
        self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor21.stop_gradient, True)
        self.assertTrue(egr_tensor21.place._equals(place))
560
        np.testing.assert_array_equal(egr_tensor21.numpy(), x)
561

562
        egr_tensor22 = core.eager.Tensor(t, place=place)
563 564 565 566 567 568
        self.assertEqual(egr_tensor22.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor22.name)
        self.assertEqual(egr_tensor22.shape, [3, 3])
        self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor22.stop_gradient, True)
        self.assertTrue(egr_tensor22.place._equals(place))
569
        np.testing.assert_array_equal(egr_tensor22.numpy(), x)
570

571
        egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
572 573 574 575 576 577
        self.assertEqual(egr_tensor23.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor23.name)
        self.assertEqual(egr_tensor23.shape, [3, 3])
        self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor23.stop_gradient, True)
        self.assertTrue(egr_tensor23.place._equals(place))
578
        np.testing.assert_array_equal(egr_tensor23.numpy(), x)
579

580 581 582
        egr_tensor24 = core.eager.Tensor(value=t,
                                         place=place,
                                         name="from_framework_tensor")
583 584 585 586 587 588
        self.assertEqual(egr_tensor24.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor24.name)
        self.assertEqual(egr_tensor24.shape, [3, 3])
        self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor24.stop_gradient, True)
        self.assertTrue(egr_tensor24.place._equals(place))
        np.testing.assert_array_equal(egr_tensor24.numpy(), x)

        # Bad usage
        # SyntaxError: positional argument follows keyword argument
        # egr_tensor25 = core.eager.Tensor(value=t, place)

    def test_constructor_with_kwargs(self):
        """Run the kwargs-constructor checks on every available place.

        Delegates the per-place assertions to ``constructor_with_kwargs``;
        a CUDA place is exercised only when paddle was built with CUDA.
        """
        print("Test_constructor_with_kwargs")
        paddle.set_device("cpu")
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        with _test_eager_guard():
            for place in places:
                self.constructor_with_kwargs(place)

    def test_copy_and_copy_to(self):
        """Check ``Tensor.copy_``, ``_copy_to``, ``cuda()`` and ``cpu()``.

        ``copy_`` must adopt the source tensor's shape/data, while
        ``_copy_to``/``cuda``/``cpu`` must preserve data and ``persistable``;
        ``stop_gradient`` behavior differs between the blocking ``_copy_to``
        copy and ``cuda``/``cpu`` (asserted below).

        NOTE: the original file had VCS-blame artifact lines embedded in this
        method body, which broke the syntax; they are removed here.
        """
        print("Test_copy_and_copy_to")
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            self.assertEqual(tensor.stop_gradient, True)
            tensor.stop_gradient = False
            print("Set persistable")
            tensor.persistable = False
            tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,
                                       core.CPUPlace())
            tensor1.persistable = True
            self.assertEqual(tensor1.stop_gradient, True)
            np.testing.assert_array_equal(tensor.numpy(), arr)
            print("Test copy_")
            # copy_ takes the source's shape and data but keeps the
            # destination's persistable flag (still False here).
            tensor.copy_(tensor1, True)
            self.assertEqual(tensor.persistable, False)
            self.assertEqual(tensor.shape, [4, 16])
            self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
            np.testing.assert_array_equal(tensor.numpy(), arr1)

            print("Test _copy_to")
            tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                       core.CPUPlace())
            np.testing.assert_array_equal(tensor2.numpy(), arr2)
            self.assertTrue(tensor2.place.is_cpu_place())
            tensor2.persistable = True
            tensor2.stop_gradient = False
            if core.is_compiled_with_cuda():
                # Blocking device copy: data and persistable preserved,
                # but stop_gradient resets to True on the copy.
                tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
                np.testing.assert_array_equal(tensor3.numpy(), arr2)
                self.assertEqual(tensor3.persistable, True)
                self.assertEqual(tensor3.stop_gradient, True)
                self.assertTrue(tensor3.place.is_gpu_place())

                # cuda()/cpu() preserve stop_gradient as well.
                tensor4 = tensor2.cuda(0, True)
                np.testing.assert_array_equal(tensor4.numpy(), arr2)
                self.assertEqual(tensor4.persistable, True)
                self.assertEqual(tensor4.stop_gradient, False)
                self.assertTrue(tensor4.place.is_gpu_place())

                tensor5 = tensor4.cpu()
                np.testing.assert_array_equal(tensor5.numpy(), arr2)
                self.assertEqual(tensor5.persistable, True)
                self.assertEqual(tensor5.stop_gradient, False)
                self.assertTrue(tensor5.place.is_cpu_place())

                # Pinned-memory -> GPU copy must keep values identical.
                tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
                tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
                np.testing.assert_array_equal(tensor10.numpy(),
                                              tensor11.numpy())
            else:
                tensor3 = tensor2._copy_to(core.CPUPlace(), True)
                np.testing.assert_array_equal(tensor3.numpy(), arr2)
                self.assertEqual(tensor3.persistable, True)
                self.assertEqual(tensor3.stop_gradient, True)
                self.assertTrue(tensor3.place.is_cpu_place())

                tensor4 = tensor2.cpu()
                np.testing.assert_array_equal(tensor4.numpy(), arr2)
                self.assertEqual(tensor4.persistable, True)
                self.assertEqual(tensor4.stop_gradient, False)
                self.assertTrue(tensor4.place.is_cpu_place())

    def test_share_buffer_to(self):
        """Check that ``_share_buffer_to`` aliases storage between tensors.

        After sharing, both tensors must read the same data and
        ``_is_shared_buffer_with`` must report the relation symmetrically.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        with _test_eager_guard():
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = None
            tensor2 = None
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace())
            if core.is_compiled_with_cuda():
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CUDAPlace(0))
            else:
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CPUPlace())
            np.testing.assert_array_equal(tensor.numpy(), arr)
            np.testing.assert_array_equal(tensor2.numpy(), arr2)
            # tensor now aliases tensor2's buffer, so it reads arr2.
            tensor2._share_buffer_to(tensor)
            np.testing.assert_array_equal(tensor.numpy(), arr2)
            np.testing.assert_array_equal(tensor2.numpy(), arr2)
            self.assertTrue(tensor._is_shared_buffer_with(tensor2))
            self.assertTrue(tensor2._is_shared_buffer_with(tensor))
            # Sharing is transitive through tensor to tensor3.
            tensor._share_buffer_to(tensor3)
            np.testing.assert_array_equal(tensor3.numpy(), arr2)
            self.assertTrue(tensor3._is_shared_buffer_with(tensor))

    def test_share_underline_tensor_to(self):
        """Check ``_share_underline_tensor_to`` aliases the whole DenseTensor.

        Mirrors ``test_share_buffer_to`` but shares the underlying tensor
        object (checked via ``_is_shared_underline_tensor_with``).

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        with _test_eager_guard():
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = None
            tensor2 = None
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            tensor3 = core.eager.Tensor()
            if core.is_compiled_with_cuda():
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CUDAPlace(0))
            else:
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CPUPlace())
            np.testing.assert_array_equal(tensor.numpy(), arr)
            np.testing.assert_array_equal(tensor2.numpy(), arr2)
            tensor2._share_underline_tensor_to(tensor)
            np.testing.assert_array_equal(tensor.numpy(), arr2)
            np.testing.assert_array_equal(tensor2.numpy(), arr2)
            self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
            self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
            tensor._share_underline_tensor_to(tensor3)
            np.testing.assert_array_equal(tensor3.numpy(), arr2)
            self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))

    def test_properties(self):
        """Check the mutable properties of an eager Tensor.

        Exercises ``name``, ``persistable`` and ``stop_gradient`` setters,
        and the read-only ``shape``, ``place``, ``_place_str`` and ``type``.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        print("Test_properties")
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            tensor.name = 'tensor_name_test'
            self.assertEqual(tensor.name, 'tensor_name_test')
            # persistable round-trips through True and back to False.
            self.assertEqual(tensor.persistable, False)
            tensor.persistable = True
            self.assertEqual(tensor.persistable, True)
            tensor.persistable = False
            self.assertEqual(tensor.persistable, False)
            self.assertTrue(tensor.place.is_cpu_place())
            self.assertEqual(tensor._place_str, 'Place(cpu)')
            # stop_gradient round-trips as well.
            self.assertEqual(tensor.stop_gradient, True)
            tensor.stop_gradient = False
            self.assertEqual(tensor.stop_gradient, False)
            tensor.stop_gradient = True
            self.assertEqual(tensor.stop_gradient, True)
            self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)
    def test_global_properties(self):
        """Check the global dygraph-mode flag around ``_test_eager_guard``.

        After ``_disable_legacy_dygraph`` the process is in eager dygraph
        mode; leaving the guard restores the previous (non-eager) state.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        print("Test_global_properties")
        _disable_legacy_dygraph()
        self.assertTrue(in_dygraph_mode())
        with _test_eager_guard():
            self.assertTrue(in_dygraph_mode())
        self.assertFalse(in_dygraph_mode())

    def test_place_guard(self):
        """Check ``_dygraph_place_guard`` overrides the expected place.

        Regardless of the globally-set device, inside the guard the current
        expected place must be a CPUPlace.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        if core.is_compiled_with_cuda():
            # Start from a GPU device so the guard's CPU override is visible.
            paddle.set_device("gpu:0")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(),
                               type(core.CPUPlace())))
        else:
            paddle.set_device("cpu")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(),
                               type(core.CPUPlace())))
    def test_value(self):
        """Check ``Tensor.value()`` exposes an initialized framework tensor.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        with _test_eager_guard():
            arr = np.random.rand(4, 16, 16, 32).astype('float64')
            egr_tensor0 = core.eager.Tensor(value=arr)
            self.assertEqual(egr_tensor0.persistable, False)
            self.assertTrue("generated" in egr_tensor0.name)
            self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
            self.assertTrue(
                egr_tensor0.place._equals(
                    paddle.fluid.framework._current_expected_place()))
            self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
            self.assertEqual(egr_tensor0.stop_gradient, True)
            # NOTE(review): the two-argument assertTrue calls below treat the
            # second argument as the failure message, so they only check the
            # first argument's truthiness — likely assertEqual was intended.
            # Kept as-is to preserve the original behavior; confirm before
            # tightening.
            self.assertTrue(egr_tensor0.value().get_tensor()._dtype(),
                            core.VarDesc.VarType.FP64)
            self.assertTrue(egr_tensor0.value().get_tensor()._place(),
                            paddle.fluid.framework._current_expected_place())
            self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

    def test_set_value(self):
        """Check ``Tensor.set_value`` replaces data in place.

        ``set_value`` must update the contents while preserving shape,
        place and ``stop_gradient``.

        NOTE: VCS-blame artifact lines embedded in the original body broke
        the syntax; they are removed here.
        """
        with _test_eager_guard():
            ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor = core.eager.Tensor(value=ori_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
            np.testing.assert_array_equal(egr_tensor.numpy(), ori_arr)
            ori_place = egr_tensor.place

            new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))

            egr_tensor.set_value(new_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertTrue(egr_tensor.place._equals(ori_place))
            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
            np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)
    def test_sharding_related_api(self):
        """Check ``_numel`` and ``_slice`` used by sharding.

        4*16*16*32 == 32768 elements; slicing the first 2 of 4 along axis 0
        halves that to 16384.

        NOTE: VCS-blame artifact lines split the constructor call across
        garbage lines in the original; they are removed here.
        """
        with _test_eager_guard():
            arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor1 = core.eager.Tensor(arr0, core.CPUPlace(), True, False,
                                            "numpy_tensor1", False)
            self.assertEqual(egr_tensor1._numel(), 32768)
            self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)

    def test_copy_gradient_from(self):
        """Check ``_copy_gradient_from`` overwrites an existing gradient.

        After ``backward()`` populates ``x.grad``, copying y's data into the
        gradient must make ``x.grad`` equal ``np_y``.

        NOTE: a VCS-blame artifact line embedded in the original body broke
        the syntax; it is removed here.
        """
        with _test_eager_guard():
            np_x = np.random.random((2, 2))
            np_y = np.random.random((2, 2))
            x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
            y = paddle.to_tensor(np_y, dtype="float64")
            out = x + x
            out.backward()
            x._copy_gradient_from(y)
            np.testing.assert_array_equal(x.grad.numpy(), np_y)

    def test_clear(self):
        """Check that ``_clear`` deallocates the tensor's storage."""
        with _test_eager_guard():
            source = np.random.random((3, 8, 8))
            target = paddle.to_tensor(source, dtype="float64")
            # A freshly-created tensor holds initialized storage.
            self.assertTrue(target._is_initialized())
            target._clear()
            # After clearing, the storage is gone.
            self.assertFalse(target._is_initialized())

class EagerParamBaseUsageTestCase(unittest.TestCase):
    """Tests for EagerParamBase usage through Linear layers and helpers.

    NOTE: the original class body contained VCS-blame artifact lines that
    broke the syntax; they are removed here.
    """

    def test_print(self):
        """Printing a parameter must not raise."""
        with _test_eager_guard():
            linear = paddle.nn.Linear(3, 3, bias_attr=False)
            print(linear.weight)

    def test_copy(self):
        """deepcopy and _copy_to must preserve parameter values."""
        with _test_eager_guard():
            linear = paddle.nn.Linear(1, 3)
            linear_copy = copy.deepcopy(linear)
            linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
            np.testing.assert_array_equal(linear.weight.numpy(),
                                          linear_copy.weight.numpy())
            np.testing.assert_array_equal(linear.weight.numpy(),
                                          linear_copy2.numpy())

    def func_fp16_initilaizer(self):
        """Build fp16 Linear layers with several initializers; return weights."""
        paddle.set_default_dtype("float16")
        linear1 = paddle.nn.Linear(1, 3, bias_attr=False)
        linear2 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.Uniform())
        linear3 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer())
        linear4 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.MSRAInitializer())
        res = [
            linear1.weight.numpy(),
            linear2.weight.numpy(),
            linear3.weight.numpy(),
            linear4.weight.numpy()
        ]
        # Restore the default dtype so later tests are unaffected.
        paddle.set_default_dtype("float32")
        return res

    def test_fp16_initializer(self):
        """Eager and legacy modes must produce identical fp16 init weights."""
        res1 = list()
        res2 = list()
        paddle.seed(102)
        paddle.framework.random._manual_program_seed(102)
        with _test_eager_guard():
            res1 = self.func_fp16_initilaizer()
        res2 = self.func_fp16_initilaizer()

        for i in range(len(res1)):
            np.testing.assert_array_equal(res1[i], res2[i])

    def func_layer_helper_base(self, value):
        """Convert value via LayerHelperBase.to_variable; return as numpy."""
        base = paddle.fluid.layer_helper_base.LayerHelperBase(
            "test_layer", "test_layer")
        return base.to_variable(value).numpy()

    def func_base_to_variable(self, value):
        # Only checks that to_variable does not raise; returns None.
        paddle.fluid.dygraph.base.to_variable(value)

    def test_to_variable(self):
        """to_variable paths must agree between eager and legacy modes."""
        value = np.random.rand(4, 16, 16, 32).astype('float32')
        res1 = None
        res3 = None
        with _test_eager_guard():
            res1 = self.func_layer_helper_base(value)
            res3 = self.func_base_to_variable(value)
        res2 = self.func_layer_helper_base(value)
        res4 = self.func_base_to_variable(value)
        np.testing.assert_array_equal(res1, res2)
        np.testing.assert_array_equal(res3, res4)

    def test_backward_with_single_tensor(self):
        """backward() on a single tensor yields an all-ones gradient."""
        with _test_eager_guard():
            arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
            egr_tensor12.retain_grads()
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            self.assertEqual(egr_tensor12.persistable, False)
            self.assertTrue("generated_tensor" in egr_tensor12.name)
            self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
            self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
            self.assertEqual(egr_tensor12.stop_gradient, True)
            self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
            np.testing.assert_array_equal(egr_tensor12.numpy(), arr4)
            # No gradient exists before backward().
            np.testing.assert_array_equal(egr_tensor12.gradient(), None)
            egr_tensor12.stop_gradient = False
            egr_tensor12.backward()
            np.testing.assert_array_equal(egr_tensor12.gradient(), arr)

    def test_set_value(self):
        """set_value on a parameter updates data but keeps the place."""
        with _test_eager_guard():
            linear = paddle.nn.Linear(1, 3)
            ori_place = linear.weight.place
            new_weight = np.ones([1, 3]).astype('float32')
            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
            linear.weight.set_value(new_weight)
            np.testing.assert_array_equal(linear.weight.numpy(), new_weight)
            self.assertTrue(linear.weight.place._equals(ori_place))

class EagerGuardTestCase(unittest.TestCase):
    """Tests for the ``_test_eager_guard`` context manager itself.

    NOTE: the original class body contained VCS-blame artifact lines that
    broke the syntax; they are removed here.
    """

    def test__test_eager_guard(self):
        """The guard must put the process into eager dygraph mode."""
        tracer = paddle.fluid.dygraph.tracer.Tracer()
        with _test_eager_guard(tracer):
            self.assertTrue(in_dygraph_mode())


# Run the whole test module when executed directly.
if __name__ == "__main__":
    unittest.main()