# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid.core as core
import paddle
import numpy as np
from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_legacy_dygraph, in_dygraph_mode, _current_expected_place, _disable_legacy_dygraph
from paddle.fluid.data_feeder import convert_dtype
import unittest
import copy
import paddle.compat as cpt


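# Tests for the eager-mode scale kernel exposed as core.eager.scale and for
# retain_grads()/backward() on eager tensors, including the error paths for a
# non-Tensor or shape-mismatched grad_tensor.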
class EagerScaleTestCase(unittest.TestCase):

    def test_scale_base(self):
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
            print(tensor)
            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            for i in range(0, 100):
                tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
            print(tensor)
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            self.assertEqual(tensor.stop_gradient, True)

    def test_retain_grad_and_run_backward(self):
        with _test_eager_guard():
            paddle.set_device("cpu")

            input_data = np.ones([4, 16, 16, 32]).astype('float32')
            data_eager = paddle.to_tensor(input_data, 'float32',
                                          core.CPUPlace(), False)

            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())

            data_eager.retain_grads()

            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
            self.assertIsNone(data_eager.grad)
            out_eager.backward(grad_eager, False)
            self.assertIsNotNone(data_eager.grad)
            self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))

    def test_retain_grad_and_run_backward_raises(self):
        with _test_eager_guard():
            paddle.set_device("cpu")

            input_data = np.ones([4, 16, 16, 32]).astype('float32')
            data_eager = paddle.to_tensor(input_data, 'float32',
                                          core.CPUPlace(), False)

            grad_data = np.ones([4, 16, 16, 32]).astype('float32')
            grad_data2 = np.ones([4, 16]).astype('float32')
            grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
            grad_eager2 = paddle.to_tensor(grad_data2, 'float32',
                                           core.CPUPlace())

            data_eager.retain_grads()

            out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
            self.assertIsNone(data_eager.grad)
            with self.assertRaisesRegexp(
                    AssertionError,
                    "The type of grad_tensor must be paddle.Tensor"):
                out_eager.backward(grad_data, False)

            with self.assertRaisesRegexp(
                    AssertionError,
                    "Tensor shape not match, Tensor of grad_tensor /*"):
                out_eager.backward(grad_eager2, False)


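# Round-trips numpy arrays of each supported dtype through paddle.to_tensor and
# checks that the resulting eager tensor reports the matching VarDesc.VarType.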
class EagerDtypeTestCase(unittest.TestCase):

    def check_to_tensor_and_numpy(self, dtype, proto_dtype):
        with _test_eager_guard():
            arr = np.random.random([4, 16, 16, 32]).astype(dtype)
            tensor = paddle.to_tensor(arr, dtype)
            self.assertEqual(tensor.dtype, proto_dtype)
            self.assertTrue(np.array_equal(arr, tensor.numpy()))

    def test_dtype_base(self):
        print("Test_dtype")
        self.check_to_tensor_and_numpy('bool', core.VarDesc.VarType.BOOL)
        self.check_to_tensor_and_numpy('int8', core.VarDesc.VarType.INT8)
        self.check_to_tensor_and_numpy('uint8', core.VarDesc.VarType.UINT8)
        self.check_to_tensor_and_numpy('int16', core.VarDesc.VarType.INT16)
        self.check_to_tensor_and_numpy('int32', core.VarDesc.VarType.INT32)
        self.check_to_tensor_and_numpy('int64', core.VarDesc.VarType.INT64)
        self.check_to_tensor_and_numpy('float16', core.VarDesc.VarType.FP16)
        self.check_to_tensor_and_numpy('float32', core.VarDesc.VarType.FP32)
        self.check_to_tensor_and_numpy('float64', core.VarDesc.VarType.FP64)
        self.check_to_tensor_and_numpy('complex64',
                                       core.VarDesc.VarType.COMPLEX64)
        self.check_to_tensor_and_numpy('complex128',
                                       core.VarDesc.VarType.COMPLEX128)


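# Covers core.eager.Tensor construction (positional and keyword forms), buffer
# sharing, copy semantics and basic tensor properties under the eager guard.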
class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):

    def constructor(self, place):
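        # Walk through the positional-argument constructors of
        # core.eager.Tensor: default-constructed, from dtype/dims, from numpy
        # arrays, from another eager tensor, and from a framework (LoD)
        # tensor, plus EagerParamBase argument validation.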
        egr_tensor = core.eager.Tensor()
        self.assertEqual(egr_tensor.persistable, False)
        self.assertTrue("generated" in egr_tensor.name)
        self.assertEqual(egr_tensor.shape, [0])
        self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor.stop_gradient, True)

        egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32,
                                        [4, 16, 16, 32], "test_eager_tensor",
                                        core.VarDesc.VarType.LOD_TENSOR, True)
        self.assertEqual(egr_tensor0.persistable, True)
        self.assertEqual(egr_tensor0.name, "test_eager_tensor")
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)

        arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor1 = core.eager.Tensor(arr0, place, True, False,
                                        "numpy_tensor1", False)
        self.assertEqual(egr_tensor1.persistable, True)
        self.assertEqual(egr_tensor1.name, "numpy_tensor1")
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, False)
        self.assertTrue(egr_tensor1.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))

        arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
        egr_tensor2 = core.eager.Tensor(arr1, place, False, True,
                                        "numpy_tensor2", True)
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertEqual(egr_tensor2.name, "numpy_tensor2")
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)
        self.assertEqual(egr_tensor2.stop_gradient, True)
        self.assertTrue(egr_tensor2.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))

        arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
        egr_tensor3 = core.eager.Tensor(arr2)
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)
        self.assertTrue(
            egr_tensor3.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))

        egr_tensor3.stop_gradient = False
        egr_tensor4 = core.eager.Tensor(egr_tensor3)
        self.assertEqual(egr_tensor4.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
        self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)
        self.assertEqual(egr_tensor4.stop_gradient, True)
        self.assertTrue(
            egr_tensor4.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertTrue(np.array_equal(egr_tensor4.numpy(),
                                       egr_tensor3.numpy()))

        arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor5 = core.eager.Tensor(arr4, place)
        self.assertEqual(egr_tensor5.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)
        self.assertTrue(egr_tensor5.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))

        egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
        self.assertEqual(egr_tensor6.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)
        self.assertEqual(egr_tensor6.place.is_cpu_place(), True)
        self.assertTrue(np.array_equal(egr_tensor6.numpy(),
                                       egr_tensor5.numpy()))

        egr_tensor7 = core.eager.Tensor(arr4, place, True)
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)
        self.assertTrue(egr_tensor7.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))

        egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
        self.assertEqual(egr_tensor8.persistable, False)
        self.assertEqual(egr_tensor8.name, "egr_tensor8")
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, True)
        self.assertTrue(egr_tensor8.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor8.numpy(),
                                       egr_tensor5.numpy()))

        egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, True)
        self.assertTrue(egr_tensor9.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))

        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
        egr_tensor10 = core.eager.Tensor(t, place)
        self.assertEqual(egr_tensor10.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [3, 3])
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, True)
        self.assertTrue(egr_tensor10.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))

        egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
        self.assertEqual(egr_tensor11.persistable, False)
        self.assertTrue("framework_constructed" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [3, 3])
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, True)
        self.assertTrue(egr_tensor11.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))

        egr_tensor12 = core.eager.Tensor(t)
        self.assertEqual(egr_tensor12.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [3, 3])
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, True)
        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
        self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))

        with self.assertRaisesRegexp(
                ValueError, "The shape of Parameter should not be None"):
            eager_param = EagerParamBase(shape=None, dtype="float32")

        with self.assertRaisesRegexp(
                ValueError, "The dtype of Parameter should not be None"):
            eager_param = EagerParamBase(shape=[1, 1], dtype=None)

        with self.assertRaisesRegexp(
                ValueError,
                "The dimensions of shape for Parameter must be greater than 0"):
            eager_param = EagerParamBase(shape=[], dtype="float32")

        with self.assertRaisesRegexp(
                ValueError,
                "Each dimension of shape for Parameter must be greater than 0, but received /*"
        ):
            eager_param = EagerParamBase(shape=[-1], dtype="float32")

        eager_param = EagerParamBase(shape=[1, 1], dtype="float32")
        self.assertTrue(eager_param.trainable)
        eager_param.trainable = False
        self.assertFalse(eager_param.trainable)
        with self.assertRaisesRegexp(
                ValueError,
                "The type of trainable MUST be bool, but the type is /*"):
            eager_param.trainable = "False"

        eager_param_2 = EagerParamBase(shape=paddle.shape(
            paddle.to_tensor([1, 2, 3, 4])),
                                       dtype="float32")
        self.assertTrue(eager_param_2.trainable)
        eager_param_2.trainable = False
        self.assertFalse(eager_param_2.trainable)
        with self.assertRaisesRegexp(
                ValueError,
                "The type of trainable MUST be bool, but the type is /*"):
            eager_param_2.trainable = "False"

    def test_constructor(self):
        print("Test_constructor")
        paddle.set_device("cpu")
        place_list = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            place_list.append(core.CUDAPlace(0))
        with _test_eager_guard():
            for p in place_list:
                self.constructor(p)

    def constructor_with_kwargs(self, place):
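        # Same constructor coverage as above, driven through keyword arguments
        # (value/place/persistable/name/zero_copy/stop_gradient).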
        # init Tensor by Python array
        arr = np.random.rand(4, 16, 16, 32).astype('float32')

        egr_tensor0 = core.eager.Tensor(value=arr)
        self.assertEqual(egr_tensor0.persistable, False)
        self.assertTrue("generated" in egr_tensor0.name)
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertTrue(
            egr_tensor0.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor0.stop_gradient, True)

        egr_tensor1 = core.eager.Tensor(value=arr, place=place)
        self.assertEqual(egr_tensor1.persistable, False)
        self.assertTrue("generated" in egr_tensor1.name)
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor1.place._equals(place))
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, True)

        egr_tensor2 = core.eager.Tensor(arr, place=place)
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertTrue("generated" in egr_tensor2.name)
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor2.place._equals(place))
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor2.stop_gradient, True)

        egr_tensor3 = core.eager.Tensor(arr,
                                        place=place,
                                        name="new_eager_tensor")
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("new_eager_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor3.place._equals(place))
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)

        egr_tensor4 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor")
        self.assertEqual(egr_tensor4.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor4.place._equals(place))
        self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor4.stop_gradient, True)

        egr_tensor5 = core.eager.Tensor(arr,
                                        core.CPUPlace(),
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
        self.assertEqual(egr_tensor5.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor5.place.is_cpu_place())
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)

        egr_tensor6 = core.eager.Tensor(arr,
                                        place=core.CPUPlace(),
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
        self.assertEqual(egr_tensor6.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor6.place.is_cpu_place())
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)

        egr_tensor7 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True)
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor7.place._equals(place))
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)

        egr_tensor8 = core.eager.Tensor(arr,
                                        place=place,
                                        persistable=True,
                                        name="new_eager_tensor",
                                        zero_copy=True,
                                        stop_gradient=False)
        self.assertEqual(egr_tensor8.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor8.name)
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor8.place._equals(place))
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, False)

        egr_tensor9 = core.eager.Tensor(arr,
                                        place,
                                        True,
                                        True,
                                        "new_eager_tensor",
                                        stop_gradient=False)
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor9.place._equals(place))
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, False)

        egr_tensor10 = core.eager.Tensor(arr,
                                         place,
                                         True,
                                         True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
        self.assertEqual(egr_tensor10.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor10.place._equals(place))
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, False)

        egr_tensor11 = core.eager.Tensor(arr,
                                         place,
                                         True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
        self.assertEqual(egr_tensor11.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor11.place._equals(place))
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, False)

        egr_tensor12 = core.eager.Tensor(arr,
                                         place,
                                         persistable=True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
        self.assertEqual(egr_tensor12.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor12.place._equals(place))
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, False)

        egr_tensor13 = core.eager.Tensor(value=arr,
                                         place=place,
                                         persistable=True,
                                         zero_copy=True,
                                         name="new_eager_tensor",
                                         stop_gradient=False)
        self.assertEqual(egr_tensor13.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor13.name)
        self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor13.place._equals(place))
        self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor13.stop_gradient, False)

        # special case
        egr_tensor14 = core.eager.Tensor(dtype=core.VarDesc.VarType.FP32,
                                         dims=[4, 16, 16, 32],
                                         name="special_eager_tensor",
                                         type=core.VarDesc.VarType.LOD_TENSOR,
                                         persistable=True)
        self.assertEqual(egr_tensor14.persistable, True)
        self.assertEqual(egr_tensor14.name, "special_eager_tensor")
        self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)

        # init Tensor by Tensor
        egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
        self.assertEqual(egr_tensor15.persistable, True)
        self.assertTrue("generated" in egr_tensor15.name)
        self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor15.stop_gradient, True)
        self.assertTrue(
            egr_tensor15.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertTrue(
            np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()))

        egr_tensor16 = core.eager.Tensor(value=egr_tensor4,
                                         name="new_eager_tensor")
        self.assertEqual(egr_tensor16.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor16.name)
        self.assertEqual(egr_tensor16.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor16.stop_gradient, True)
        self.assertTrue(
            egr_tensor16.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertTrue(
            np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()))

        egr_tensor17 = core.eager.Tensor(
            value=egr_tensor4,
            place=place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor17.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor17.name)
        self.assertEqual(egr_tensor17.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor17.stop_gradient, True)
        self.assertTrue(egr_tensor17.place._equals(place))
        self.assertTrue(
            np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()))

        egr_tensor18 = core.eager.Tensor(
            egr_tensor4,
            place=place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor18.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor18.name)
        self.assertEqual(egr_tensor18.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor18.stop_gradient, True)
        self.assertTrue(egr_tensor18.place._equals(place))
        self.assertTrue(
            np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()))

        egr_tensor19 = core.eager.Tensor(
            egr_tensor4,
            place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor19.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor19.name)
        self.assertEqual(egr_tensor19.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor19.stop_gradient, True)
        self.assertTrue(egr_tensor19.place._equals(place))
        self.assertTrue(
            np.array_equal(egr_tensor19.numpy(), egr_tensor4.numpy()))

        # init eager tensor by framework tensor
        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
        egr_tensor20 = core.eager.Tensor(value=t)
        self.assertEqual(egr_tensor20.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor20.name)
        self.assertEqual(egr_tensor20.shape, [3, 3])
        self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor20.stop_gradient, True)
        self.assertTrue(
            egr_tensor20.place._equals(
                paddle.fluid.framework._current_expected_place()))
        self.assertTrue(np.array_equal(egr_tensor20.numpy(), x))

        egr_tensor21 = core.eager.Tensor(value=t, place=place)
        self.assertEqual(egr_tensor21.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor21.name)
        self.assertEqual(egr_tensor21.shape, [3, 3])
        self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor21.stop_gradient, True)
        self.assertTrue(egr_tensor21.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor21.numpy(), x))

        egr_tensor22 = core.eager.Tensor(t, place=place)
        self.assertEqual(egr_tensor22.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor22.name)
        self.assertEqual(egr_tensor22.shape, [3, 3])
        self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor22.stop_gradient, True)
        self.assertTrue(egr_tensor22.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor22.numpy(), x))

        egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
        self.assertEqual(egr_tensor23.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor23.name)
        self.assertEqual(egr_tensor23.shape, [3, 3])
        self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor23.stop_gradient, True)
        self.assertTrue(egr_tensor23.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor23.numpy(), x))

        egr_tensor24 = core.eager.Tensor(value=t,
                                         place=place,
                                         name="from_framework_tensor")
        self.assertEqual(egr_tensor24.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor24.name)
        self.assertEqual(egr_tensor24.shape, [3, 3])
        self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor24.stop_gradient, True)
        self.assertTrue(egr_tensor24.place._equals(place))
        self.assertTrue(np.array_equal(egr_tensor24.numpy(), x))

        # Bad usage
        # SyntaxError: positional argument follows keyword argument
        # egr_tensor25 = core.eager.Tensor(value=t, place)

    def test_constructor_with_kwargs(self):
        print("Test_constructor_with_kwargs")
        paddle.set_device("cpu")
        place_list = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            place_list.append(core.CUDAPlace(0))
        with _test_eager_guard():
            for p in place_list:
                self.constructor_with_kwargs(p)

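    # copy_ should adopt the source tensor's data and shape while keeping the
    # destination's persistable flag; _copy_to(), cuda() and cpu() are checked
    # for data preservation across places.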
    def test_copy_and_copy_to(self):
        print("Test_copy_and_copy_to")
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            self.assertEqual(tensor.stop_gradient, True)
            tensor.stop_gradient = False
            print("Set persistable")
            tensor.persistable = False
            tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,
                                       core.CPUPlace())
            tensor1.persistable = True
            self.assertEqual(tensor1.stop_gradient, True)
            self.assertTrue(np.array_equal(tensor.numpy(), arr))
            print("Test copy_")
            tensor.copy_(tensor1, True)
            self.assertEqual(tensor.persistable, False)
            self.assertEqual(tensor.shape, [4, 16])
            self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
            self.assertTrue(np.array_equal(tensor.numpy(), arr1))

            print("Test _copy_to")
            tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                       core.CPUPlace())
            self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
            self.assertTrue(tensor2.place.is_cpu_place())
            tensor2.persistable = True
            tensor2.stop_gradient = False
            if core.is_compiled_with_cuda():
                tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
                self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
                self.assertEqual(tensor3.persistable, True)
                self.assertEqual(tensor3.stop_gradient, True)
                self.assertTrue(tensor3.place.is_gpu_place())

                tensor4 = tensor2.cuda(0, True)
                self.assertTrue(np.array_equal(tensor4.numpy(), arr2))
                self.assertEqual(tensor4.persistable, True)
                self.assertEqual(tensor4.stop_gradient, False)
                self.assertTrue(tensor4.place.is_gpu_place())

                tensor5 = tensor4.cpu()
                self.assertTrue(np.array_equal(tensor5.numpy(), arr2))
                self.assertEqual(tensor5.persistable, True)
                self.assertEqual(tensor5.stop_gradient, False)
                self.assertTrue(tensor5.place.is_cpu_place())

                tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
                tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
                self.assertTrue(
                    np.array_equal(tensor10.numpy(), tensor11.numpy()))
            else:
                tensor3 = tensor2._copy_to(core.CPUPlace(), True)
                self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
                self.assertEqual(tensor3.persistable, True)
                self.assertEqual(tensor3.stop_gradient, True)
                self.assertTrue(tensor3.place.is_cpu_place())

                tensor4 = tensor2.cpu()
                self.assertTrue(np.array_equal(tensor4.numpy(), arr2))
                self.assertEqual(tensor4.persistable, True)
                self.assertEqual(tensor4.stop_gradient, False)
                self.assertTrue(tensor4.place.is_cpu_place())

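    # After _share_buffer_to, both tensors report the same data and
    # _is_shared_buffer_with returns True in both directions.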
    def test_share_buffer_to(self):
        with _test_eager_guard():
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = None
            tensor2 = None
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace())
            if core.is_compiled_with_cuda():
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CUDAPlace(0))
            else:
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CPUPlace())
            self.assertTrue(np.array_equal(tensor.numpy(), arr))
            self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
            tensor2._share_buffer_to(tensor)
            self.assertTrue(np.array_equal(tensor.numpy(), arr2))
            self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
            self.assertTrue(tensor._is_shared_buffer_with(tensor2))
            self.assertTrue(tensor2._is_shared_buffer_with(tensor))
            tensor._share_buffer_to(tensor3)
            self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
            self.assertTrue(tensor3._is_shared_buffer_with(tensor))

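    # Mirrors the buffer-sharing checks above for _share_underline_tensor_to
    # and _is_shared_underline_tensor_with.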
    def test_share_underline_tensor_to(self):
        with _test_eager_guard():
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            arr1 = np.zeros([4, 16]).astype('float32')
            arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
                [4, 16, 16, 32]).astype('float32')
            tensor = None
            tensor2 = None
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            tensor3 = core.eager.Tensor()
            if core.is_compiled_with_cuda():
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CUDAPlace(0))
            else:
                tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
                                           core.CPUPlace())
            self.assertTrue(np.array_equal(tensor.numpy(), arr))
            self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
            tensor2._share_underline_tensor_to(tensor)
            self.assertTrue(np.array_equal(tensor.numpy(), arr2))
            self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
            self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
            self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
            tensor._share_underline_tensor_to(tensor3)
            self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
            self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))

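    # Getter/setter round-trips for name, persistable, place, stop_gradient
    # and type on an eager tensor.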
    def test_properties(self):
        print("Test_properties")
        with _test_eager_guard():
            paddle.set_device("cpu")
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
                                      core.CPUPlace())
            self.assertEqual(tensor.shape, [4, 16, 16, 32])
            tensor.name = 'tensor_name_test'
            self.assertEqual(tensor.name, 'tensor_name_test')
            self.assertEqual(tensor.persistable, False)
            tensor.persistable = True
            self.assertEqual(tensor.persistable, True)
            tensor.persistable = False
            self.assertEqual(tensor.persistable, False)
            self.assertTrue(tensor.place.is_cpu_place())
            self.assertEqual(tensor._place_str, 'Place(cpu)')
            self.assertEqual(tensor.stop_gradient, True)
            tensor.stop_gradient = False
            self.assertEqual(tensor.stop_gradient, False)
            tensor.stop_gradient = True
            self.assertEqual(tensor.stop_gradient, True)
            self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)

    def test_global_properties(self):
        print("Test_global_properties")
        _disable_legacy_dygraph()
        self.assertTrue(in_dygraph_mode())
        with _test_eager_guard():
            self.assertTrue(in_dygraph_mode())
        self.assertFalse(in_dygraph_mode())

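    # _dygraph_place_guard should make the guarded place the current expected
    # place, regardless of the globally selected device.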
    def test_place_guard(self):
        if core.is_compiled_with_cuda():
            paddle.set_device("gpu:0")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(),
                               type(core.CPUPlace())))
        else:
            paddle.set_device("cpu")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(),
                               type(core.CPUPlace())))

    def test_value(self):
        with _test_eager_guard():
            arr = np.random.rand(4, 16, 16, 32).astype('float64')

            egr_tensor0 = core.eager.Tensor(value=arr)
            self.assertEqual(egr_tensor0.persistable, False)
            self.assertTrue("generated" in egr_tensor0.name)
            self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
            self.assertTrue(
                egr_tensor0.place._equals(
                    paddle.fluid.framework._current_expected_place()))
            self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
            self.assertEqual(egr_tensor0.stop_gradient, True)
            self.assertTrue(egr_tensor0.value().get_tensor()._dtype(),
                            core.VarDesc.VarType.FP64)
            self.assertTrue(egr_tensor0.value().get_tensor()._place(),
                            paddle.fluid.framework._current_expected_place())
            self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

    def test_set_value(self):
        with _test_eager_guard():
            ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor = core.eager.Tensor(value=ori_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
            self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))
            ori_place = egr_tensor.place

            new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
            self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))

            egr_tensor.set_value(new_arr)
            self.assertEqual(egr_tensor.stop_gradient, True)
            self.assertTrue(egr_tensor.place._equals(ori_place))
            self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
            self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr))

    def test_sharding_related_api(self):
        with _test_eager_guard():
            arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor1 = core.eager.Tensor(arr0, core.CPUPlace(), True, False,
                                            "numpy_tensor1", False)
            self.assertEqual(egr_tensor1._numel(), 32768)
            self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)

    def test_copy_gradient_from(self):
        with _test_eager_guard():
            np_x = np.random.random((2, 2))
            np_y = np.random.random((2, 2))
            x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
            y = paddle.to_tensor(np_y, dtype="float64")
            out = x + x
            out.backward()
            x._copy_gradient_from(y)
            self.assertTrue(np.array_equal(x.grad.numpy(), np_y))

    def test_clear(self):
        with _test_eager_guard():
            np_x = np.random.random((3, 8, 8))
            x = paddle.to_tensor(np_x, dtype="float64")
            self.assertTrue(x._is_initialized())
            x._clear()
            self.assertFalse(x._is_initialized())


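# Behaviour of EagerParamBase-backed layer parameters: printing, deep copy,
# fp16 initializers, LayerHelperBase.to_variable, backward on a standalone
# tensor, and set_value.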
class EagerParamBaseUsageTestCase(unittest.TestCase):

    def test_print(self):
        with _test_eager_guard():
            linear = paddle.nn.Linear(3, 3, bias_attr=False)
            print(linear.weight)

    def test_copy(self):
        with _test_eager_guard():
            linear = paddle.nn.Linear(1, 3)
            linear_copy = copy.deepcopy(linear)
            linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
            self.assertTrue(
                np.array_equal(linear.weight.numpy(),
                               linear_copy.weight.numpy()))
            self.assertTrue(
                np.array_equal(linear.weight.numpy(), linear_copy2.numpy()))

    def func_fp16_initializer(self):
        paddle.set_default_dtype("float16")
        linear1 = paddle.nn.Linear(1, 3, bias_attr=False)
        linear2 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.Uniform())
        linear3 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer())
        linear4 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.MSRAInitializer())
        res = [
            linear1.weight.numpy(),
            linear2.weight.numpy(),
            linear3.weight.numpy(),
            linear4.weight.numpy()
894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910
        ]
        paddle.set_default_dtype("float32")
        return res

    def test_fp16_initializer(self):
        res1 = list()
        res2 = list()
        paddle.seed(102)
        paddle.framework.random._manual_program_seed(102)
        with _test_eager_guard():
            res1 = self.func_fp16_initializer()
        res2 = self.func_fp16_initializer()

        for i in range(len(res1)):
            self.assertTrue(np.array_equal(res1[i], res2[i]))

    def func_layer_helper_base(self, value):
        base = paddle.fluid.layer_helper_base.LayerHelperBase(
            "test_layer", "test_layer")
        return base.to_variable(value).numpy()

    def func_base_to_variable(self, value):
        paddle.fluid.dygraph.base.to_variable(value)

    def test_to_variable(self):
        value = np.random.rand(4, 16, 16, 32).astype('float32')
        res1 = None
        res3 = None
        with _test_eager_guard():
            res1 = self.func_layer_helper_base(value)
            res3 = self.func_base_to_variable(value)
        res2 = self.func_layer_helper_base(value)
        res4 = self.func_base_to_variable(value)
        self.assertTrue(np.array_equal(res1, res2))
        self.assertTrue(np.array_equal(res3, res4))

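    # backward() on a standalone tensor should produce a gradient of all ones
    # once stop_gradient is switched off.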
    def test_backward_with_single_tensor(self):
        with _test_eager_guard():
            arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
            egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
            egr_tensor12.retain_grads()
            arr = np.ones([4, 16, 16, 32]).astype('float32')
            self.assertEqual(egr_tensor12.persistable, False)
            self.assertTrue("generated_tensor" in egr_tensor12.name)
            self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
            self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
            self.assertEqual(egr_tensor12.stop_gradient, True)
            self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
            self.assertTrue(np.array_equal(egr_tensor12.numpy(), arr4))
            self.assertTrue(np.array_equal(egr_tensor12.gradient(), None))
            egr_tensor12.stop_gradient = False
            egr_tensor12.backward()
            self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr))

    def test_set_value(self):
        with _test_eager_guard():
            linear = paddle.nn.Linear(1, 3)
            ori_place = linear.weight.place
            new_weight = np.ones([1, 3]).astype('float32')
            self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))

            linear.weight.set_value(new_weight)
            self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight))
            self.assertTrue(linear.weight.place._equals(ori_place))


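# _test_eager_guard should report dygraph mode as active while the guard is in
# effect, including when an explicit tracer is passed in.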
class EagerGuardTestCase(unittest.TestCase):

    def test__test_eager_guard(self):
        tracer = paddle.fluid.dygraph.tracer.Tracer()
        with _test_eager_guard(tracer):
            self.assertTrue(in_dygraph_mode())


if __name__ == "__main__":
    unittest.main()