# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import unittest

import numpy as np

import paddle
import paddle.fluid.core as core
from paddle.fluid.framework import (
    EagerParamBase,
    _current_expected_place,
    in_dygraph_mode,
)


class EagerScaleTestCase(unittest.TestCase):
    def test_scale_base(self):
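        # Scale a tensor repeatedly through core.eager.scale and check that shape and stop_gradient are unchanged.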
        paddle.set_device("cpu")
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
        print(tensor)
        tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
        for i in range(0, 100):
            tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
        print(tensor)
        self.assertEqual(tensor.shape, [4, 16, 16, 32])
        self.assertEqual(tensor.stop_gradient, True)

    def test_retain_grad_and_run_backward(self):
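        # retain_grads() keeps the gradient of the input alive so it can be checked after backward() is run with an explicit grad tensor.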
        paddle.set_device("cpu")

        input_data = np.ones([4, 16, 16, 32]).astype('float32')
        data_eager = paddle.to_tensor(
            input_data, 'float32', core.CPUPlace(), False
        )

        grad_data = np.ones([4, 16, 16, 32]).astype('float32')
        grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())

        data_eager.retain_grads()

        out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
        self.assertIsNone(data_eager.grad)
        out_eager.backward(grad_eager, False)
        self.assertIsNotNone(data_eager.grad)
        np.testing.assert_array_equal(data_eager.grad.numpy(), input_data)

    def test_retain_grad_and_run_backward_raises(self):
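        # backward() should reject a raw numpy array passed as grad_tensor and a grad tensor whose shape does not match the output.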
        paddle.set_device("cpu")

        input_data = np.ones([4, 16, 16, 32]).astype('float32')
        data_eager = paddle.to_tensor(
            input_data, 'float32', core.CPUPlace(), False
        )

        grad_data = np.ones([4, 16, 16, 32]).astype('float32')
        grad_data2 = np.ones([4, 16]).astype('float32')
        grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
        grad_eager2 = paddle.to_tensor(grad_data2, 'float32', core.CPUPlace())

        data_eager.retain_grads()

        out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
        self.assertIsNone(data_eager.grad)
        with self.assertRaisesRegex(
            AssertionError, "The type of grad_tensor must be paddle.Tensor"
        ):
            out_eager.backward(grad_data, False)

        with self.assertRaisesRegex(
            AssertionError,
            "Tensor shape not match, Tensor of grad_tensor /*",
        ):
            out_eager.backward(grad_eager2, False)


class EagerDtypeTestCase(unittest.TestCase):
    def check_to_tensor_and_numpy(self, dtype, proto_dtype):
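        # Round-trip a numpy array through paddle.to_tensor and verify the dtype mapping and values.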
        arr = np.random.random([4, 16, 16, 32]).astype(dtype)
        tensor = paddle.to_tensor(arr, dtype)
        self.assertEqual(tensor.dtype, proto_dtype)
        np.testing.assert_array_equal(arr, tensor.numpy())

    def test_dtype_base(self):
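        # Cover every supported numpy dtype and its expected VarDesc.VarType counterpart.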
        print("Test_dtype")
        self.check_to_tensor_and_numpy('bool', core.VarDesc.VarType.BOOL)
        self.check_to_tensor_and_numpy('int8', core.VarDesc.VarType.INT8)
        self.check_to_tensor_and_numpy('uint8', core.VarDesc.VarType.UINT8)
        self.check_to_tensor_and_numpy('int16', core.VarDesc.VarType.INT16)
        self.check_to_tensor_and_numpy('int32', core.VarDesc.VarType.INT32)
        self.check_to_tensor_and_numpy('int64', core.VarDesc.VarType.INT64)
        self.check_to_tensor_and_numpy('float16', core.VarDesc.VarType.FP16)
        self.check_to_tensor_and_numpy('float32', core.VarDesc.VarType.FP32)
        self.check_to_tensor_and_numpy('float64', core.VarDesc.VarType.FP64)
        self.check_to_tensor_and_numpy(
            'complex64', core.VarDesc.VarType.COMPLEX64
        )
        self.check_to_tensor_and_numpy(
            'complex128', core.VarDesc.VarType.COMPLEX128
        )


class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
    def constructor(self, place):
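        # Exercise the positional-argument overloads of core.eager.Tensor: default, (dtype, dims, name, type, persistable), numpy arrays, existing eager Tensors, and framework Tensors.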
        egr_tensor = core.eager.Tensor()
        self.assertEqual(egr_tensor.persistable, False)
        self.assertTrue("generated" in egr_tensor.name)
        self.assertEqual(egr_tensor.shape, [0])
        self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor.stop_gradient, True)

        egr_tensor0 = core.eager.Tensor(
            core.VarDesc.VarType.FP32,
            [4, 16, 16, 32],
            "test_eager_tensor",
            core.VarDesc.VarType.LOD_TENSOR,
            True,
        )
        self.assertEqual(egr_tensor0.persistable, True)
        self.assertEqual(egr_tensor0.name, "test_eager_tensor")
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)

        arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor1 = core.eager.Tensor(
            arr0, place, True, False, "numpy_tensor1", False
        )
        self.assertEqual(egr_tensor1.persistable, True)
        self.assertEqual(egr_tensor1.name, "numpy_tensor1")
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, False)
        self.assertTrue(egr_tensor1.place._equals(place))
        np.testing.assert_array_equal(egr_tensor1.numpy(), arr0)

        arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
        egr_tensor2 = core.eager.Tensor(
            arr1, place, False, True, "numpy_tensor2", True
        )
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertEqual(egr_tensor2.name, "numpy_tensor2")
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)
        self.assertEqual(egr_tensor2.stop_gradient, True)
        self.assertTrue(egr_tensor2.place._equals(place))
        np.testing.assert_array_equal(egr_tensor2.numpy(), arr1)

        arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
        egr_tensor3 = core.eager.Tensor(arr2)
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)
        self.assertTrue(
            egr_tensor3.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        np.testing.assert_array_equal(egr_tensor3.numpy(), arr2)

        egr_tensor3.stop_gradient = False
        egr_tensor4 = core.eager.Tensor(egr_tensor3)
        self.assertEqual(egr_tensor4.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
        self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)
        self.assertEqual(egr_tensor4.stop_gradient, True)
        self.assertTrue(
            egr_tensor4.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        np.testing.assert_array_equal(egr_tensor4.numpy(), egr_tensor3.numpy())

        arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor5 = core.eager.Tensor(arr4, place)
        self.assertEqual(egr_tensor5.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)
        self.assertTrue(egr_tensor5.place._equals(place))
        np.testing.assert_array_equal(egr_tensor5.numpy(), arr4)

        egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
        self.assertEqual(egr_tensor6.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)
        self.assertEqual(egr_tensor6.place.is_cpu_place(), True)
        np.testing.assert_array_equal(egr_tensor6.numpy(), egr_tensor5.numpy())

        egr_tensor7 = core.eager.Tensor(arr4, place, True)
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)
        self.assertTrue(egr_tensor7.place._equals(place))
        np.testing.assert_array_equal(egr_tensor7.numpy(), arr4)

        egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
        self.assertEqual(egr_tensor8.persistable, False)
        self.assertEqual(egr_tensor8.name, "egr_tensor8")
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, True)
        self.assertTrue(egr_tensor8.place._equals(place))
        np.testing.assert_array_equal(egr_tensor8.numpy(), egr_tensor5.numpy())

        egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("generated_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, True)
        self.assertTrue(egr_tensor9.place._equals(place))
        np.testing.assert_array_equal(egr_tensor9.numpy(), arr4)

        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
        egr_tensor10 = core.eager.Tensor(t, place)
        self.assertEqual(egr_tensor10.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [3, 3])
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, True)
        self.assertTrue(egr_tensor10.place._equals(place))
        np.testing.assert_array_equal(egr_tensor10.numpy(), x)

        egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
        self.assertEqual(egr_tensor11.persistable, False)
        self.assertTrue("framework_constructed" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [3, 3])
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, True)
        self.assertTrue(egr_tensor11.place._equals(place))
        np.testing.assert_array_equal(egr_tensor11.numpy(), x)

        egr_tensor12 = core.eager.Tensor(t)
        self.assertEqual(egr_tensor12.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [3, 3])
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, True)
        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
        np.testing.assert_array_equal(egr_tensor12.numpy(), x)

        zero_dim_param = EagerParamBase(shape=[], dtype="float32")
        self.assertEqual(zero_dim_param.shape, [])

        with self.assertRaisesRegex(
            ValueError, "The shape of Parameter should not be None"
        ):
            eager_param = EagerParamBase(shape=None, dtype="float32")

        with self.assertRaisesRegex(
            ValueError, "The dtype of Parameter should not be None"
        ):
            eager_param = EagerParamBase(shape=[1, 1], dtype=None)

        with self.assertRaisesRegex(
            ValueError,
            "Each dimension of shape for Parameter must be greater than 0, but received /*",
        ):
            eager_param = EagerParamBase(shape=[-1], dtype="float32")

        eager_param = EagerParamBase(shape=[1, 1], dtype="float32")
        self.assertTrue(eager_param.trainable)
        eager_param.trainable = False
        self.assertFalse(eager_param.trainable)
        with self.assertRaisesRegex(
            ValueError, "The type of trainable MUST be bool, but the type is /*"
        ):
            eager_param.trainable = "False"

        eager_param_2 = EagerParamBase(
            shape=paddle.shape(paddle.to_tensor([1, 2, 3, 4])), dtype="float32"
        )
        self.assertTrue(eager_param_2.trainable)
        eager_param_2.trainable = False
        self.assertFalse(eager_param_2.trainable)
        with self.assertRaisesRegex(
            ValueError, "The type of trainable MUST be bool, but the type is /*"
        ):
            eager_param_2.trainable = "False"

    def test_constructor(self):
        print("Test_constructor")
        paddle.set_device("cpu")
        place_list = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            place_list.append(core.CUDAPlace(0))

        for p in place_list:
            self.constructor(p)

    def constructor_with_kwargs(self, place):
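        # Exercise the keyword-argument overloads of core.eager.Tensor (value/place/persistable/name/zero_copy/stop_gradient).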
        # init Tensor by Python array
        arr = np.random.rand(4, 16, 16, 32).astype('float32')

        egr_tensor0 = core.eager.Tensor(value=arr)
        self.assertEqual(egr_tensor0.persistable, False)
        self.assertTrue("generated" in egr_tensor0.name)
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertTrue(
            egr_tensor0.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor0.stop_gradient, True)

        egr_tensor1 = core.eager.Tensor(value=arr, place=place)
        self.assertEqual(egr_tensor1.persistable, False)
        self.assertTrue("generated" in egr_tensor1.name)
        self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor1.place._equals(place))
        self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor1.stop_gradient, True)

        egr_tensor2 = core.eager.Tensor(arr, place=place)
        self.assertEqual(egr_tensor2.persistable, False)
        self.assertTrue("generated" in egr_tensor2.name)
        self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor2.place._equals(place))
        self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor2.stop_gradient, True)

        egr_tensor3 = core.eager.Tensor(
            arr, place=place, name="new_eager_tensor"
        )
        self.assertEqual(egr_tensor3.persistable, False)
        self.assertTrue("new_eager_tensor" in egr_tensor3.name)
        self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor3.place._equals(place))
        self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor3.stop_gradient, True)

        egr_tensor4 = core.eager.Tensor(
            arr, place=place, persistable=True, name="new_eager_tensor"
        )
        self.assertEqual(egr_tensor4.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor4.name)
        self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor4.place._equals(place))
        self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor4.stop_gradient, True)

        egr_tensor5 = core.eager.Tensor(
            arr,
            core.CPUPlace(),
            persistable=True,
            name="new_eager_tensor",
            zero_copy=True,
        )
        self.assertEqual(egr_tensor5.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor5.name)
        self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor5.place.is_cpu_place())
        self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor5.stop_gradient, True)

        egr_tensor6 = core.eager.Tensor(
            arr,
            place=core.CPUPlace(),
            persistable=True,
            name="new_eager_tensor",
            zero_copy=True,
        )
        self.assertEqual(egr_tensor6.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor6.name)
        self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor6.place.is_cpu_place())
        self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor6.stop_gradient, True)

        egr_tensor7 = core.eager.Tensor(
            arr,
            place=place,
            persistable=True,
            name="new_eager_tensor",
            zero_copy=True,
        )
        self.assertEqual(egr_tensor7.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor7.name)
        self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor7.place._equals(place))
        self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor7.stop_gradient, True)

        egr_tensor8 = core.eager.Tensor(
            arr,
            place=place,
            persistable=True,
            name="new_eager_tensor",
            zero_copy=True,
            stop_gradient=False,
        )
        self.assertEqual(egr_tensor8.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor8.name)
        self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor8.place._equals(place))
        self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor8.stop_gradient, False)

        egr_tensor9 = core.eager.Tensor(
            arr, place, True, True, "new_eager_tensor", stop_gradient=False
        )
        self.assertEqual(egr_tensor9.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor9.name)
        self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor9.place._equals(place))
        self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor9.stop_gradient, False)

        egr_tensor10 = core.eager.Tensor(
            arr, place, True, True, name="new_eager_tensor", stop_gradient=False
        )
        self.assertEqual(egr_tensor10.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor10.name)
        self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor10.place._equals(place))
        self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor10.stop_gradient, False)

        egr_tensor11 = core.eager.Tensor(
            arr,
            place,
            True,
            zero_copy=True,
            name="new_eager_tensor",
            stop_gradient=False,
        )
        self.assertEqual(egr_tensor11.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor11.name)
        self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor11.place._equals(place))
        self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor11.stop_gradient, False)

        egr_tensor12 = core.eager.Tensor(
            arr,
            place,
            persistable=True,
            zero_copy=True,
            name="new_eager_tensor",
            stop_gradient=False,
        )
        self.assertEqual(egr_tensor12.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor12.place._equals(place))
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, False)

        egr_tensor13 = core.eager.Tensor(
            value=arr,
            place=place,
            persistable=True,
            zero_copy=True,
            name="new_eager_tensor",
            stop_gradient=False,
        )
        self.assertEqual(egr_tensor13.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor13.name)
        self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32])
        self.assertTrue(egr_tensor13.place._equals(place))
        self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor13.stop_gradient, False)

        # special case
        egr_tensor14 = core.eager.Tensor(
            dtype=core.VarDesc.VarType.FP32,
            dims=[4, 16, 16, 32],
            name="special_eager_tensor",
            type=core.VarDesc.VarType.LOD_TENSOR,
            persistable=True,
        )
        self.assertEqual(egr_tensor14.persistable, True)
        self.assertEqual(egr_tensor14.name, "special_eager_tensor")
        self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)

        # init Tensor by Tensor
        egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
        self.assertEqual(egr_tensor15.persistable, True)
        self.assertTrue("generated" in egr_tensor15.name)
        self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor15.stop_gradient, True)
        self.assertTrue(
            egr_tensor15.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        np.testing.assert_array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())

        egr_tensor16 = core.eager.Tensor(
            value=egr_tensor4, name="new_eager_tensor"
        )
        self.assertEqual(egr_tensor16.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor16.name)
        self.assertEqual(egr_tensor16.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor16.stop_gradient, True)
        self.assertTrue(
            egr_tensor16.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        np.testing.assert_array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())

        egr_tensor17 = core.eager.Tensor(
            value=egr_tensor4,
            place=place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor17.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor17.name)
        self.assertEqual(egr_tensor17.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor17.stop_gradient, True)
        self.assertTrue(egr_tensor17.place._equals(place))
        np.testing.assert_array_equal(egr_tensor17.numpy(), egr_tensor4.numpy())

        egr_tensor18 = core.eager.Tensor(
            egr_tensor4,
            place=place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor18.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor18.name)
        self.assertEqual(egr_tensor18.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor18.stop_gradient, True)
        self.assertTrue(egr_tensor18.place._equals(place))
        np.testing.assert_array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())

        egr_tensor19 = core.eager.Tensor(
            egr_tensor4,
            place,
            name="new_eager_tensor",
        )
        self.assertEqual(egr_tensor19.persistable, True)
        self.assertTrue("new_eager_tensor" in egr_tensor19.name)
        self.assertEqual(egr_tensor19.shape, egr_tensor4.shape)
        self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype)
        self.assertEqual(egr_tensor19.stop_gradient, True)
        self.assertTrue(egr_tensor19.place._equals(place))
        np.testing.assert_array_equal(egr_tensor19.numpy(), egr_tensor4.numpy())

        # init eager tensor by framework tensor
        x = np.random.rand(3, 3).astype('float32')
        t = paddle.fluid.Tensor()
        t.set(x, paddle.fluid.CPUPlace())
        egr_tensor20 = core.eager.Tensor(value=t)
        self.assertEqual(egr_tensor20.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor20.name)
        self.assertEqual(egr_tensor20.shape, [3, 3])
        self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor20.stop_gradient, True)
        self.assertTrue(
            egr_tensor20.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        np.testing.assert_array_equal(egr_tensor20.numpy(), x)

        egr_tensor21 = core.eager.Tensor(value=t, place=place)
        self.assertEqual(egr_tensor21.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor21.name)
        self.assertEqual(egr_tensor21.shape, [3, 3])
        self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor21.stop_gradient, True)
        self.assertTrue(egr_tensor21.place._equals(place))
        np.testing.assert_array_equal(egr_tensor21.numpy(), x)

        egr_tensor22 = core.eager.Tensor(t, place=place)
        self.assertEqual(egr_tensor22.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor22.name)
        self.assertEqual(egr_tensor22.shape, [3, 3])
        self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor22.stop_gradient, True)
        self.assertTrue(egr_tensor22.place._equals(place))
        np.testing.assert_array_equal(egr_tensor22.numpy(), x)

        egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
        self.assertEqual(egr_tensor23.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor23.name)
        self.assertEqual(egr_tensor23.shape, [3, 3])
        self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor23.stop_gradient, True)
        self.assertTrue(egr_tensor23.place._equals(place))
        np.testing.assert_array_equal(egr_tensor23.numpy(), x)

        egr_tensor24 = core.eager.Tensor(
            value=t, place=place, name="from_framework_tensor"
        )
        self.assertEqual(egr_tensor24.persistable, False)
        self.assertTrue("from_framework_tensor" in egr_tensor24.name)
        self.assertEqual(egr_tensor24.shape, [3, 3])
        self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor24.stop_gradient, True)
        self.assertTrue(egr_tensor24.place._equals(place))
        np.testing.assert_array_equal(egr_tensor24.numpy(), x)

        # Bad usage
        # SyntaxError: positional argument follows keyword argument
        # egr_tensor25 = core.eager.Tensor(value=t, place)

    def test_constructor_with_kwargs(self):
        print("Test_constructor_with_kwargs")
        paddle.set_device("cpu")
        place_list = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            place_list.append(core.CUDAPlace(0))

        for p in place_list:
            self.constructor_with_kwargs(p)

    def test_copy_and_copy_to(self):
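        # copy_() copies value and shape in place; _copy_to(), cuda() and cpu() create copies on the target place.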
        print("Test_copy_and_copy_to")

        paddle.set_device("cpu")
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        arr1 = np.zeros([4, 16]).astype('float32')
        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
            [4, 16, 16, 32]
        ).astype('float32')
        tensor = paddle.to_tensor(
            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        self.assertEqual(tensor.stop_gradient, True)
        tensor.stop_gradient = False
        print("Set persistable")
        tensor.persistable = False
        tensor1 = paddle.to_tensor(
            arr1, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        tensor1.persistable = True
        self.assertEqual(tensor1.stop_gradient, True)
        np.testing.assert_array_equal(tensor.numpy(), arr)
        print("Test copy_")
        tensor.copy_(tensor1, True)
        self.assertEqual(tensor.persistable, False)
        self.assertEqual(tensor.shape, [4, 16])
        self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
        np.testing.assert_array_equal(tensor.numpy(), arr1)

        print("Test _copy_to")
        tensor2 = paddle.to_tensor(
            arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        np.testing.assert_array_equal(tensor2.numpy(), arr2)
        self.assertTrue(tensor2.place.is_cpu_place())
        tensor2.persistable = True
        tensor2.stop_gradient = False
        if core.is_compiled_with_cuda():
            tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
            np.testing.assert_array_equal(tensor3.numpy(), arr2)
            self.assertEqual(tensor3.persistable, True)
            self.assertEqual(tensor3.stop_gradient, True)
            self.assertTrue(tensor3.place.is_gpu_place())

            tensor4 = tensor2.cuda(0, True)
            np.testing.assert_array_equal(tensor4.numpy(), arr2)
            self.assertEqual(tensor4.persistable, True)
            self.assertEqual(tensor4.stop_gradient, False)
            self.assertTrue(tensor4.place.is_gpu_place())

            tensor5 = tensor4.cpu()
            np.testing.assert_array_equal(tensor5.numpy(), arr2)
            self.assertEqual(tensor5.persistable, True)
            self.assertEqual(tensor5.stop_gradient, False)
            self.assertTrue(tensor5.place.is_cpu_place())

            tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
            tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
            np.testing.assert_array_equal(tensor10.numpy(), tensor11.numpy())
        else:
            tensor3 = tensor2._copy_to(core.CPUPlace(), True)
            np.testing.assert_array_equal(tensor3.numpy(), arr2)
            self.assertEqual(tensor3.persistable, True)
            self.assertEqual(tensor3.stop_gradient, True)
            self.assertTrue(tensor3.place.is_cpu_place())

            tensor4 = tensor2.cpu()
            np.testing.assert_array_equal(tensor4.numpy(), arr2)
            self.assertEqual(tensor4.persistable, True)
            self.assertEqual(tensor4.stop_gradient, False)
            self.assertTrue(tensor4.place.is_cpu_place())

    def test_share_buffer_to(self):
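        # _share_buffer_to() makes the destination tensor alias the source allocation, verified via _is_shared_buffer_with().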
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        arr1 = np.zeros([4, 16]).astype('float32')
        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
            [4, 16, 16, 32]
        ).astype('float32')
        tensor = None
        tensor2 = None
        tensor = paddle.to_tensor(
            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace())
        if core.is_compiled_with_cuda():
            tensor2 = paddle.to_tensor(
                arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
            )
        else:
            tensor2 = paddle.to_tensor(
                arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
            )
        np.testing.assert_array_equal(tensor.numpy(), arr)
        np.testing.assert_array_equal(tensor2.numpy(), arr2)
        tensor2._share_buffer_to(tensor)
        np.testing.assert_array_equal(tensor.numpy(), arr2)
        np.testing.assert_array_equal(tensor2.numpy(), arr2)
        self.assertTrue(tensor._is_shared_buffer_with(tensor2))
        self.assertTrue(tensor2._is_shared_buffer_with(tensor))
        tensor._share_buffer_to(tensor3)
        np.testing.assert_array_equal(tensor3.numpy(), arr2)
        self.assertTrue(tensor3._is_shared_buffer_with(tensor))

    def test_share_underline_tensor_to(self):
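        # _share_underline_tensor_to() shares the underlying tensor, verified via _is_shared_underline_tensor_with().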
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        arr1 = np.zeros([4, 16]).astype('float32')
        arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
            [4, 16, 16, 32]
        ).astype('float32')
        tensor = None
        tensor2 = None
        tensor = paddle.to_tensor(
            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        tensor3 = core.eager.Tensor()
        if core.is_compiled_with_cuda():
            tensor2 = paddle.to_tensor(
                arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)
            )
        else:
            tensor2 = paddle.to_tensor(
                arr2, core.VarDesc.VarType.FP32, core.CPUPlace()
            )
        np.testing.assert_array_equal(tensor.numpy(), arr)
        np.testing.assert_array_equal(tensor2.numpy(), arr2)
        tensor2._share_underline_tensor_to(tensor)
        np.testing.assert_array_equal(tensor.numpy(), arr2)
        np.testing.assert_array_equal(tensor2.numpy(), arr2)
        self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
        self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
        tensor._share_underline_tensor_to(tensor3)
        np.testing.assert_array_equal(tensor3.numpy(), arr2)
        self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))

    def test_properties(self):
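        # Round-trip the writable properties (name, persistable, stop_gradient) and check place, _place_str and type.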
        print("Test_properties")
        paddle.set_device("cpu")
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        tensor = paddle.to_tensor(
            arr, core.VarDesc.VarType.FP32, core.CPUPlace()
        )
        self.assertEqual(tensor.shape, [4, 16, 16, 32])
        tensor.name = 'tensor_name_test'
        self.assertEqual(tensor.name, 'tensor_name_test')
        self.assertEqual(tensor.persistable, False)
        tensor.persistable = True
        self.assertEqual(tensor.persistable, True)
        tensor.persistable = False
        self.assertEqual(tensor.persistable, False)
        self.assertTrue(tensor.place.is_cpu_place())
        self.assertEqual(tensor._place_str, 'Place(cpu)')
        self.assertEqual(tensor.stop_gradient, True)
        tensor.stop_gradient = False
        self.assertEqual(tensor.stop_gradient, False)
        tensor.stop_gradient = True
        self.assertEqual(tensor.stop_gradient, True)
        self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)

    def test_global_properties(self):
        print("Test_global_properties")
        self.assertTrue(in_dygraph_mode())

    def test_place_guard(self):
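        # _dygraph_place_guard should temporarily make the guarded place the current expected place.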
        if core.is_compiled_with_cuda():
            paddle.set_device("gpu:0")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(), type(core.CPUPlace()))
                )
        else:
            paddle.set_device("cpu")
            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
                self.assertTrue(
                    isinstance(_current_expected_place(), type(core.CPUPlace()))
                )

    def test_value(self):
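        # value().get_tensor() exposes the underlying tensor so its dtype, place and initialization state can be inspected.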
        arr = np.random.rand(4, 16, 16, 32).astype('float64')

        egr_tensor0 = core.eager.Tensor(value=arr)
        self.assertEqual(egr_tensor0.persistable, False)
        self.assertTrue("generated" in egr_tensor0.name)
        self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
        self.assertTrue(
            egr_tensor0.place._equals(
                paddle.fluid.framework._current_expected_place()
            )
        )
        self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
        self.assertEqual(egr_tensor0.stop_gradient, True)
        self.assertTrue(
            egr_tensor0.value().get_tensor()._dtype(),
            core.VarDesc.VarType.FP64,
        )
        self.assertTrue(
            egr_tensor0.value().get_tensor()._place(),
            paddle.fluid.framework._current_expected_place(),
        )
        self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())

    def test_set_value(self):
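        # set_value() overwrites the data in place while keeping the original place and shape.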
        ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor = core.eager.Tensor(value=ori_arr)
        self.assertEqual(egr_tensor.stop_gradient, True)
        self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
        np.testing.assert_array_equal(egr_tensor.numpy(), ori_arr)
        ori_place = egr_tensor.place

        new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
        self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))

        egr_tensor.set_value(new_arr)
        self.assertEqual(egr_tensor.stop_gradient, True)
        self.assertTrue(egr_tensor.place._equals(ori_place))
        self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
        np.testing.assert_array_equal(egr_tensor.numpy(), new_arr)

    def test_sharding_related_api(self):
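        # _numel() returns the element count; _slice(0, 2) keeps the first two entries of dim 0, halving the element count here.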
        arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor1 = core.eager.Tensor(
            arr0, core.CPUPlace(), True, False, "numpy_tensor1", False
        )
        self.assertEqual(egr_tensor1._numel(), 32768)
        self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)

    def test_copy_gradient_from(self):
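        # _copy_gradient_from() overwrites x.grad with the values of y once backward() has created the gradient.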
        np_x = np.random.random((2, 2))
        np_y = np.random.random((2, 2))
        x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
        y = paddle.to_tensor(np_y, dtype="float64")
        out = x + x
        out.backward()
        x._copy_gradient_from(y)
        np.testing.assert_array_equal(x.grad.numpy(), np_y)

    def test_clear(self):
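        # _clear() releases the underlying storage, leaving the tensor uninitialized.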
        np_x = np.random.random((3, 8, 8))
        x = paddle.to_tensor(np_x, dtype="float64")
        self.assertTrue(x._is_initialized())
        x._clear()
        self.assertFalse(x._is_initialized())

    def test_use_gpudnn(self):
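        # _use_gpudnn(True/False) should return a tensor with the same values either way.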
        np_x = np.random.random((3, 8, 8))

        self.assertTrue(in_dygraph_mode())
        x = paddle.to_tensor(np_x, dtype="float64")
        y = x._use_gpudnn(False)
        np.testing.assert_array_equal(x.numpy(), y.numpy())
        y = x._use_gpudnn(True)
        np.testing.assert_array_equal(x.numpy(), y.numpy())


class EagerParamBaseUsageTestCase(unittest.TestCase):
    def test_print(self):
        linear = paddle.nn.Linear(3, 3, bias_attr=False)
        print(linear.weight)

    def test_copy(self):
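        # Both copy.deepcopy and _copy_to() should preserve the parameter values.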
        linear = paddle.nn.Linear(1, 3)
        linear_copy = copy.deepcopy(linear)
        linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
        np.testing.assert_array_equal(
            linear.weight.numpy(), linear_copy.weight.numpy()
        )
        np.testing.assert_array_equal(
            linear.weight.numpy(), linear_copy2.numpy()
        )

    def func_fp16_initializer(self):
        paddle.set_default_dtype("float16")
        linear1 = paddle.nn.Linear(1, 3, bias_attr=False)
        linear2 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.Uniform(),
        )
        linear3 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer(),
        )
        linear4 = paddle.nn.Linear(
            1,
            3,
            bias_attr=False,
            weight_attr=paddle.fluid.initializer.MSRAInitializer(),
        )
        res = [
            linear1.weight.numpy(),
            linear2.weight.numpy(),
            linear3.weight.numpy(),
            linear4.weight.numpy(),
        ]
        paddle.set_default_dtype("float32")
        return res

    def func_layer_helper_base(self, value):
        base = paddle.fluid.layer_helper_base.LayerHelperBase(
            "test_layer", "test_layer"
        )
        return base.to_variable(value).numpy()

    def func_base_to_variable(self, value):
        paddle.fluid.dygraph.base.to_variable(value)

    def test_backward_with_single_tensor(self):
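        # With retain_grads() and stop_gradient=False, calling backward() directly on the tensor fills its gradient with ones.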
        arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
        egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
        egr_tensor12.retain_grads()
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        self.assertEqual(egr_tensor12.persistable, False)
        self.assertTrue("generated_tensor" in egr_tensor12.name)
        self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
        self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
        self.assertEqual(egr_tensor12.stop_gradient, True)
        self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
        np.testing.assert_array_equal(egr_tensor12.numpy(), arr4)
        np.testing.assert_array_equal(egr_tensor12.gradient(), None)
        egr_tensor12.stop_gradient = False
        egr_tensor12.backward()
        np.testing.assert_array_equal(egr_tensor12.gradient(), arr)

    def test_set_value(self):
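        # set_value() on a parameter replaces the weights while keeping the original place.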
        linear = paddle.nn.Linear(1, 3)
        ori_place = linear.weight.place
        new_weight = np.ones([1, 3]).astype('float32')
        self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))

        linear.weight.set_value(new_weight)
        np.testing.assert_array_equal(linear.weight.numpy(), new_weight)
        self.assertTrue(linear.weight.place._equals(ori_place))


if __name__ == "__main__":
    unittest.main()