#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
import op_test
from decorator_helper import prog_scope

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
from paddle.fluid import Program, program_guard
from paddle.fluid.backward import append_backward


class TestAssignOp(op_test.OpTest):
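    """Forward and backward checks for the assign op on float64 input."""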
    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
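    """Forward and backward checks for the assign op on float16 input."""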
    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
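    """Assign a LoDTensorArray in static mode and check value and gradient."""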
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


class TestAssignOpError(unittest.TestCase):
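    """fluid.layers.assign should raise TypeError for unsupported inputs."""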
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, fluid.layers.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, x2)
        paddle.disable_static()


class TestAssignOApi(unittest.TestCase):
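    """paddle.assign on arrays, lists, and scalars, plus paddle.clone."""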
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray1(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray2(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray3(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        l = [1, 2, 3]
        result = paddle.assign(l)
        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(
                paddle.static.default_main_program(),
                feed={'X': x_np},
                fetch_list=[clone_x],
            )[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):
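    """paddle.assign should raise TypeError for unsupported inputs."""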
    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # assigning a list of Variables is not supported
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


class TestAssignDoubleGradCheck(unittest.TestCase):
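    """Double gradient check for assign in static and dygraph modes."""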
    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestAssignTripleGradCheck(unittest.TestCase):
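    """Triple gradient check for assign in static and dygraph modes."""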
    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()