#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


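# Forward/backward checks for the assign op with float64 input; assign is an
# identity copy, so 'Out' should equal 'X' exactly.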
class TestAssignOp(op_test.OpTest):

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


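# Same forward/backward checks as TestAssignOp, but with float16 input.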
class TestAssignFP16Op(op_test.OpTest):

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


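# fluid.layers.assign applied to a LoDTensorArray: checks the value read back
# from the array and that gradients still reach 'x' through array_write/array_read.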
class TestAssignOpWithLoDTensorArray(unittest.TestCase):

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
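        # mean() averages over 100 * 10 = 1000 elements, so d(mean)/dx is
        # 1/1000 at every position, i.e. ones / 1000.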
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


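# Invalid inputs to fluid.layers.assign should raise TypeError.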
class TestAssignOpError(unittest.TestCase):

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, x2)
        paddle.disable_static()


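# End-to-end tests of the paddle.assign Python API: LoDTensorArray, numpy
# arrays of several dtypes, plain Python lists and scalars, and paddle.clone.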
class TestAssignOApi(unittest.TestCase):

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray1(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray2(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray3(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        l = [1, 2, 3]
        result = paddle.assign(l)
        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

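        # y = clone_x ** 3 gives dy/d(clone_x) = 3 * clone_x ** 2 = 3 at x = 1;
        # the same gradient should flow through the clone back to x.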
        y = clone_x**3
        y.backward()

        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(paddle.static.default_main_program(),
                           feed={'X': x_np},
                           fetch_list=[clone_x])[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


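# Invalid inputs to paddle.assign should raise TypeError.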
class TestAssignOpErrorApi(unittest.TestCase):

    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # Assigning a list of Variables is not supported.
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


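# Numerical double-gradient check for assign, in static graph mode and (via
# assign_wrapper) in dygraph mode.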
class TestAssignDoubleGradCheck(unittest.TestCase):

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


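# Numerical triple-gradient check for assign, mirroring the double-grad test.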
class TestAssignTripleGradCheck(unittest.TestCase):

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()