#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestAssignOp(op_test.OpTest):
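    # Checks that the assign op copies its float64 input to the output
    # unchanged and that gradients pass through it untouched.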

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
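    # Same forward/backward checks as TestAssignOp, with float16 input.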

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
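    # Builds a static program that writes into a LoDTensorArray, assigns the
    # array, and checks both the forward result and the gradient fed back to x.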

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
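        # mean() averages 100 * 10 = 1000 elements, so dmean/dx is 1/1000 per element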
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


class TestAssignOpError(unittest.TestCase):
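    # fluid.layers.assign must raise TypeError for unsupported input types.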

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, x2)
        paddle.disable_static()


class TestAssignOApi(unittest.TestCase):
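    # End-to-end tests for the public paddle.assign / paddle.clone APIs on
    # LoDTensorArray, numpy arrays, lists, and Python scalars.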

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
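        # mean() averages 100 * 10 = 1000 elements, so dmean/dx is 1/1000 per element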
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray1(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray2(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray3(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        l = [1, 2, 3]
        result = paddle.assign(l)
        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

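        # y = clone_x**3, so dy/dx = 3 * x**2 = 3 at x = 1, flowing back through clone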
        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(paddle.static.default_main_program(),
                           feed={'X': x_np},
                           fetch_list=[clone_x])[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):
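    # paddle.assign must raise TypeError for unsupported input types.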

    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # assigning a list of Variables is not supported
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


class TestAssignDoubleGradCheck(unittest.TestCase):
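    # Numerical double-gradient check for assign, in static graph and dygraph modes.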

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestAssignTripleGradCheck(unittest.TestCase):
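    # Numerical triple-gradient check for assign, in static graph and dygraph modes.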

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()