#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestAssignOp(op_test.OpTest):
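    # assign should return an exact copy of its input; the forward output
    # and the gradient of Out w.r.t. X are checked via the OpTest framework.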

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
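        # FLAGS_retain_grad_for_all_tensor keeps gradients alive on every
        # tensor while the eager-mode check runs.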
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
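    # Same checks as TestAssignOp, but with a float16 input to cover the
    # half-precision kernel.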

    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
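    # assign also accepts a LoDTensorArray: build a small static graph,
    # assign the array, then verify forward values and gradients end to end.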

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
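        # mean() averages over all 100 * 10 = 1000 elements, so every
        # element of x receives a gradient of 1 / 1000.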
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


class TestAssignOpError(unittest.TestCase):
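    # Unsupported inputs to fluid.layers.assign should raise TypeError.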

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, x2)
        paddle.disable_static()


class TestAssignOApi(unittest.TestCase):
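    # Covers the public paddle.assign API: LoDTensorArray, numpy arrays of
    # several dtypes, Python lists, scalars, and paddle.clone.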

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
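        # mean() averages over all 100 * 10 = 1000 elements, so every
        # element of x receives a gradient of 1 / 1000.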
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray1(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray2(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray3(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        l = [1, 2, 3]
        result = paddle.assign(l)
        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(paddle.static.default_main_program(),
                           feed={'X': x_np},
                           fetch_list=[clone_x])[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):
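    # Unsupported inputs to paddle.assign should raise TypeError.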

    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # assigning a list of Variables is not supported
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


class TestAssignDoubleGradCheck(unittest.TestCase):
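    # Numerically verifies second-order gradients of assign, in static
    # graph mode and (via the wrapper) in dygraph mode.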

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable must be specified explicitly and must not include -1
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestAssignTripleGradCheck(unittest.TestCase):
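    # Numerically verifies third-order gradients of assign, in static
    # graph mode and (via the wrapper) in dygraph mode.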

    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable must be specified explicitly and must not include -1
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check([data],
                                           out,
                                           x_init=[data_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper,
                                                       [data],
                                                       out,
                                                       x_init=[data_arr],
                                                       place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()