#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestAssignOp(op_test.OpTest):
    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
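        # retain gradients for all tensors so the eager-mode check can
        # inspect intermediate gradients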
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
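        # make sure legacy dygraph is off so later tests run in eager mode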
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
    def setUp(self):
        self.python_api = paddle.assign
        self.op_type = "assign"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
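        # mean averages 100 * 10 = 1000 elements, so each input element
        # receives a gradient of 1 / 1000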
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


class TestAssignOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, fluid.layers.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, x2)
        paddle.disable_static()


class TestAssignOApi(unittest.TestCase):
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
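        # as above, the gradient of mean is 1 / 1000 per element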
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
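            # assigning into result1 overwrites it in place, adopting the
            # shape and dtype of the source array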
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray1(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray2(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_NumpyArray3(self):
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        l = [1, 2, 3]
        result = paddle.assign(l)
        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

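        # y = clone_x ** 3 with x = 1, so dy/dx = 3 * x ** 2 = 3; the clone
        # passes the gradient through to x unchanged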
        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(
                paddle.static.default_main_program(),
                feed={'X': x_np},
                fetch_list=[clone_x],
            )[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # The type of input must be Variable or numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # not support to assign list(var)
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


class TestAssignDoubleGradCheck(unittest.TestCase):
    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

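        # numerically check second-order gradients using finite differences
        # with step size eps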
        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestAssignTripleGradCheck(unittest.TestCase):
    def assign_wrapper(self, x):
        return paddle.fluid.layers.assign(x[0])

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data = layers.data('data', [3, 4, 5], False, dtype)
        data.persistable = True
        out = paddle.fluid.layers.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

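        # same finite-difference check, extended to third-order gradients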
        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.triple_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()