#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import eager_op_test
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import convert_float_to_uint16, convert_uint16_to_float

import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.backward import append_backward


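# Basic correctness test: the assign op should copy its input verbatim in the
# forward pass, and its gradient is the identity.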
class TestAssignOp(eager_op_test.OpTest):
    def setUp(self):
        self.python_api = paddle.assign
        self.public_python_api = paddle.assign
        self.op_type = "assign"
        self.prim_op_type = "prim"
        x = np.random.random(size=(100, 10)).astype('float64')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        self.check_output()
        paddle.disable_static()

    def test_backward(self):
        paddle.enable_static()
        self.check_grad(['X'], 'Out', check_prim=True)
        paddle.disable_static()


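# float16 variant of the basic test; FP16 kernels are only available on GPU.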
@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestAssignFP16Op(eager_op_test.OpTest):
    def setUp(self):
        self.python_api = paddle.assign
        self.public_python_api = paddle.assign
        self.op_type = "assign"
        self.prim_op_type = "prim"
        x = np.random.random(size=(100, 10)).astype('float16')
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        self.check_output()
        paddle.disable_static()

    def test_backward(self):
        paddle.enable_static()
        self.check_grad(['X'], 'Out', check_prim=True)
        paddle.disable_static()


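# bfloat16 variant: float32 inputs are packed into uint16 storage via
# convert_float_to_uint16 before being fed to the op.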
@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "BF16 test runs only on GPU"
)
class TestAssignBFP16Op(eager_op_test.OpTest):
    def setUp(self):
        self.python_api = paddle.assign
        self.public_python_api = paddle.assign
        self.op_type = "assign"
        self.prim_op_type = "prim"
        x = np.random.uniform(0, 1, [100, 10]).astype(np.float32)
        x = convert_float_to_uint16(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': x}

    def test_forward(self):
        paddle.enable_static()
        self.check_output()
        paddle.disable_static()

    def test_backward(self):
        paddle.enable_static()
        self.check_grad(['X'], 'Out', check_prim=True)
        paddle.disable_static()


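# Static-graph test: assigning a whole LoDTensorArray should preserve both the
# forward result and the gradient that flows back to the feed variable.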
class TestAssignOpWithLoDTensorArray(unittest.TestCase):
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = paddle.static.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = paddle.tensor.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = paddle.add(x=x, y=y)
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = paddle.tensor.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = paddle.tensor.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()


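# Unsupported input types for paddle.assign should raise TypeError.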
class TestAssignOpError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input must be a Variable or a numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        paddle.disable_static()


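# Python-API-level tests for paddle.assign: LoDTensorArray, numpy arrays,
# plain Python lists, scalar values, and paddle.clone.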
class TestAssignOApi(unittest.TestCase):
    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program):
            x = paddle.static.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = paddle.tensor.fill_constant(
                shape=[100, 10], dtype='float32', value=1
            )
            z = paddle.add(x=x, y=y)
            i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = paddle.tensor.array_write(x=z, i=i)
            array = paddle.assign(init_array)
            sums = paddle.tensor.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = (
            fluid.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(
            main_program,
            feed={'x': feed_x},
            fetch_list=[sums.name, x.grad_name],
        )
        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        for dtype in [np.bool_, np.float32, np.int32, np.int64]:
            with fluid.dygraph.guard():
                array = np.random.random(size=(100, 10)).astype(dtype)
                result1 = paddle.zeros(shape=[3, 3], dtype='float32')
                paddle.assign(array, result1)
            np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)

    def test_assign_List(self):
        values = [1, 2, 3]
        result = paddle.assign(values)
        np.testing.assert_allclose(result.numpy(), np.array(values), rtol=1e-05)

    def test_assign_BasicTypes(self):
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)

    def test_clone(self):
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        x.retain_grads()
        clone_x = paddle.clone(x)
        clone_x.retain_grads()

        y = clone_x**3
        y.backward()

        np.testing.assert_array_equal(x, [1, 1])
        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(
                paddle.static.default_main_program(),
                feed={'X': x_np},
                fetch_list=[clone_x],
            )[0]

        np.testing.assert_array_equal(y_np, x_np)
        paddle.disable_static()


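# float16/bfloat16 checks for the paddle.assign Python API (GPU only).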
@unittest.skipIf(
    not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
class TestAssignOApiFP16(unittest.TestCase):
    def test_assign_fp16(self):
        x = np.random.uniform(0, 10, [3, 3]).astype(np.float16)
        x = paddle.to_tensor(x)
        result = paddle.zeros(shape=[3, 3], dtype='float16')
        paddle.assign(x, result)
        np.testing.assert_equal(result.numpy(), x.numpy())

    def test_assign_bfp16(self):
        x_f = np.random.uniform(0, 10, [3, 3]).astype(np.float32)
        x = convert_float_to_uint16(x_f)
        x = paddle.to_tensor(x)
        result = paddle.zeros(shape=[3, 3], dtype='bfloat16')
        paddle.assign(x, result)
        np.testing.assert_allclose(
            convert_uint16_to_float(result.numpy()), x_f, rtol=1e-02
        )
        np.testing.assert_equal(
            convert_uint16_to_float(result.numpy()), convert_uint16_to_float(x)
        )


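# API-level error cases, including assigning a list of Variables.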
class TestAssignOpErrorApi(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # The input must be a Variable or a numpy.ndarray.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.assign, x1)
            # When the input is a numpy.ndarray, its dtype must be float32 or int32.
            x2 = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, x2)
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            # Assigning a list of Variables is not supported.
            self.assertRaises(TypeError, paddle.assign, x)
        paddle.disable_static()


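# Second-order (double) gradient check for assign, in static graph and dygraph.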
class TestAssignDoubleGradCheck(unittest.TestCase):
    def assign_wrapper(self, x):
        return paddle.assign(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified; it must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [3, 4, 5], dtype)
        data.persistable = True
        out = paddle.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
        paddle.disable_static()


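# Third-order (triple) gradient check for assign, in static graph and dygraph.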
class TestAssignTripleGradCheck(unittest.TestCase):
    def assign_wrapper(self, x):
        return paddle.assign(x[0])

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be fully specified; it must not include -1.
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [3, 4, 5], dtype)
        data.persistable = True
        out = paddle.assign(data)
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.assign_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
        paddle.disable_static()


if __name__ == '__main__':
    unittest.main()