#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework


class TestAssignOp(op_test.OpTest):
    """OpTest for the ``assign`` operator on float64 input: the output
    must be an exact copy of the input, and the gradient is identity."""

    def setUp(self):
        self.op_type = "assign"
        self.python_api = paddle.assign
        data = np.random.random(size=(100, 10)).astype('float64')
        # assign is the identity op: Out mirrors X exactly.
        self.inputs = {'X': data}
        self.outputs = {'Out': data}

    def test_forward(self):
        # OpTest checks run under the static graph; restore dygraph after.
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        # The gradient of assign is the identity as well.
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
    """OpTest for the ``assign`` operator on float16 input; mirrors
    TestAssignOp with a half-precision tensor."""

    def setUp(self):
        self.op_type = "assign"
        self.python_api = paddle.assign
        data = np.random.random(size=(100, 10)).astype('float16')
        # assign copies X to Out unchanged.
        self.inputs = {'X': data}
        self.outputs = {'Out': data}

    def test_forward(self):
        # OpTest checks run under the static graph; restore dygraph after.
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.check_output(check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()

    def test_backward(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        # The gradient of assign is the identity as well.
        self.check_grad(['X'], 'Out', check_eager=True)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()
        framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):
    """``fluid.layers.assign`` on a LoDTensorArray: forward values and the
    gradient path through the array must both stay intact."""

    def test_assign_LoDTensorArray(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        # NOTE: a separate startup Program was previously created here but
        # never used; only the main program is needed.
        main_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            # The assign op itself is what is under test; its result is not
            # read back, but it must not disturb init_array or the gradient.
            array = fluid.layers.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
        self.assertTrue(np.allclose(res[0], feed_add))
        # mean over 100*10 elements: every element's gradient is 1/1000.
        self.assertTrue(np.allclose(res[1], ones / 1000.0))
        paddle.disable_static()


class TestAssignOpError(unittest.TestCase):
    """``fluid.layers.assign`` must reject unsupported input types."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # A raw LoDTensor is neither a Variable nor a numpy.ndarray.
            lod_input = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                                fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.assign, lod_input)
            # An ndarray input must carry a supported dtype (float32/int32);
            # uint8 is rejected.
            uint8_input = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, fluid.layers.assign, uint8_input)
        paddle.disable_static()


class TestAssignOApi(unittest.TestCase):
    """Functional tests for the public ``paddle.assign`` and
    ``paddle.clone`` APIs over tensor arrays, ndarrays, lists and scalars."""

    def test_assign_LoDTensorArray(self):
        """paddle.assign on a LoDTensorArray keeps forward values and the
        gradient path intact."""
        paddle.enable_static()
        # NOTE: a separate startup Program was previously created here but
        # never used; only the main program is needed.
        main_program = Program()
        with program_guard(main_program):
            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.fill_constant(shape=[100, 10],
                                           dtype='float32',
                                           value=1)
            z = fluid.layers.elementwise_add(x=x, y=y)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            init_array = fluid.layers.array_write(x=z, i=i)
            # The assign is the operation under test; its result is not read
            # back, but it must not disturb init_array or the gradient.
            array = paddle.assign(init_array)
            sums = fluid.layers.array_read(array=init_array, i=i)
            mean = paddle.mean(sums)
            append_backward(mean)

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        feed_x = np.random.random(size=(100, 10)).astype('float32')
        ones = np.ones((100, 10)).astype('float32')
        feed_add = feed_x + ones
        res = exe.run(main_program,
                      feed={'x': feed_x},
                      fetch_list=[sums.name, x.grad_name])
        self.assertTrue(np.allclose(res[0], feed_add))
        # mean over 100*10 elements: every element's gradient is 1/1000.
        self.assertTrue(np.allclose(res[1], ones / 1000.0))
        paddle.disable_static()

    def test_assign_NumpyArray(self):
        """Assigning a bool ndarray overwrites the target tensor,
        including its shape."""
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.bool_)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        self.assertTrue(np.allclose(result1.numpy(), array))

    def test_assign_NumpyArray1(self):
        """Same as above with a float32 ndarray."""
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.float32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        self.assertTrue(np.allclose(result1.numpy(), array))

    def test_assign_NumpyArray2(self):
        """Same as above with an int32 ndarray."""
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int32)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        self.assertTrue(np.allclose(result1.numpy(), array))

    def test_assign_NumpyArray3(self):
        """Same as above with an int64 ndarray."""
        with fluid.dygraph.guard():
            array = np.random.random(size=(100, 10)).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1)
        self.assertTrue(np.allclose(result1.numpy(), array))

    def test_assign_List(self):
        """A plain Python list is converted to a tensor."""
        l = [1, 2, 3]
        result = paddle.assign(l)
        self.assertTrue(np.allclose(result.numpy(), np.array(l)))

    def test_assign_BasicTypes(self):
        """Scalars (int, float, bool) become 1-element tensors."""
        result1 = paddle.assign(2)
        result2 = paddle.assign(3.0)
        result3 = paddle.assign(True)
        self.assertTrue(np.allclose(result1.numpy(), np.array([2])))
        self.assertTrue(np.allclose(result2.numpy(), np.array([3.0])))
        self.assertTrue(np.allclose(result3.numpy(), np.array([1])))

    def test_clone(self):
        """paddle.clone copies values and participates in autograd, in
        both dygraph and static graph modes."""
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        self.python_api = paddle.clone

        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

        # d(x^3)/dx = 3*x^2 = 3 at x == 1, flowing back through the clone.
        # (A stray positional `True` was previously passed to assertTrue as
        # the `msg` argument; it was a no-op and has been removed.)
        self.assertTrue(np.array_equal(x, [1, 1]))
        self.assertTrue(np.array_equal(clone_x.grad.numpy(), [3, 3]))
        self.assertTrue(np.array_equal(x.grad.numpy(), [3, 3]))
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(paddle.static.default_main_program(),
                           feed={'X': x_np},
                           fetch_list=[clone_x])[0]

        self.assertTrue(np.array_equal(y_np, x_np))
        paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):
    """``paddle.assign`` must reject unsupported input types."""

    def test_errors(self):
        paddle.enable_static()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with program_guard(Program(), Program()):
            # A raw LoDTensor is neither a Variable nor a numpy.ndarray.
            lod_input = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                                fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.assign, lod_input)
            # An ndarray input must carry a supported dtype (float32/int32);
            # uint8 is rejected.
            uint8_input = np.array([[2.5, 2.5]], dtype='uint8')
            self.assertRaises(TypeError, paddle.assign, uint8_input)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
        paddle.disable_static()

    def test_type_error(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            # Assigning a list of Variables is not supported.
            var_list = [paddle.randn([3, 3]), paddle.randn([3, 3])]
            self.assertRaises(TypeError, paddle.assign, var_list)
        paddle.disable_static()


# Run the full assign-op test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()