#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.optimizer import SGDOptimizer
import numpy as np
import paddle.fluid.core as core
import paddle
from paddle.fluid.framework import _test_eager_guard


class SimpleNet(paddle.nn.Layer):
    def __init__(self, vocab_size, hidden_size, dtype):
        super().__init__()
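        # is_sparse=True makes the embedding weight produce a sparse
        # (SelectedRows) gradient, which is what these tests exercise.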
        self.emb = fluid.dygraph.Embedding(
            size=[vocab_size, hidden_size],
            dtype=dtype,
            param_attr='emb.w',
            is_sparse=True,
        )

    def forward(self, input):
        input_emb = self.emb(input)
        return input_emb, self.emb


class TestSimpleNet(unittest.TestCase):
    def func_selectedrows_gradient1(self):
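        # Cover every available place, both dtypes, and both gradient
        # summation orders (FLAGS_sort_sum_gradient on and off).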
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for dtype in ["float32", "float64"]:
                for sort_sum_gradient in [True, False]:
                    paddle.disable_static(place)
                    fluid.set_flags(
                        {'FLAGS_sort_sum_gradient': sort_sum_gradient}
                    )
                    # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)

                    input_word = np.array([[1, 2], [2, 1]]).astype('int64')
                    input = paddle.to_tensor(input_word)

                    simplenet = SimpleNet(20, 32, dtype)
                    adam = SGDOptimizer(
                        learning_rate=0.001,
                        parameter_list=simplenet.parameters(),
                    )  # grad_clip=grad_clip
                    input_emb, emb = simplenet(input)

                    self.assertIsNone(emb.weight.gradient())
                    self.assertIsNone(input_emb.gradient())

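                    # backward() through the sparse embedding yields a
                    # SelectedRows gradient for emb.weight.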
                    input_emb.backward()
                    adam.minimize(input_emb)
                    self.assertIsNotNone(emb.weight.gradient())

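                    # Layer.clear_gradients() drops the parameter gradient,
                    # so it reads as None again.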
                    emb.clear_gradients()
                    self.assertIsNone(emb.weight.gradient())

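                    # Tensor.clear_gradient() only resets the gradient's
                    # values, so gradient() still returns an array.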
                    input_emb.clear_gradient()
                    self.assertIsNotNone(input_emb.gradient())
                    paddle.enable_static()

    def test_selectedrows_gradient1(self):
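        # Retain gradients for intermediate tensors so that
        # input_emb.gradient() can be inspected.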
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with _test_eager_guard():
            self.func_selectedrows_gradient1()
        self.func_selectedrows_gradient1()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

    def func_selectedrows_gradient2(self):
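        # Same as gradient1, but run under fluid.dygraph.guard and with
        # GradientClipByGlobalNorm applied through the optimizer.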
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for sort_sum_gradient in [True, False]:
                with fluid.dygraph.guard(place):
                    fluid.set_flags(
                        {'FLAGS_sort_sum_gradient': sort_sum_gradient}
                    )
                    grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)

                    input_word = np.array([[1, 2], [2, 1]]).astype('int64')
                    input = to_variable(input_word)

                    simplenet = SimpleNet(20, 32, "float32")
                    adam = SGDOptimizer(
                        learning_rate=0.001,
                        parameter_list=simplenet.parameters(),
                        grad_clip=grad_clip,
                    )
                    input_emb, emb = simplenet(input)

                    self.assertIsNone(emb.weight.gradient())
                    self.assertIsNone(input_emb.gradient())

                    input_emb.backward()
                    adam.minimize(input_emb)
                    self.assertIsNotNone(emb.weight.gradient())

                    emb.clear_gradients()
                    self.assertIsNone(emb.weight.gradient())

                    input_emb.clear_gradient()
                    self.assertIsNotNone(input_emb.gradient())

    def test_selectedrows_gradient2(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with _test_eager_guard():
            self.func_selectedrows_gradient2()
        self.func_selectedrows_gradient2()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})


if __name__ == '__main__':
    unittest.main()