test_imperative_selected_rows.py
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
import numpy as np
import paddle.fluid.core as core
import paddle
from paddle.fluid.framework import _test_eager_guard


class SimpleNet(paddle.nn.Layer):
    def __init__(self, vocab_size, hidden_size, dtype):
        super(SimpleNet, self).__init__()
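        # is_sparse=True makes the embedding emit SelectedRows gradients
        # (selected row indices plus their rows) rather than a dense
        # gradient tensor; that sparse path is what these tests exercise.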
        self.emb = Embedding(
            size=[vocab_size, hidden_size],
            dtype=dtype,
            param_attr='emb.w',
            is_sparse=True)

    def forward(self, input):
        input_emb = self.emb(input)
        return input_emb, self.emb


class TestSimpleNet(unittest.TestCase):
    def func_selectedrows_gradient1(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for dtype in ["float32", "float64"]:
                for sort_sum_gradient in [True, False]:
                    paddle.disable_static(place)
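                    # FLAGS_sort_sum_gradient toggles sorted-order gradient
                    # accumulation in dygraph; run the test under both.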
                    fluid.set_flags({
                        'FLAGS_sort_sum_gradient': sort_sum_gradient
                    })
                    # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)

                    input_word = np.array([[1, 2], [2, 1]]).astype('int64')
                    input = paddle.to_tensor(input_word)

                    simplenet = SimpleNet(20, 32, dtype)
                    sgd = SGDOptimizer(
                        learning_rate=0.001,
                        parameter_list=simplenet.parameters())  # grad_clip=grad_clip
                    input_emb, emb = simplenet(input)

                    self.assertIsNone(emb.weight.gradient())
                    self.assertIsNone(input_emb.gradient())

                    input_emb.backward()
                    sgd.minimize(input_emb)
                    self.assertIsNotNone(emb.weight.gradient())

                    emb.clear_gradients()
                    self.assertIsNone(emb.weight.gradient())

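                    # Clearing a non-parameter tensor's gradient zeroes the
                    # buffer in place rather than releasing it, so
                    # gradient() still returns a value below.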
                    input_emb.clear_gradient()
                    self.assertIsNotNone(input_emb.gradient())
                    paddle.enable_static()

    def test_selectedrows_gradient1(self):
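        # Retain gradients of intermediate (non-parameter) tensors so
        # input_emb.gradient() can be inspected, then run the check in
        # both eager and legacy dygraph modes.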
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with _test_eager_guard():
            self.func_selectedrows_gradient1()
        self.func_selectedrows_gradient1()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})

    def func_selectedrows_gradient2(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for sort_sum_gradient in [True, False]:
                with fluid.dygraph.guard(place):
                    fluid.set_flags({
                        'FLAGS_sort_sum_gradient': sort_sum_gradient
                    })
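                    # Unlike the first test, gradient clipping by global
                    # norm is enabled for this optimizer.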
                    grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)

                    input_word = np.array([[1, 2], [2, 1]]).astype('int64')
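                    # to_variable is the older dygraph conversion API; the
                    # first test covers paddle.to_tensor instead.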
                    input = to_variable(input_word)

                    simplenet = SimpleNet(20, 32, "float32")
                    sgd = SGDOptimizer(
                        learning_rate=0.001,
                        parameter_list=simplenet.parameters(),
                        grad_clip=grad_clip)
                    input_emb, emb = simplenet(input)

                    self.assertIsNone(emb.weight.gradient())
                    self.assertIsNone(input_emb.gradient())

                    input_emb.backward()
                    sgd.minimize(input_emb)
                    self.assertIsNotNone(emb.weight.gradient())

                    emb.clear_gradients()
                    self.assertIsNone(emb.weight.gradient())

                    input_emb.clear_gradient()
                    self.assertIsNotNone(input_emb.gradient())

    def test_selectedrows_gradient2(self):
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        with _test_eager_guard():
            self.func_selectedrows_gradient2()
        self.func_selectedrows_gradient2()
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})


if __name__ == '__main__':
    unittest.main()