#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
import numpy as np
import paddle.fluid.core as core


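# A minimal network with a single sparse Embedding layer; with is_sparse=True the
# weight gradient is produced as SelectedRows rather than a dense tensor.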
class SimpleNet(fluid.Layer):
    def __init__(self, vocab_size, hidden_size, dtype):
        super(SimpleNet, self).__init__()
        self.emb = Embedding(
            size=[vocab_size, hidden_size],
            dtype=dtype,
            param_attr='emb.w',
            is_sparse=True)

    def forward(self, input):
        input_emb = self.emb(input)
        return input_emb, self.emb


class TestSimpleNet(unittest.TestCase):
    def test_selectedrows_gradient1(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for dtype in ["float32", "float64"]:
                for sort_sum_gradient in [True, False]:
                    with fluid.dygraph.guard(place):
                        backward_strategy = fluid.dygraph.BackwardStrategy()
                        backward_strategy.sort_sum_gradient = sort_sum_gradient
                        # grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(5.0)

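                        # A small batch of word ids, wrapped as a dygraph Variable.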
                        input_word = np.array([[1, 2], [2, 1]]).astype('int64')
                        input = to_variable(input_word)

                        simplenet = SimpleNet(20, 32, dtype)
                        sgd = SGDOptimizer(
                            learning_rate=0.001,
                            parameter_list=simplenet.parameters())
                        input_emb, emb = simplenet(input)

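                        # No backward pass has run yet, so reading gradients should raise ValueError.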
                        try:
                            emb.weight.gradient()
                        except ValueError as e:
                            assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                                e)
                        try:
                            input_emb.gradient()
                        except ValueError as e:
                            assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                                e)

                        # Backward produces the SelectedRows gradient for the sparse
                        # embedding weight; minimize applies the accumulated gradients.
                        input_emb.backward(backward_strategy)
                        sgd.minimize(input_emb)  # grad_clip=grad_clip
                        emb.weight.gradient()

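                        # After clearing the layer's gradients, gradient() should raise again.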
                        emb.clear_gradients()
                        try:
                            emb.weight.gradient()
                        except ValueError as e:
                            assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                                e)

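                        # Clearing the output Variable's gradient should still leave
                        # gradient() callable without raising.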
                        input_emb.clear_gradient()
                        input_emb.gradient()

    def test_selectedrows_gradient2(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for sort_sum_gradient in [True, False]:
                with fluid.dygraph.guard(place):
                    backward_strategy = fluid.dygraph.BackwardStrategy()
                    backward_strategy.sort_sum_gradient = sort_sum_gradient
                    grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(
                        5.0)

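                    # A small batch of word ids, wrapped as a dygraph Variable.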
                    input_word = np.array([[1, 2], [2, 1]]).astype('int64')
                    input = to_variable(input_word)

                    simplenet = SimpleNet(20, 32, "float32")
                    sgd = SGDOptimizer(
                        learning_rate=0.001,
                        parameter_list=simplenet.parameters())
                    input_emb, emb = simplenet(input)

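                    # No backward pass has run yet, so reading gradients should raise ValueError.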
                    try:
                        emb.weight.gradient()
                    except ValueError as e:
                        assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                            e)
                    try:
                        input_emb.gradient()
                    except ValueError as e:
                        assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                            e)

                    # Backward produces the SelectedRows gradient; here minimize also
                    # applies global-norm gradient clipping.
                    input_emb.backward(backward_strategy)
                    sgd.minimize(input_emb, grad_clip=grad_clip)
                    emb.weight.gradient()

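                    # After clearing the layer's gradients, gradient() should raise again.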
                    emb.clear_gradients()
                    try:
                        emb.weight.gradient()
                    except ValueError as e:
                        assert "has no grad, Please set Variable.stop_gradient=False, or check if this is the first and only variable need grad, if so, please set its pre-Variable's stop_gradient=False, to make sure it has gradient" in str(
                            e)

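                    # Clearing the output Variable's gradient should still leave
                    # gradient() callable without raising.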
                    input_emb.clear_gradient()
                    input_emb.gradient()


if __name__ == '__main__':
    unittest.main()