# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
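
# This test model intentionally contains parameters that never receive a
# gradient: `SimpleNet.tmp` is not used in forward(), and `fc.stop_gradient`
# cuts the backward graph above the bias add, so the embedding and
# `softmax_weight` parameters get no gradient either. The parallel dygraph
# runner is expected to train through these unused variables regardless.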

from __future__ import print_function

import numpy as np
import paddle

from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
from paddle.nn import Layer, Embedding


class SimpleNet(Layer):
    def __init__(self,
                 hidden_size,
                 vocab_size,
                 num_steps=20,
                 init_scale=0.1,
                 is_sparse=False,
                 dtype="float32"):
        super(SimpleNet, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_steps = num_steps
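        # sparse=is_sparse selects sparse gradient updates for the embedding
        # table; this test runs with is_sparse=False (see get_model below)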
        self.embedding = Embedding(
            self.vocab_size,
            self.hidden_size,
            sparse=is_sparse,
            weight_attr=paddle.ParamAttr(
                name='embedding_param',
                initializer=paddle.nn.initializer.Uniform(
                    low=-init_scale, high=init_scale)))
        self.softmax_weight = self.create_parameter(
            attr=paddle.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype=dtype,
            default_initializer=paddle.nn.initializer.Uniform(
                low=-self.init_scale, high=self.init_scale))
        self.softmax_bias = self.create_parameter(
            attr=paddle.ParamAttr(),
            shape=[self.vocab_size],
            dtype=dtype,
            default_initializer=paddle.nn.initializer.Uniform(
                low=-self.init_scale, high=self.init_scale))
        # an extra parameter that is never used in forward(), so it
        # receives no gradient (an "unused variable" for the reducer)
        self.tmp = self.create_parameter(
            attr=paddle.ParamAttr(),
            shape=[self.vocab_size],
            dtype=dtype,
            default_initializer=paddle.nn.initializer.Uniform(
                low=-self.init_scale, high=self.init_scale))

    def forward(self, input, label):
        x_emb = self.embedding(input)
        fc = paddle.matmul(x_emb, self.softmax_weight)

        # stop_gradient blocks gradients from flowing back through fc, so
        # x_emb (and hence the embedding) and softmax_weight get no gradient
        fc.stop_gradient = True
        fc = paddle.add(fc, self.softmax_bias)
        projection = paddle.reshape(fc, shape=[-1, self.vocab_size])
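        # per-token cross entropy over the vocabulary, then mean over the
        # batch dimension and sum over time steps to produce a scalar loss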
        loss = paddle.nn.functional.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
        loss = paddle.mean(loss, axis=[0])
        loss = paddle.sum(loss)

        return {"loss": loss}


# global configs
batch_size = 4
batch_num = 200
hidden_size = 10
vocab_size = 1000
num_steps = 3
init_scale = 0.1


def fake_sample_reader():
    def __reader__():
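        # every sample is the fixed ramp [0, ..., num_steps - 1] with the
        # label sequence shifted forward by one position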
        for i in range(batch_num):
            x_data = np.arange(num_steps).astype('int64')
            y_data = np.arange(1, 1 + num_steps).astype('int64')
            yield x_data, y_data

    return __reader__


class TestSparseEmbeddingUnusedVars(TestParallelDyGraphRunnerBase):
    def get_model(self):
        model = SimpleNet(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_steps=num_steps,
            init_scale=init_scale,
            is_sparse=False)

        train_reader = paddle.batch(
            fake_sample_reader(), batch_size=batch_size, drop_last=True)

        optimizer = paddle.optimizer.SGD(learning_rate=0.001,
                                         parameters=model.parameters())

        return model, train_reader, optimizer

    def run_one_loop(self, model, optimizer, batch):
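        # flatten each sample to num_steps (3) tokens, then shape inputs as
        # [batch_size, num_steps, 1] and labels as [batch_size * num_steps, 1]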
        x_data = np.array([x[0].reshape(3) for x in batch]).astype('int64')
        y_data = np.array([x[1].reshape(3) for x in batch]).astype('int64')
        x_data = x_data.reshape((-1, num_steps, 1))
        y_data = y_data.reshape((-1, 1))

        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)

        dy_loss = model(x, y)

        return dy_loss["loss"]
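

# A minimal sketch (not part of the original harness) of how a model like
# SimpleNet could be wrapped for dynamic-graph data parallelism. The
# `find_unused_parameters=True` flag is an assumption about the running
# Paddle version; it tells the gradient reducer not to wait on parameters
# (such as `SimpleNet.tmp`) that produce no gradient in a step.
def _example_wrap_with_data_parallel(model):
    import paddle.distributed as dist

    dist.init_parallel_env()
    return paddle.DataParallel(model, find_unused_parameters=True)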


if __name__ == "__main__":
    runtime_main(TestSparseEmbeddingUnusedVars)