#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest

import paddle
import paddle.fluid as fluid
import paddle.fluid.initializer as initializer
from paddle.fluid import Program, program_guard


def nce(
    input, weight, bias, sample_weight, labels, num_classes, num_sample_class
):
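    """NumPy reference implementation of the NCE loss used by the op tests.

    For every batch row, the true labels plus the first ``num_sample_class``
    class ids (the ``custom_neg_classes`` negatives used below) are scored
    with sigmoid(input . weight + bias). With b = num_sample_class /
    num_classes, a positive sample costs -log(o / (o + b)) and a negative
    sample costs -log(b / (o + b)); per-sample costs are scaled by
    ``sample_weight`` and accumulated per row.
    """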
    samples = []
    sample_labels = []
    batch_size = input.shape[0]
    num_true_class = labels.shape[1]
    for i in range(batch_size):
        w = 1 if sample_weight is None else sample_weight[i]
        for label in labels[i]:
            samples.append((i, label, True, w))
            sample_labels.append(label)
        for num in range(num_sample_class):
            samples.append((i, num, False, w))
            sample_labels.append(num)
    # forward bias
    sample_out = np.zeros(len(samples)).astype(np.float32)
    if bias is not None:
        for i in range(len(samples)):
            sample_out[i] = bias[samples[i][1]]
    # forward weight
    for i in range(len(samples)):
        sample_out[i] += np.dot(input[samples[i][0]], weight[samples[i][1]])

    # forward activation
    sample_out = 1.0 / (1.0 + np.exp(-sample_out))
    # forward cost
    out = np.zeros(batch_size).astype(np.float32)
    b = 1.0 / num_classes * num_sample_class
    for i in range(len(samples)):
        o = sample_out[i]
        cost = -np.log(o / (o + b)) if samples[i][2] else -np.log(b / (o + b))
        out[samples[i][0]] += cost * samples[i][3]
    return (
        out[:, np.newaxis],
        np.array(sample_out).reshape(
            batch_size, num_sample_class + num_true_class
        ),
        np.array(sample_labels).reshape(
            batch_size, num_sample_class + num_true_class
        ),
    )


class TestNCE(OpTest):
    def generate_data(
        self,
        dim,
        batch_size,
        num_classes,
        num_true_class,
        num_neg_samples,
        is_sparse,
    ):
        input = np.random.randn(batch_size, dim).astype(np.float32)
        weight = np.random.randn(num_classes, dim).astype(np.float32)
        bias = np.random.randn(num_classes).astype(np.float32)
        sample_weight = np.random.randn(batch_size).astype(np.float32)
        labels = np.random.randint(
            0, num_classes, (batch_size, num_true_class)
        ).astype("int64")
        self.attrs = {
            'num_total_classes': num_classes,
            'num_neg_samples': num_neg_samples,
            'custom_neg_classes': list(range(num_neg_samples)),
            'seed': 0,
            'sampler': 0,
            'is_sparse': is_sparse,
            'is_test': self.is_test,
        }
        self.inputs = {
            'Input': input,
            'Label': labels,
            'Weight': weight,
            'Bias': bias,
            'SampleWeight': sample_weight,
        }

    def set_is_test(self):
        self.is_test = False

    def set_data(self):
        self.generate_data(5, 25, 100, 1, 2, False)

    def compute(self):
        out = nce(
            self.inputs['Input'],
            self.inputs['Weight'],
            self.inputs['Bias'],
            self.inputs['SampleWeight'],
            self.inputs['Label'],
            self.attrs['num_total_classes'],
            self.attrs['num_neg_samples'],
        )
        if self.is_test:
            self.outputs = {'Cost': out[0]}
        else:
            self.outputs = {
                'Cost': out[0],
                'SampleLogits': out[1],
                'SampleLabels': out[2],
            }

    def setUp(self):
        self.op_type = 'nce'
        self.set_is_test()
        self.set_data()
        self.compute()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(
            ["Input", "Weight", "Bias"], "Cost", max_relative_error=0.02
        )


class TestNCECase1Tensor(TestNCE):
    def set_data(self):
        self.generate_data(10, 20, 100, 2, 5, False)


class TestNCETensorIsTest(TestNCE):
    # when is_test is True, there is no need to check the gradient
    def set_is_test(self):
        self.is_test = True

    def test_check_grad(self):
        pass


class TestNCECase1SelectedRows(unittest.TestCase):
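    """Run the same NCE network with dense and with sparse (SelectedRows)
    parameter updates and check that one SGD step on the same data
    produces the same mean loss."""
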
    def setUp(self):
        self.base_lr = 0.0001
        self.batch_size = 8

    @staticmethod
    def get_place():
        place = fluid.core.CPUPlace()
        return place

    @staticmethod
    def get_train_data(batch_size):
        batches = []
        for i in range(batch_size):
            input = np.random.randn(batch_size, 10).astype(np.float32)
            labels = np.random.randint(0, 20, (batch_size, 1))
            batches.append([input, labels])
        return batches

    def get_optimizer(self):
        # SGD optimizer
        optimizer = fluid.optimizer.SGD(learning_rate=self.base_lr)
        return optimizer

    def train_network(
        self,
        num_total_classes,
        num_neg_samples,
        sampler,
        custom_dist,
        is_sparse,
    ):
        input = paddle.static.data(
            name="input", shape=[-1, 10], dtype="float32"
        )
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='nce_w',
                initializer=initializer.ConstantInitializer(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 1],
                dtype='float32',
                name='nce_b',
                initializer=initializer.ConstantInitializer(),
            )
        )

        cost = paddle.static.nn.nce(
            input=input,
            label=label,
            num_total_classes=num_total_classes,
            sampler=sampler,
            custom_dist=custom_dist,
            sample_weight=None,
            param_attr='nce_w',
            bias_attr='nce_b',
            seed=1,
            num_neg_samples=num_neg_samples,
            is_sparse=is_sparse,
        )
        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = self.get_optimizer()
        optimizer.minimize(avg_cost)

        return [avg_cost, [input, label]]

    def test_input_is_selected_rows(self):
        place = self.get_place()
        exe = fluid.Executor(place)

        data = self.get_train_data(self.batch_size)
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')

        rets = []
        # for dense
        dense_scope = fluid.core.Scope()
        dense_startup_program = fluid.framework.Program()
        dense_train_program = fluid.framework.Program()
        with fluid.scope_guard(dense_scope):
            with fluid.program_guard(
                dense_train_program, dense_startup_program
            ):
                cost, feeds = self.train_network(
                    20, 5, "custom_dist", nid_freq_arr.tolist(), False
                )
                feeder = fluid.DataFeeder(feed_list=feeds, place=place)
                exe.run(dense_startup_program)
                loss_val = exe.run(
                    dense_train_program,
                    feed=feeder.feed(data),
                    fetch_list=[cost.name],
                )
                rets.append(np.mean(loss_val))

        # for sparse
        sparse_scope = fluid.core.Scope()
        sparse_startup_program = fluid.framework.Program()
        sparse_train_program = fluid.framework.Program()
        with fluid.scope_guard(sparse_scope):
            with fluid.program_guard(
                sparse_train_program, sparse_startup_program
            ):
                cost, feeds = self.train_network(
                    20, 5, "custom_dist", nid_freq_arr.tolist(), True
                )
                feeder = fluid.DataFeeder(feed_list=feeds, place=place)
                exe.run(sparse_startup_program)
                loss_val = exe.run(
                    sparse_train_program,
                    feed=feeder.feed(data),
                    fetch_list=[cost.name],
                )
                rets.append(np.mean(loss_val))

        self.assertEqual(rets[0], rets[1])


class TestNCE_OpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            input1 = fluid.create_lod_tensor(
                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()
            )
            label1 = paddle.static.data(
                name='label1', shape=[-1, 4], dtype="int64"
            )
            # the input (input) of the nce layer must be a Variable
            self.assertRaises(
                TypeError, paddle.static.nn.nce, input1, label1, 5
            )

            input2 = paddle.static.data(
                name='input2', shape=[-1, 4], dtype="float32"
            )
            label2 = fluid.create_lod_tensor(
                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()
            )
            # the label input (label) of the nce layer must be a Variable
            self.assertRaises(
                TypeError, paddle.static.nn.nce, input2, label2, 5
            )

            input3 = paddle.static.data(
                name='input3', shape=[-1, 4], dtype="float16"
            )
            label3 = paddle.static.data(
                name='label3', shape=[-1, 1], dtype="int64"
            )
            # the data type of the input (input) must be float32 or float64
            self.assertRaises(
                TypeError, paddle.static.nn.nce, input3, label3, 5
            )

            input4 = paddle.static.data(
                name='input4', shape=[-1, 4], dtype="float32"
            )
            label4 = paddle.static.data(
                name='label4', shape=[-1, 1], dtype="int32"
            )
            # the data type of the label input (label) must be int64
            self.assertRaises(
                TypeError, paddle.static.nn.nce, input4, label4, 5
            )


if __name__ == '__main__':
    unittest.main()