#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
16 17 18

import numpy as np

19
import paddle
20
import paddle.fluid as fluid
C
ceci3 已提交
21
import paddle.fluid.core as core
22
from paddle.fluid import Program, program_guard
C
ceci3 已提交
23 24 25 26 27 28 29


def npairloss(anchor, positive, labels, l2_reg=0.002):
    """NumPy reference implementation of the n-pair loss.

    Args:
        anchor: (batch, feat_dim) float array of anchor embeddings.
        positive: (batch, feat_dim) float array of positive embeddings.
        labels: (batch,) array of class ids; equal ids mark matching pairs.
        l2_reg: L2 regularization coefficient applied to the embeddings.

    Returns:
        float32 scalar: softmax cross-entropy term plus the L2 penalty.
    """

    def softmax_cross_entropy_with_logits(logits, labels):
        # Row-wise softmax followed by the mean cross entropy against the
        # (soft) label distribution; reduced with float32 accumulation to
        # match the operator's output dtype.
        probs = np.exp(logits)
        probs = probs / np.sum(probs, axis=1).reshape(-1, 1)
        return np.mean(
            -np.sum(labels * np.log(probs), axis=1), dtype=np.float32
        )

    batch_size = labels.shape[0]

    # Build the soft label matrix: entry (i, j) is nonzero iff labels[i] ==
    # labels[j]; each row is normalized to sum to 1.
    labels = np.reshape(labels, (batch_size, 1))
    labels = np.equal(labels, labels.transpose()).astype(float)
    labels = labels / np.sum(labels, axis=1, keepdims=True)

    # L2 penalty: 0.25 * l2_reg * (mean ||anchor||^2 + mean ||positive||^2).
    l2loss = np.mean(np.sum(np.power(anchor, 2), 1)) + np.mean(
        np.sum(np.power(positive, 2), 1)
    )
    l2loss = (l2loss * 0.25 * l2_reg).astype(np.float32)

    similarity_matrix = np.matmul(anchor, positive.transpose())
    # softmax_cross_entropy_with_logits already returns a scalar mean, so the
    # original extra np.mean(...) wrapper was redundant and has been removed.
    celoss = softmax_cross_entropy_with_logits(similarity_matrix, labels)

    return l2loss + celoss


class TestNpairLossOp(unittest.TestCase):
    """Checks paddle.nn.functional.npair_loss against the NumPy reference."""

    def setUp(self):
        self.dtype = np.float32

    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        # Compare a fetched tensor against the expected NumPy value.
        np.testing.assert_allclose(
            np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg
        )

    def test_npair_loss(self):
        reg_lambda = 0.002
        num_data, feat_dim, num_classes = 18, 6, 3

        place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        # Random embeddings plus integer class ids stored as float32.
        embeddings_anchor = np.random.rand(num_data, feat_dim).astype(
            np.float32
        )
        embeddings_positive = np.random.rand(num_data, feat_dim).astype(
            np.float32
        )
        row_labels = np.random.randint(0, num_classes, size=(num_data)).astype(
            np.float32
        )

        # Expected value from the NumPy reference implementation above.
        expected = npairloss(
            embeddings_anchor,
            embeddings_positive,
            row_labels,
            l2_reg=reg_lambda,
        )

        # Static-graph inputs mirroring the NumPy arrays' shapes.
        anc = paddle.static.data(
            dtype='float32',
            name='anc',
            shape=embeddings_anchor.shape,
        )
        pos = paddle.static.data(
            dtype='float32',
            name='pos',
            shape=embeddings_positive.shape,
        )
        lab = paddle.static.data(
            dtype='float32',
            name='lab',
            shape=row_labels.shape,
        )

        loss_var = paddle.nn.functional.npair_loss(
            anchor=anc, positive=pos, labels=lab, l2_reg=reg_lambda
        )
        fetched = exe.run(
            feed={
                'anc': embeddings_anchor,
                'pos': embeddings_positive,
                'lab': row_labels,
            },
            fetch_list=[loss_var.name],
        )

        diagnostic = (
            f"inference output are different at {place}, "
            f"{np.dtype('float32')}{np.array(fetched)}{expected}"
        )
        self.__assert_close(fetched, expected, diagnostic, atol=1e-3)
class TestNpairLossOpError(unittest.TestCase):
    """Verifies npair_loss rejects non-Variable inputs and bad dtypes."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            anchor_np = np.random.random((2, 4)).astype("float32")
            positive_np = np.random.random((2, 4)).astype("float32")
            labels_np = np.random.random((2)).astype("float32")
            anchor_data = fluid.data(
                name='anchor', shape=[2, 4], dtype='float32'
            )
            positive_data = fluid.data(
                name='positive', shape=[2, 4], dtype='float32'
            )
            labels_data = fluid.data(name='labels', shape=[2], dtype='float32')

            def test_anchor_Variable():
                # the anchor type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_np, positive=positive_data, labels=labels_data
                )

            def test_positive_Variable():
                # the positive type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data, positive=positive_np, labels=labels_data
                )

            def test_labels_Variable():
                # the labels type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data, positive=positive_data, labels=labels_np
                )

            self.assertRaises(TypeError, test_anchor_Variable)
            self.assertRaises(TypeError, test_positive_Variable)
            self.assertRaises(TypeError, test_labels_Variable)

            def test_anchor_type():
                # dtype must be float32 or float64
                anchor_data1 = fluid.data(
                    name='anchor1', shape=[2, 4], dtype='int32'
                )
                # Fix: the original passed anchor_data (float32) together
                # with labels_np (a numpy array), so the TypeError came from
                # the labels Variable check — the int32 anchor was never
                # exercised. Pass anchor_data1 with valid other inputs.
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data1,
                    positive=positive_data,
                    labels=labels_data,
                )

            def test_positive_type():
                # dtype must be float32 or float64
                positive_data1 = fluid.data(
                    name='positive1', shape=[2, 4], dtype='int32'
                )
                # Fix: the original passed labels_np (a numpy array), which
                # tripped the labels Variable check before the positive
                # dtype check could run. Use labels_data instead.
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data,
                    positive=positive_data1,
                    labels=labels_data,
                )

            def test_labels_type():
                # dtype must be float32 or float64
                labels_data1 = fluid.data(
                    name='labels1', shape=[2], dtype='int32'
                )
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data,
                    positive=positive_data,
                    labels=labels_data1,
                )

            self.assertRaises(TypeError, test_anchor_type)
            self.assertRaises(TypeError, test_positive_type)
            self.assertRaises(TypeError, test_labels_type)


# Entry point: run the unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()