#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


def npairloss(anchor, positive, labels, l2_reg=0.002):
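    """Numpy reference implementation of the npair loss.

    The labels are expanded into a row-normalized pairwise same-class
    target matrix, the anchor/positive dot-product similarity matrix is
    scored with softmax cross entropy, and an L2 penalty on both sets of
    embeddings (scaled by l2_reg) is added.
    """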
    def softmax_cross_entropy_with_logits(logits, labels):
        logits = np.exp(logits)
        logits = logits / np.sum(logits, axis=1).reshape(-1, 1)

        return np.mean(
            -np.sum(labels * np.log(logits), axis=1), dtype=np.float32
        )

    batch_size = labels.shape[0]

    labels = np.reshape(labels, (batch_size, 1))
    labels = np.equal(labels, labels.transpose()).astype(float)
    labels = labels / np.sum(labels, axis=1, keepdims=True)

    l2loss = np.mean(np.sum(np.power(anchor, 2), 1)) + np.mean(
        np.sum(np.power(positive, 2), 1)
    )
    l2loss = (l2loss * 0.25 * l2_reg).astype(np.float32)

    similarity_matrix = np.matmul(anchor, positive.transpose())
    celoss = np.mean(
        softmax_cross_entropy_with_logits(similarity_matrix, labels)
    )

    return l2loss + celoss


class TestNpairLossOp(unittest.TestCase):
    def setUp(self):
        self.dtype = np.float32

    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        np.testing.assert_allclose(
            np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg
        )

    def test_npair_loss(self):
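        """Check npair_loss against the numpy reference implementation."""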
        reg_lambda = 0.002
        num_data, feat_dim, num_classes = 18, 6, 3

        place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

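        # random embeddings and labels, plus the expected loss from the
        # numpy reference implementation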
        embeddings_anchor = np.random.rand(num_data, feat_dim).astype(
            np.float32
        )
        embeddings_positive = np.random.rand(num_data, feat_dim).astype(
            np.float32
        )
        row_labels = np.random.randint(0, num_classes, size=(num_data)).astype(
            np.float32
        )
        out_loss = npairloss(
            embeddings_anchor,
            embeddings_positive,
            row_labels,
            l2_reg=reg_lambda,
        )

        anc = fluid.layers.data(
            dtype='float32',
            name='anc',
            shape=embeddings_anchor.shape,
            append_batch_size=False,
        )
        pos = fluid.layers.data(
            dtype='float32',
            name='pos',
            shape=embeddings_positive.shape,
            append_batch_size=False,
        )
        lab = fluid.layers.data(
            dtype='float32',
            name='lab',
            shape=row_labels.shape,
            append_batch_size=False,
        )

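        # run the operator and compare its output with the reference value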
        npair_loss_op = paddle.nn.functional.npair_loss(
            anchor=anc, positive=pos, labels=lab, l2_reg=reg_lambda
        )
        out_tensor = exe.run(
            feed={
                'anc': embeddings_anchor,
                'pos': embeddings_positive,
                'lab': row_labels,
            },
            fetch_list=[npair_loss_op.name],
        )

        self.__assert_close(
            out_tensor,
            out_loss,
            "inference output are different at "
            + str(place)
            + ", "
            + str(np.dtype('float32'))
            + str(np.array(out_tensor))
            + str(out_loss),
            atol=1e-3,
        )


class TestNpairLossOpError(unittest.TestCase):
    def test_errors(self):
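        """Invalid input types and dtypes should raise TypeError."""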
        with program_guard(Program(), Program()):
            anchor_np = np.random.random((2, 4)).astype("float32")
            positive_np = np.random.random((2, 4)).astype("float32")
            labels_np = np.random.random((2)).astype("float32")
            anchor_data = fluid.data(
                name='anchor', shape=[2, 4], dtype='float32'
            )
            positive_data = fluid.data(
                name='positive', shape=[2, 4], dtype='float32'
            )
            labels_data = fluid.data(name='labels', shape=[2], dtype='float32')

            def test_anchor_Variable():
                # the anchor type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_np, positive=positive_data, labels=labels_data
                )

            def test_positive_Variable():
                # the positive type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data, positive=positive_np, labels=labels_data
                )

            def test_labels_Variable():
                # the labels type must be Variable
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data, positive=positive_data, labels=labels_np
                )

            self.assertRaises(TypeError, test_anchor_Variable)
            self.assertRaises(TypeError, test_positive_Variable)
            self.assertRaises(TypeError, test_labels_Variable)

            def test_anchor_type():
                # dtype must be float32 or float64
                anchor_data1 = fluid.data(
                    name='anchor1', shape=[2, 4], dtype='int32'
                )
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data1,
                    positive=positive_data,
                    labels=labels_data,
                )

            def test_positive_type():
                # dtype must be float32 or float64
                positive_data1 = fluid.data(
                    name='positive1', shape=[2, 4], dtype='int32'
                )
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data,
                    positive=positive_data1,
                    labels=labels_data,
                )

            def test_labels_type():
                # dtype must be float32 or float64
                labels_data1 = fluid.data(
                    name='labels1', shape=[2], dtype='int32'
                )
                paddle.nn.functional.npair_loss(
                    anchor=anchor_data,
                    positive=positive_data,
                    labels=labels_data1,
                )

            self.assertRaises(TypeError, test_anchor_type)
            self.assertRaises(TypeError, test_positive_type)
            self.assertRaises(TypeError, test_labels_type)


if __name__ == '__main__':
    unittest.main()