#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest
from test_softmax_op import stable_softmax
import paddle.fluid.core as core


class TestSequenceSoftmaxOp(OpTest):
    def setUp(self):
        self.op_type = "sequence_softmax"
        self.use_cudnn = False
        self.init_op_type()

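        # Input: 11 time steps with one feature each, packed into a single
        # flat batch.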
        x = np.random.uniform(0.1, 1, (11, 1)).astype("float32")
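        # Length-based LoD: four sequences of lengths 4, 1, 3, 3 (sum = 11).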
        lod = [[4, 1, 3, 3]]

        out = np.zeros((11, 1)).astype("float32")
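        # Reference output: apply stable_softmax to each sequence slice
        # independently and write the result back at the same offsets.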
        offset = 0
        for i in range(len(lod[0])):
            sub_x = x[offset:offset + lod[0][i], :]
            sub_x = sub_x.reshape(1, lod[0][i])
            sub_out = stable_softmax(sub_x)
            out[offset:offset + lod[0][i], :] = sub_out.reshape(lod[0][i], 1)
            offset += lod[0][i]

        self.inputs = {"X": (x, lod)}
        self.outputs = {"Out": out}
        self.attrs = {'use_cudnn': self.use_cudnn}

    def init_op_type(self):
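        # Hook for subclasses (e.g. the cuDNN variant below) to flip use_cudnn.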
        pass

    def test_check_output(self):
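        # The cuDNN kernel runs on a CUDA place and is compared against the
        # NumPy reference with a small absolute tolerance.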
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
        else:
            self.check_output()

    def test_check_grad(self):
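        # Check the gradient w.r.t. X numerically, allowing 1% relative error.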
        if self.use_cudnn:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ["X"], "Out", max_relative_error=0.01)
        else:
            self.check_grad(["X"], "Out", max_relative_error=0.01)


# ---------------- cuDNN SequenceSoftmax ----------------
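# The cuDNN variant reruns the output and gradient checks with use_cudnn=True.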
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSequenceSoftmaxCUDNNOp(TestSequenceSoftmaxOp):
    def init_op_type(self):
        self.use_cudnn = True


if __name__ == "__main__":
    unittest.main()