#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
import paddle
import paddle.nn as nn
from paddle.fluid.framework import core, _non_static_mode, _test_eager_guard
from paddle.fluid.layer_helper import LayerHelper
from paddle import _legacy_C_ops

import sys
import tempfile

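# The reference BertTokenizer implementation lives in the local ./tokenizer package.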
sys.path.append("./tokenizer")
from tokenizer.bert_tokenizer import BertTokenizer


def to_string_tensor(string_values, name):
    """
    Create a tensor whose value holds a list of strings.
    NOTICE: The value will be held on the CPU place.

    Args:
        string_values(list[string]): The value to be set in the tensor.
        name(string): The name of the tensor.
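
    Example (illustrative usage only):
        text_tensor = to_string_tensor(["hello", "world"], "text")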
    """
    tensor = paddle.Tensor(core.VarDesc.VarType.STRING, [], name,
                           core.VarDesc.VarType.STRINGS, False)
    tensor.value().set_string_list(string_values)
    return tensor


def to_map_tensor(string_dict, name):
    """
    Create a tensor whose value holds a map from string keys to int values.
    NOTICE: The value will be held on the CPU place.

    Args:
        string_dict(dict): The value to be set in the tensor.
        name(string): The name of the tensor.
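
    Example (illustrative usage only):
        vocab_tensor = to_map_tensor({"[PAD]": 0, "[UNK]": 1}, "vocab")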
    """
    tensor = paddle.Tensor(core.VarDesc.VarType.RAW, [], name,
                           core.VarDesc.VarType.VOCAB, True)
    tensor.value().set_vocab(string_dict)
    return tensor


class FasterTokenizer(nn.Layer):

    def __init__(self, vocab_dict):
        super(FasterTokenizer, self).__init__()
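        # Convert the Python vocab dict to a VOCAB tensor and register it as a
        # persistable buffer so it is saved and restored with the model.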
        vocab_tensor = to_map_tensor(vocab_dict, "vocab")
        self.register_buffer("vocab", vocab_tensor, persistable=True)

    def forward(self,
                text,
                text_pair=None,
                do_lower_case=True,
                max_seq_len=-1,
                is_split_into_words=False,
                pad_to_max_seq_len=False):
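        # Dynamic graph (dygraph) mode: call the faster_tokenizer op directly.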
        if _non_static_mode():
            input_ids, seg_ids = _legacy_C_ops.faster_tokenizer(
                self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
            return input_ids, seg_ids

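        # Static graph mode: append a faster_tokenizer op to the program via LayerHelper.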
        attrs = {
            "do_lower_case": do_lower_case,
            "max_seq_len": max_seq_len,
            "pad_to_max_seq_len": pad_to_max_seq_len,
            "is_split_into_words": is_split_into_words,
        }
        helper = LayerHelper("faster_tokenizer")
        input_ids = helper.create_variable_for_type_inference(dtype="int64")
        seg_ids = helper.create_variable_for_type_inference(dtype="int64")
        if text_pair is None:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        else:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text,
                                 'TextPair': text_pair
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        return input_ids, seg_ids


class Predictor(object):

    def __init__(self, model_dir):
        model_file = os.path.join(model_dir, "inference.pdmodel")
        params_file = os.path.join(model_dir, "inference.pdiparams")
        if not os.path.exists(model_file):
            raise ValueError("Cannot find model file path {}".format(model_file))
        if not os.path.exists(params_file):
            raise ValueError("Cannot find params file path {}".format(params_file))
        config = paddle.inference.Config(model_file, params_file)

        # The faster_tokenizer op only supports CPU.
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(10)

        config.switch_use_feed_fetch_ops(False)
        self.predictor = paddle.inference.create_predictor(config)
        self.input_handles = [
            self.predictor.get_input_handle(name)
            for name in self.predictor.get_input_names()
        ]
        self.output_handles = [
            self.predictor.get_output_handle(name)
            for name in self.predictor.get_output_names()
        ]

    def predict(self, data):

        self.input_handles[0].copy_from_cpu(data)
        self.predictor.run()
        input_ids = self.output_handles[0].copy_to_cpu()
        token_type_ids = self.output_handles[1].copy_to_cpu()
        return input_ids, token_type_ids


class TestBertTokenizerOp(unittest.TestCase):

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.bert_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        self.save_path = os.path.join(self.temp_dir.name, "fast_tokenizer")
        self.param_path = os.path.join(self.save_path, "model.pdparams")
        self.inference_path = os.path.join(self.save_path, "inference")

    def tearDown(self):
        self.temp_dir.cleanup()

    def init_data(self):
        self.faster_tokenizer = FasterTokenizer(self.bert_tokenizer.vocab)
        self.text = [
            '选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。'
            '酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,'
            '还算丰富。 服务吗,一般'
        ]
        self.text_pair = ['非常不错,服务很好,位于市中心区,交通方便,不过价格也高!']
        self.text_tensor = to_string_tensor(self.text, "text")
        self.text_pair_tensor = to_string_tensor(self.text_pair, "text_pair")
        self.texts = [
            '很好的地理位置,一蹋糊涂的服务,萧条的酒店。',
            ' 选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,'
            '但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般',
            'Test bert tokenizer. The first text.'
        ]
        self.text_pairs = [
            '非常不错,服务很好,位于市中心区,交通方便,不过价格也高!', '房间太小。其他的都一般。。。。。。。。。',
            'Test bert tokenizer. The second text.'
        ]
        self.texts_tensor = to_string_tensor(self.texts, "texts")
        self.text_pairs_tensor = to_string_tensor(self.text_pairs, "text_pairs")

    def run_padding(self):
        self.init_data()
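        # Compare the faster_tokenizer op against the pure Python BertTokenizer
        # with padding enabled, for single texts, text pairs, and batches.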
        self.max_seq_len = 128
        self.pad_to_max_seq_len = True
        self.is_split_into_words = False

        # case 1: only one text (batch_size = 1)
        input_ids, token_type_ids = self.faster_tokenizer(
            text=self.text_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            text=self.text,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(
            encoded_inputs[0]["token_type_ids"]).reshape([1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

        # case 2: only one text and one text_pair (batch_size = 1)
        input_ids, token_type_ids = self.faster_tokenizer(
            text=self.text_tensor,
            text_pair=self.text_pair_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            text=self.text,
            text_pair=self.text_pair,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(
            encoded_inputs[0]["token_type_ids"]).reshape([1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

        # case 3: only texts (batch_size = 3)
        input_ids, token_type_ids = self.faster_tokenizer(
            text=self.texts_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            self.texts,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = [i["input_ids"] for i in encoded_inputs]
        py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs]
        py_input_ids = np.array(py_input_ids).reshape([3, -1])
        py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

        # case 4: texts and text pairs (batch_size = 3)
        input_ids, token_type_ids = self.faster_tokenizer(
            text=self.texts_tensor,
            text_pair=self.text_pairs_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            self.texts,
            self.text_pairs,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = [i["input_ids"] for i in encoded_inputs]
        py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs]
        py_input_ids = np.array(py_input_ids).reshape([3, -1])
        py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

    def test_padding(self):
        with _test_eager_guard():
            self.run_padding()
        self.run_padding()

    def run_no_padding(self):
        self.init_data()
        self.max_seq_len = 128
        self.pad_to_max_seq_len = False
        self.is_split_into_words = False

        # case 1: only one text (batch_size = 1)
        input_ids, token_type_ids = self.faster_tokenizer(
            text=self.text_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            self.text,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(
            encoded_inputs[0]["token_type_ids"]).reshape([1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

        # case 2: only one text and one text_pair (batch_size = 1)
        input_ids, token_type_ids = self.faster_tokenizer(
            self.text_tensor,
            self.text_pair_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()

        encoded_inputs = self.bert_tokenizer(
            self.text,
            self.text_pair,
            max_seq_len=self.max_seq_len,
            pad_to_max_seq_len=self.pad_to_max_seq_len,
            is_split_into_words=self.is_split_into_words)
        py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(
            encoded_inputs[0]["token_type_ids"]).reshape([1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

    def test_no_padding(self):
        with _test_eager_guard():
            self.run_no_padding()
        self.run_no_padding()

    def run_is_split_into_words(self):
        self.init_data()
        self.is_split_into_words = True

        input_ids, token_type_ids = self.faster_tokenizer(
            self.text_tensor,
            do_lower_case=self.bert_tokenizer.do_lower_case,
            is_split_into_words=self.is_split_into_words)
        input_ids = input_ids.numpy()
        token_type_ids = token_type_ids.numpy()
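        # With is_split_into_words=True the reference tokenizer expects pre-split
        # input, so the text is passed as a list of characters here.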
        encoded_inputs = self.bert_tokenizer(
            list(self.text[0]), is_split_into_words=self.is_split_into_words)
        py_input_ids = np.array(encoded_inputs["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(encoded_inputs["token_type_ids"]).reshape(
            [1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

    def test_is_split_into_words(self):
        with _test_eager_guard():
            self.run_is_split_into_words()
        self.run_is_split_into_words()

    def test_inference(self):
        self.init_data()
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path, exist_ok=True)
        paddle.save(self.faster_tokenizer.state_dict(), self.param_path)
        state_dict = paddle.load(self.param_path)
        self.faster_tokenizer.set_dict(state_dict)

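        # Convert the dygraph layer to a static graph program; the input spec
        # declares a variable-length STRINGS tensor for the raw text.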
        static_model = paddle.jit.to_static(
            self.faster_tokenizer,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[None], dtype=core.VarDesc.VarType.STRINGS),  # texts
            ])
        # Save as a static graph inference model.
        paddle.jit.save(static_model, self.inference_path)
        predictor = Predictor(self.save_path)
        input_ids, token_type_ids = predictor.predict(self.text)

        encoded_inputs = self.bert_tokenizer(self.text)
        py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1])
        py_token_type_ids = np.array(
            encoded_inputs[0]["token_type_ids"]).reshape([1, -1])
        np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01)
        np.testing.assert_allclose(token_type_ids,
                                   py_token_type_ids,
                                   rtol=0,
                                   atol=0.01)

    def test_feed_string_var(self):
        self.init_data()
        paddle.enable_static()
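        # Build a static program that takes a STRINGS variable as feed input.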
        x = paddle.static.data(name="x",
                               shape=[-1],
                               dtype=core.VarDesc.VarType.STRINGS)
        exe = paddle.static.Executor(paddle.framework.CPUPlace())
        exe.run(paddle.static.default_main_program(), feed={'x': self.text})
        paddle.disable_static()


if __name__ == '__main__':
    unittest.main()