# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import io
import json
import os
import six
import unicodedata

from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation

__all__ = ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']


class BasicTokenizer(object):
    """
    Runs basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (bool): Whether to lower case the input text and strip
            accents. For the BERT pretrained models, set this to False when
            using a cased model and to True when using an uncased model.
            Default: True.
    """

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer."""
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """
        Tokenizes a piece of text using basic tokenizer.

        Args:
            text (str): A piece of text.

        Returns:
            list(str): A list of tokens.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """
        Strips accents from a piece of text.
        """
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """
        Splits punctuation on a piece of text.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """
        Adds whitespace around any CJK character.
        """
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """
        Checks whether CP is the codepoint of a CJK character.
        """
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
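        # The ranges checked below are the CJK Unified Ideographs block, its
        # Extensions A-E, and the two CJK Compatibility Ideographs blocks.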
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
            (cp >= 0x3400 and cp <= 0x4DBF) or  #
            (cp >= 0x20000 and cp <= 0x2A6DF) or  #
            (cp >= 0x2A700 and cp <= 0x2B73F) or  #
            (cp >= 0x2B740 and cp <= 0x2B81F) or  #
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or  #
            (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True

        return False

    def _clean_text(self, text):
        """
        Performs invalid character removal and whitespace cleanup on text.
        """
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """
    Runs WordPiece tokenization.

    Args:
        vocab (Vocab|dict): Vocab of the word piece tokenizer.
        unk_token (str): A specific token to replace all unknown tokens.
        max_input_chars_per_word (int): If a word is longer than
            max_input_chars_per_word, it will be treated as an unknown word.
            Default: 100.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            list (str): A list of wordpiece tokens.

        Example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens


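# A minimal sketch (in comment form, not part of the module) of how
# BasicTokenizer and WordpieceTokenizer compose; this is the same pipeline
# that BertTokenizer._tokenize runs below. The tiny vocabulary is made up
# purely for illustration and is not a real BERT vocabulary:
#
#     basic = BasicTokenizer(do_lower_case=True)
#     wordpiece = WordpieceTokenizer(
#         vocab={"he": 0, "was": 1, "a": 2, "puppet": 3, "##eer": 4},
#         unk_token="[UNK]")
#     tokens = []
#     for token in basic.tokenize("He was a puppeteer"):
#         tokens.extend(wordpiece.tokenize(token))
#     # tokens == ['he', 'was', 'a', 'puppet', '##eer']

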
class BertTokenizer(PretrainedTokenizer):
    """
    Constructs a BERT tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and then uses a WordPiece tokenizer to
    split the resulting tokens into subwords.

    Args:
        vocab_file (str): File path of the vocabulary.
        do_lower_case (bool): Whether to lower case the input text and strip
            accents. For the BERT pretrained models, set this to False when
            using a cased model and to True when using an uncased model.
            Default: True.
        unk_token (str): The special token for unknown words. Default: "[UNK]".
        sep_token (str): The special token for separator. Default: "[SEP]".
        pad_token (str): The special token for padding. Default: "[PAD]".
        cls_token (str): The special token for cls. Default: "[CLS]".
        mask_token (str): The special token for mask. Default: "[MASK]".

    Examples:
        .. code-block:: python

            from paddle.hapi.text import BertTokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

            # the following line gets: ['he', 'was', 'a', 'puppet', '##eer']
            tokens = tokenizer('He was a puppeteer')
            # the following line gets: 'he was a puppeteer'
            tokenizer.convert_tokens_to_string(tokens)

    """
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "bert-base-uncased": {
            "do_lower_case": True
        },
        "bert-large-uncased": {
            "do_lower_case": True
        },
        "bert-base-cased": {
            "do_lower_case": False
        },
        "bert-large-cased": {
            "do_lower_case": False
        },
        "bert-base-multilingual-uncased": {
            "do_lower_case": True
        },
        "bert-base-multilingual-cased": {
            "do_lower_case": False
        },
        "bert-base-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-ext-chinese": {
            "do_lower_case": False
        },
    }

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Returns the size of the vocabulary.

        Returns:
            int: The size of the vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        """
        End-to-end tokenization for BERT models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of strings representing the converted tokens.
        """
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def __call__(self, text):
        """
        End-to-end tokenization for BERT models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of strings representing the converted tokens.
        """
        return self._tokenize(text)

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of strings) into a single string.
        Since WordPiece prefixes subwords with `##`, the `##` markers are
        removed when concatenating.

        Args:
            tokens (list): A list of strings representing tokens to be converted.
        Returns:
            str: Converted string from tokens.
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of tokens added when encoding a sequence with
        special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is
            therefore not efficient. Do not put this inside your training loop.

        Args:
            pair (bool): If set to True, returns the number of tokens added to
                a sequence pair; if set to False, returns the number of tokens
                added to a single sequence.

        Returns:
            int: Number of tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence
        classification tasks by concatenating and adding special tokens. A BERT
        sequence has the following format:

        ::

            - single sequence: ``[CLS] X [SEP]``
            - pair of sequences: ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of input_ids with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a
        sequence-pair classification task. A BERT sequence pair mask has the
        following format:

        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence      | second sequence |

        If :obj:`token_ids_1` is :obj:`None`, this method only returns the
        first portion of the mask (0s).

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of token_type_ids according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]

    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens
        added. This method is called when adding special tokens using the
        tokenizer ``encode`` methods.

        Args:
            token_ids_0 (List[int]): List of ids of the first sequence.
            token_ids_1 (List[int], optional): List of ids of the second sequence.
            already_has_special_tokens (bool, optional): Whether or not the
                token list is already formatted with special tokens for the
                model. Defaults to False.

        Returns:
            results (List[int]): The list of integers in the range [0, 1]:
                1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def encode(self,
               text,
               text_pair=None,
               max_seq_len=None,
               pad_to_max_seq_len=True,
               truncation_strategy="longest_first",
               return_position_ids=True,
               return_segment_ids=True,
               return_input_mask=True,
               return_length=True,
               return_overflowing_tokens=False,
               return_special_tokens_mask=False):
        """
        Returns a dictionary containing the encoded sequence or sequence pair
        and additional information: the mask for sequence classification and
        the overflowing elements if a ``max_seq_len`` is specified.

        Args:
            text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
                The first sequence to be encoded. This can be a string, a list
                of strings (tokenized string using the `tokenize` method) or a
                list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method).
            text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second sequence to be encoded. This can be a string, a
                list of strings (tokenized string using the `tokenize` method)
                or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method).
            max_seq_len (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, will limit the total sequence returned so
                that it has a maximum length. If there are overflowing tokens,
                those will be added to the returned dictionary.
            pad_to_max_seq_len (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set to True, the returned sequences will be padded according
                to the model's padding side and padding index, up to their max
                length. If no max length is specified, the padding is done up
                to the model's max length.
            truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
                String selected in the following options:

                - 'longest_first' (default): Iteratively reduce the inputs
                  until they fit under max_seq_len, removing one token at a
                  time from the longest sequence (when there is a pair of
                  input sequences).
                - 'only_first': Only truncate the first sequence.
                - 'only_second': Only truncate the second sequence.
                - 'do_not_truncate': Do not truncate (raise an error if the
                  input sequence is longer than max_seq_len).
            return_position_ids (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Set to True to return token position ids (default True).
            return_segment_ids (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether to return token type IDs.
            return_input_mask (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether to return the attention mask.
            return_length (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set, the resulting dictionary will include the length of
                each encoded input.
            return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return overflowing token information (default False).
            return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return special tokens mask information (default False).
        Returns:
            A dictionary of shape::

                {
                    input_ids: list[int],
                    position_ids: list[int] if return_position_ids is True (default)
                    segment_ids: list[int] if return_segment_ids is True (default)
                    input_mask: list[int] if return_input_mask is True (default)
                    seq_len: int if return_length is True (default)
                    overflowing_tokens: list[int] if a ``max_seq_len`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: int if a ``max_seq_len`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: list[int] if return_special_tokens_mask is True
                }

            With the fields:

            - ``input_ids``: list of token ids to be fed to a model
            - ``position_ids``: list of token position ids to be fed to a model
            - ``segment_ids``: list of token type ids to be fed to a model
            - ``input_mask``: list of indices specifying which tokens should be attended to by the model
            - ``seq_len``: the length of ``input_ids``
            - ``overflowing_tokens``: list of overflowing tokens if a max length is specified
            - ``num_truncated_tokens``: number of overflowing tokens when a ``max_seq_len`` is specified
            - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens
        """

        def get_input_ids(text):
            if isinstance(text, str):
                tokens = self._tokenize(text)
                return self.convert_tokens_to_ids(tokens)
            elif isinstance(text,
                            (list, tuple)) and len(text) > 0 and isinstance(
                                text[0], str):
                return self.convert_tokens_to_ids(text)
            elif isinstance(text,
                            (list, tuple)) and len(text) > 0 and isinstance(
                                text[0], int):
                return text
            else:
                raise ValueError(
                    "Input is not valid. Should be a string, a list/tuple of "
                    "strings or a list/tuple of integers.")

        ids = get_input_ids(text)
        pair_ids = get_input_ids(text_pair) if text_pair is not None else None

        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        encoded_inputs = {}

        # Truncation: Handle max sequence length
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(
            pair=pair))
        if max_seq_len and total_len > max_seq_len:
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_seq_len,
                truncation_strategy=truncation_strategy, )
            if return_overflowing_tokens:
                encoded_inputs["overflowing_tokens"] = overflowing_tokens
                encoded_inputs["num_truncated_tokens"] = total_len - max_seq_len

        # Add special tokens
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        segment_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        if return_segment_ids:
            encoded_inputs["segment_ids"] = segment_ids
        if return_special_tokens_mask:
            encoded_inputs[
                "special_tokens_mask"] = self.get_special_tokens_mask(ids,
                                                                      pair_ids)
        if return_length:
            encoded_inputs["seq_len"] = len(encoded_inputs["input_ids"])

        # Check lengths
        assert max_seq_len is None or len(encoded_inputs[
            "input_ids"]) <= max_seq_len

        # Padding
        needs_to_be_padded = pad_to_max_seq_len and \
            max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len

        if needs_to_be_padded:
            difference = max_seq_len - len(encoded_inputs["input_ids"])
            if return_input_mask:
                encoded_inputs["input_mask"] = [1] * len(encoded_inputs[
                    "input_ids"]) + [0] * difference
            if return_segment_ids:
                # 0 for padding token mask
                encoded_inputs["segment_ids"] = (
                    encoded_inputs["segment_ids"] + [0] * difference)
            if return_special_tokens_mask:
                encoded_inputs["special_tokens_mask"] = encoded_inputs[
                    "special_tokens_mask"] + [1] * difference
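            # Right-pad the token ids themselves with the pad token id so the
            # final input_ids length equals max_seq_len.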
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [ self.pad_token_id ] * difference else: if return_input_mask: encoded_inputs["input_mask"] = [1] * len(encoded_inputs[ "input_ids"]) if return_position_ids: encoded_inputs["position_ids"] = list( range(len(encoded_inputs["input_ids"]))) return encoded_inputs
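

# ---------------------------------------------------------------------------
# Usage sketch (not part of the library API): a minimal example of how
# BertTokenizer is typically used end to end. It assumes the
# 'bert-base-uncased' vocabulary can be fetched via `from_pretrained`, so it
# needs network access the first time it runs.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # BasicTokenizer + WordpieceTokenizer pipeline:
    # 'He was a puppeteer' -> ['he', 'was', 'a', 'puppet', '##eer']
    tokens = tokenizer("He was a puppeteer")
    print(tokens)

    # Model-ready inputs: token ids with [CLS]/[SEP] added, segment ids, an
    # attention mask, and right-padding up to max_seq_len.
    encoded = tokenizer.encode("He was a puppeteer", max_seq_len=16)
    print(encoded["input_ids"])
    print(encoded["segment_ids"])
    print(encoded["input_mask"])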