text_utils.py
import os

import numpy as np
from gensim.utils import tokenize

from ..wdtypes import *  # noqa: F403
from .fastai_transforms import Vocab, Tokenizer

__all__ = ["simple_preprocess", "get_texts", "pad_sequences", "build_embeddings_matrix"]


def simple_preprocess(
    doc: str,
    lower: bool = False,
    deacc: bool = False,
    min_len: int = 2,
    max_len: int = 15,
) -> List[str]:
    r"""
    This is ``Gensim``'s :obj:`simple_preprocess` with a ``lower`` param to
    indicate whether or not to lower case all the tokens in the doc

    For more information see: ``Gensim`` `utils module
    <https://radimrehurek.com/gensim/utils.html>`_. Returns the list of tokens
    for a given doc

    Parameters
    ----------
    doc: str
        Input document.
    lower: bool, default = False
        Lower case tokens in the input doc
    deacc: bool, default = False
        Remove accent marks from tokens using ``Gensim``'s :obj:`deaccent`
    min_len: int, default = 2
        Minimum length of token (inclusive). Shorter tokens are discarded.
    max_len: int, default = 15
        Maximum length of token in result (inclusive). Longer tokens are discarded.

    Examples
    --------
    >>> from pytorch_widedeep.utils import simple_preprocess
    >>> simple_preprocess('Machine learning is great')
    ['Machine', 'learning', 'is', 'great']
    """
    tokens = [
        token
        for token in tokenize(doc, lower=lower, deacc=deacc, errors="ignore")
        if min_len <= len(token) <= max_len and not token.startswith("_")
    ]
    return tokens


def get_texts(texts: List[str]) -> List[List[str]]:
    r"""Tokenization using ``Fastai``'s :obj:`Tokenizer` because it does a
    series of very convenient things during the tokenization process

    See :class:`pytorch_widedeep.utils.fastai_transforms.Tokenizer`

    Returns a list containing the tokens per text or document

    Parameters
    ----------
    texts: List
        List of str with the texts (or documents). One str per document

    Examples
    --------
    >>> from pytorch_widedeep.utils import get_texts
    >>> texts = ['Machine learning is great', 'but building stuff is even better']
    >>> get_texts(texts)
    [['xxmaj', 'machine', 'learning', 'is', 'great'], ['but', 'building', 'stuff', 'is', 'even', 'better']]

    .. note:: :obj:`get_texts` uses :class:`pytorch_widedeep.utils.fastai_transforms.Tokenizer`.
        Such tokenizer uses a series of convenient processing steps, including
        the addition of some special tokens, such as ``TK_MAJ`` (`xxmaj`), used to
        indicate that the next word begins with a capital in the original text. For
        more details on special tokens please see the ``fastai`` `docs
        <https://docs.fast.ai/text.transform.html#Tokenizer>`_.
    """
    processed_texts = [" ".join(simple_preprocess(t)) for t in texts]
    tok = Tokenizer().process_all(processed_texts)
    return tok


def pad_sequences(
    seq: List[int], maxlen: int, pad_first: bool = True, pad_idx: int = 1
) -> np.ndarray:
    r"""
    Given a List of tokenized and `numericalised` sequences it will return
    padded sequences according to the input parameters.

    Parameters
    ----------
    seq: List
        List of int with the `numericalised` tokens
    maxlen: int
        Maximum length of the padded sequences
    pad_first: bool, default = True
        Indicates whether the padding index will be added at the beginning or the
        end of the sequences
    pad_idx: int, default = 1
        Padding index. Fastai's Tokenizer reserves 0 for the 'unknown' token.

    Examples
    --------
    >>> from pytorch_widedeep.utils import pad_sequences
    >>> seq = [1,2,3]
    >>> pad_sequences(seq, maxlen=5, pad_idx=0)
    array([0, 0, 1, 2, 3], dtype=int32)
    """
    if len(seq) == 0:
        return np.zeros(maxlen, dtype="int32") + pad_idx
    elif len(seq) >= maxlen:
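        # sequences longer than maxlen are truncated, keeping the last maxlen tokens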
        res = np.array(seq[-maxlen:]).astype("int32")
        return res
    else:
        res = np.zeros(maxlen, dtype="int32") + pad_idx
        if pad_first:
            res[-len(seq) :] = seq
        else:
            res[: len(seq)] = seq
        return res


def build_embeddings_matrix(
    vocab: Vocab, word_vectors_path: str, min_freq: int, verbose: int = 1
) -> np.ndarray:  # pragma: no cover
    r"""Build the embedding matrix using pretrained word vectors.

    Returns the pretrained word embeddings. If a word in our vocabulary is not
    among the pretrained embeddings, it will be assigned the mean pretrained
    word-embeddings vector.

    Parameters
    ----------
    vocab: Vocab
        see :class:`pytorch_widedeep.utils.fastai_transforms.Vocab`
    word_vectors_path: str
        path to the pretrained word embeddings
    min_freq: int
        minimum frequency required for a word to be in the vocabulary
    verbose: int, default = 1
        level of verbosity. Set to 0 for no verbosity
    """
    if not os.path.isfile(word_vectors_path):
        raise FileNotFoundError("{} not found".format(word_vectors_path))
    if verbose:
        print("Indexing word vectors...")

    embeddings_index = {}
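    # each line of the word-vectors file is expected to contain a word followed
    # by its vector components: "<word> <v1> <v2> ... <vn>"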
    with open(word_vectors_path) as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype="float32")
            embeddings_index[word] = coefs

    if verbose:
        print("Loaded {} word vectors".format(len(embeddings_index)))
        print("Preparing embeddings matrix...")

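    # words in the vocabulary without a pretrained vector are assigned the mean
    # of all pretrained word vectors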
    mean_word_vector = np.mean(list(embeddings_index.values()), axis=0)  # type: ignore[arg-type]
    embedding_dim = len(list(embeddings_index.values())[0])
    num_words = len(vocab.itos)
    embedding_matrix = np.zeros((num_words, embedding_dim))
    found_words = 0
    for i, word in enumerate(vocab.itos):
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
            found_words += 1
        else:
            embedding_matrix[i] = mean_word_vector

    if verbose:
        print(
            "{} words in the vocabulary had {} vectors and appear more than {} times".format(
                found_words, word_vectors_path, min_freq
            )
        )

    return embedding_matrix.astype("float32")
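

if __name__ == "__main__":  # pragma: no cover
    # Usage sketch (illustrative only, not part of the public API): shows how the
    # helpers above are typically chained together, assuming the fastai-v1-style
    # ``Vocab`` API (``create``/``numericalize``) imported from fastai_transforms.
    # The GloVe file path in the commented-out call is a hypothetical placeholder.
    texts = ["Machine learning is great", "but building stuff is even better"]
    tokens = get_texts(texts)
    vocab = Vocab.create(tokens, max_vocab=30000, min_freq=1)
    sequences = [vocab.numericalize(t) for t in tokens]
    padded = np.vstack([pad_sequences(s, maxlen=10) for s in sequences])
    print(padded.shape)  # (2, 10)
    # requires a local copy of pretrained word vectors, e.g. GloVe:
    # embedding_matrix = build_embeddings_matrix(
    #     vocab, word_vectors_path="glove.6B.100d.txt", min_freq=1
    # )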