diff --git a/ppocr/modeling/heads/self_attention.py b/ppocr/modeling/heads/self_attention.py
index 4e96e4a455c49246f711e4b7888d38ac88c3678e..6c27fdbe434166e9277cc8d695bce2743cbd8ec6 100644
--- a/ppocr/modeling/heads/self_attention.py
+++ b/ppocr/modeling/heads/self_attention.py
@@ -319,9 +319,7 @@ class PrepareEncoder(nn.Layer):
         self.src_emb_dim = src_emb_dim
         self.src_max_len = src_max_len
         self.emb = paddle.nn.Embedding(
-            num_embeddings=self.src_max_len,
-            embedding_dim=self.src_emb_dim,
-            sparse=True)
+            num_embeddings=self.src_max_len, embedding_dim=self.src_emb_dim)
         self.dropout_rate = dropout_rate

     def forward(self, src_word, src_pos):
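
For reference, a minimal standalone sketch of the patched constructor call (assuming Paddle 2.x; the dimensions below are illustrative values, not PaddleOCR's actual configuration). Dropping sparse=True means the constructor falls back to the default sparse=False, so the embedding table receives ordinary dense gradients; sparse gradient updates are a niche optimization for very large tables and are not supported by all optimizers, which is a common reason to remove the flag.

import paddle

# Patched call: no sparse=True, so the default sparse=False applies and
# gradients for the embedding weights are dense.
src_max_len, src_emb_dim = 512, 256  # illustrative sizes, not the repo's config
emb = paddle.nn.Embedding(num_embeddings=src_max_len, embedding_dim=src_emb_dim)

# Positional lookup in the style of PrepareEncoder.forward: integer
# position indices map to learned embedding vectors.
src_pos = paddle.arange(0, 10, dtype="int64")
pos_enc = emb(src_pos)  # shape: [10, src_emb_dim]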