From 1bc4acfda7b25888799c462b2f8cb8ce07cd6d72 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 7 Jul 2021 11:08:59 +0000 Subject: [PATCH] Fix x.shape bug when x.shape[1] is -1 --- deepspeech/modules/embedding.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/deepspeech/modules/embedding.py b/deepspeech/modules/embedding.py index fbbda023..8eff1aa7 100644 --- a/deepspeech/modules/embedding.py +++ b/deepspeech/modules/embedding.py @@ -67,10 +67,10 @@ class PositionalEncoding(nn.Layer): paddle.Tensor: Encoded tensor. Its shape is (batch, time, ...) paddle.Tensor: for compatibility to RelPositionalEncoding, (batch=1, time, ...) """ - T = x.shape[1] assert offset + x.shape[1] < self.max_len - #TODO(Hui Zhang): using T = x.size(1), __getitem__ not support Tensor - pos_emb = self.pe[:, offset:offset + T] + # In a frozen (static) graph, x.shape[1] is -1 and offset is 0, + # so the resulting pos_emb is [1, 4999, D], not [1, 5000, D]. + pos_emb = self.pe[:, offset:offset + x.shape[1]] x = x * self.xscale + pos_emb return self.dropout(x), self.dropout(pos_emb) @@ -116,6 +116,7 @@ class RelPositionalEncoding(PositionalEncoding): """ assert offset + x.shape[1] < self.max_len x = x * self.xscale - #TODO(Hui Zhang): using x.size(1), __getitem__ not support Tensor + # In a frozen (static) graph, x.shape[1] is -1 and offset is 0, + # so the resulting pos_emb is [1, 4999, D], not [1, 5000, D]. pos_emb = self.pe[:, offset:offset + x.shape[1]] return self.dropout(x), self.dropout(pos_emb) -- GitLab