From f26846ccd1168f81f8173c06b3dcc6e630394693 Mon Sep 17 00:00:00 2001
From: tink2123
Date: Mon, 22 Nov 2021 12:07:32 +0800
Subject: [PATCH] fix attention loss for ce

---
 ppocr/modeling/heads/rec_att_head.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py
index 6d77e42e..7926f074 100644
--- a/ppocr/modeling/heads/rec_att_head.py
+++ b/ppocr/modeling/heads/rec_att_head.py
@@ -45,6 +45,7 @@ class AttentionHead(nn.Layer):
         output_hiddens = []
 
         if targets is not None:
+            print("target is not None")
             for i in range(num_steps):
                 char_onehots = self._char_to_onehot(
                     targets[:, i], onehot_dim=self.num_classes)
@@ -53,8 +54,8 @@
                 output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
             output = paddle.concat(output_hiddens, axis=1)
             probs = self.generator(output)
-
         else:
+            print("target is None")
             targets = paddle.zeros(shape=[batch_size], dtype="int32")
             probs = None
             char_onehots = None
@@ -75,6 +76,7 @@
                             probs_step, axis=1)], axis=1)
                 next_input = probs_step.argmax(axis=1)
                 targets = next_input
+
         if not self.training:
             probs = paddle.nn.functional.softmax(probs, axis=2)
         return probs
-- 
GitLab
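
Note (not part of the patch): the added print calls sit on the two branches of AttentionHead.forward, the teacher-forcing branch taken when ground-truth targets are supplied and the step-by-step greedy branch taken when they are not, with softmax applied only outside training. A minimal sketch of how both branches could be exercised follows; the constructor arguments (in_channels, out_channels, hidden_size), the call signature, and the tensor shapes are illustrative assumptions, not taken from the diff.

import paddle
from ppocr.modeling.heads.rec_att_head import AttentionHead

# Assumed constructor: feature channels, number of character classes, hidden size.
head = AttentionHead(in_channels=96, out_channels=38, hidden_size=96)

feats = paddle.randn([2, 25, 96])          # assumed encoder features: [batch, steps, channels]
labels = paddle.randint(0, 38, [2, 25])    # assumed character indices for teacher forcing

# Training branch: targets are provided, each step is fed the ground-truth
# character as a one-hot vector, and raw logits are returned because
# self.training is True (softmax is skipped).
head.train()
logits = head(feats, labels)   # would print "target is not None" with this patch

# Inference branch: no targets, the head decodes greedily one step at a time,
# and softmax is applied before returning because self.training is False.
head.eval()
probs = head(feats)            # would print "target is None" with this patch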