diff --git a/ppocr/modeling/heads/rec_att_head.py b/ppocr/modeling/heads/rec_att_head.py
index 6d77e42eb5def579052687ab6fdc265159311884..7926f0741f51135760cc90b6419d13bafa0cbaee 100644
--- a/ppocr/modeling/heads/rec_att_head.py
+++ b/ppocr/modeling/heads/rec_att_head.py
@@ -45,6 +45,7 @@ class AttentionHead(nn.Layer):
         output_hiddens = []
 
         if targets is not None:
+            print("target is not None")
             for i in range(num_steps):
                 char_onehots = self._char_to_onehot(
                     targets[:, i], onehot_dim=self.num_classes)
@@ -53,8 +54,8 @@ class AttentionHead(nn.Layer):
                 output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
             output = paddle.concat(output_hiddens, axis=1)
             probs = self.generator(output)
-
         else:
+            print("target is None")
             targets = paddle.zeros(shape=[batch_size], dtype="int32")
             probs = None
             char_onehots = None
@@ -75,6 +76,7 @@ class AttentionHead(nn.Layer):
                             probs_step, axis=1)], axis=1)
                 next_input = probs_step.argmax(axis=1)
                 targets = next_input
+
         if not self.training:
             probs = paddle.nn.functional.softmax(probs, axis=2)
         return probs
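
For context, the two debug prints distinguish the head's two decoding paths: the teacher-forcing branch (ground-truth `targets` fed step by step during training) and the greedy autoregressive branch (no `targets`; each step's argmax is fed back as the next input). Below is a minimal sketch of triggering both paths, assuming the usual PaddleOCR constructor signature `AttentionHead(in_channels, out_channels, hidden_size)`; the channel count, class count, and tensor shapes are illustrative assumptions, not values from this patch.

```python
import paddle
from ppocr.modeling.heads.rec_att_head import AttentionHead

# Illustrative sizes (assumptions): 96 feature channels, 38 character classes.
head = AttentionHead(in_channels=96, out_channels=38, hidden_size=96)
feats = paddle.randn([2, 25, 96])  # (batch, time steps, in_channels)

# Training-style call: labels are supplied, so the first branch runs
# (teacher forcing) and this patch prints "target is not None".
head.train()
labels = paddle.randint(low=0, high=38, shape=[2, 25], dtype="int64")
probs_train = head(feats, targets=labels)

# Inference-style call: no targets, so the head decodes autoregressively
# and prints "target is None"; since self.training is False, the final
# softmax over the class axis is applied before returning.
head.eval()
probs_infer = head(feats)
```

Note that the branch taken depends on whether `targets` is passed, not on `self.training`; only the trailing softmax is gated on the training flag, which is why the patch also separates that check with a blank line.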