Commit e9c3ff84 authored by T tink2123

fix attention loss for ce

Parent f26846cc
@@ -45,7 +45,6 @@ class AttentionHead(nn.Layer):
         output_hiddens = []
         if targets is not None:
-            print("target is not None")
             for i in range(num_steps):
                 char_onehots = self._char_to_onehot(
                     targets[:, i], onehot_dim=self.num_classes)
@@ -55,7 +54,6 @@ class AttentionHead(nn.Layer):
             output = paddle.concat(output_hiddens, axis=1)
             probs = self.generator(output)
         else:
-            print("target is None")
             targets = paddle.zeros(shape=[batch_size], dtype="int32")
             probs = None
             char_onehots = None
...
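For context, the two hunks sit in the forward pass of AttentionHead (in PaddleOCR this is presumably ppocr/modeling/heads/rec_att_head.py, though the file path is not shown in this view): the first branch runs when targets is provided (training, with teacher forcing on one-hot ground-truth characters), the second when targets is None (inference). The commit only removes the two stray debug prints, presumably so they do not pollute the logs captured by CE (continuous evaluation) runs. Below is a minimal, hedged sketch of that control flow in Paddle. Only the lines visible in the diff context are taken from the source; the class name AttentionHeadSketch, the _attention_cell helper, the layer names (i2h, h2h, score, rnn, generator), and the greedy inference loop are illustrative assumptions, not the repository's exact implementation.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class AttentionHeadSketch(nn.Layer):
    """Simplified attention decoder head (illustrative only, not the exact PaddleOCR code)."""

    def __init__(self, in_channels, hidden_size, num_classes):
        super().__init__()
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        # Additive-attention parameters and a GRU step cell (assumed structure).
        self.i2h = nn.Linear(in_channels, hidden_size, bias_attr=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias_attr=False)
        self.rnn = nn.GRUCell(in_channels + num_classes, hidden_size)
        self.generator = nn.Linear(hidden_size, num_classes)

    def _char_to_onehot(self, input_char, onehot_dim):
        return F.one_hot(input_char, onehot_dim)

    def _attention_cell(self, prev_hidden, batch_H, char_onehots):
        # Additive attention over the encoder sequence, then one GRU step.
        e = self.score(paddle.tanh(
            self.i2h(batch_H) + paddle.unsqueeze(self.h2h(prev_hidden), axis=1)))
        alpha = paddle.transpose(F.softmax(e, axis=1), [0, 2, 1])      # [B, 1, T]
        context = paddle.squeeze(paddle.matmul(alpha, batch_H), axis=1)
        outputs, hidden = self.rnn(
            paddle.concat([context, char_onehots], axis=1), prev_hidden)
        return outputs, hidden

    def forward(self, inputs, targets=None, batch_max_length=25):
        batch_size = inputs.shape[0]
        num_steps = batch_max_length + 1
        hidden = paddle.zeros([batch_size, self.hidden_size])
        output_hiddens = []

        if targets is not None:
            # Training branch: teacher forcing with ground-truth characters
            # (the first removed debug print lived here).
            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets[:, i], onehot_dim=self.num_classes)
                outputs, hidden = self._attention_cell(hidden, inputs, char_onehots)
                output_hiddens.append(paddle.unsqueeze(outputs, axis=1))
            output = paddle.concat(output_hiddens, axis=1)
            probs = self.generator(output)
        else:
            # Inference branch: start from index 0 and decode greedily
            # (the second removed debug print lived here).
            targets = paddle.zeros(shape=[batch_size], dtype="int32")
            probs = None
            for i in range(num_steps):
                char_onehots = self._char_to_onehot(
                    targets, onehot_dim=self.num_classes)
                outputs, hidden = self._attention_cell(hidden, inputs, char_onehots)
                probs_step = self.generator(outputs)
                step = paddle.unsqueeze(probs_step, axis=1)
                probs = step if probs is None else paddle.concat([probs, step], axis=1)
                targets = paddle.argmax(probs_step, axis=1)
        return probs


# Tiny smoke test: a batch of 2 encoder sequences with 32 time steps and 64 channels.
head = AttentionHeadSketch(in_channels=64, hidden_size=96, num_classes=38)
feats = paddle.randn([2, 32, 64])
print(head(feats).shape)   # inference branch: [2, 26, 38]

The two branches mirror the structure visible in the diff: the training path consumes the ground-truth character at every step (teacher forcing), while the inference path feeds back its own argmax prediction, which is why targets is re-initialised to zeros when none are supplied.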