未验证 提交 33dca040 编写于 作者: Z zhiboniu 提交者: GitHub

fix batchsize=1 backward error (#2973)

上级 cb972a2b
use_gpu: true
log_iter: 10
save_dir: output
snapshot_epoch: 10
weights: output/higherhrnet_hrnet_w32_512/model_final
epoch: 300
num_joints: &num_joints 17
flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
......
...@@ -92,15 +92,14 @@ class HrHRNetHead(nn.Layer): ...@@ -92,15 +92,14 @@ class HrHRNetHead(nn.Layer):
xo2 = self.conv2(x2) xo2 = self.conv2(x2)
num_joints = self.num_joints num_joints = self.num_joints
if self.training: if self.training:
heatmap1, tagmap = paddle.split(xo1, 2, axis=1)
if self.swahr: if self.swahr:
so1 = self.scalelayer0(x1) so1 = self.scalelayer0(x1)
so2 = self.scalelayer1(x2) so2 = self.scalelayer1(x2)
hrhrnet_outputs = ([xo1[:, :num_joints], so1], [xo2, so2], hrhrnet_outputs = ([heatmap1, so1], [xo2, so2], tagmap)
xo1[:, num_joints:])
return self.loss(hrhrnet_outputs, targets) return self.loss(hrhrnet_outputs, targets)
else: else:
hrhrnet_outputs = (xo1[:, :num_joints], xo2, hrhrnet_outputs = (heatmap1, xo2, tagmap)
xo1[:, num_joints:])
return self.loss(hrhrnet_outputs, targets) return self.loss(hrhrnet_outputs, targets)
# averaged heatmap, upsampled tagmap # averaged heatmap, upsampled tagmap
......
...@@ -194,7 +194,10 @@ class AELoss(object): ...@@ -194,7 +194,10 @@ class AELoss(object):
def __call__(self, preds, tagmaps):
    """Compute the batch-averaged associative-embedding (AE) grouping loss.

    Args:
        preds: predicted tag embeddings; first axis is the batch
            dimension (assumed shape (bs, ...) — confirm at caller).
        tagmaps: ground-truth tag maps, indexed in lockstep with ``preds``.

    Returns:
        tuple (pull, push): pull and push loss terms averaged over the
        batch and scaled by ``self.pull_factor`` / ``self.push_factor``.
    """
    bs = preds.shape[0]
    # Slice ``[i:i + 1]`` then ``.squeeze()`` instead of plain integer
    # indexing ``[i]`` — per the commit message this avoids a backward
    # (autograd) error when batch size is 1.
    # NOTE(review): a no-axis squeeze also drops any OTHER size-1 dims;
    # assumes the remaining dims are never 1 — confirm upstream shapes.
    losses = [
        self.apply_single(preds[i:i + 1].squeeze(),
                          tagmaps[i:i + 1].squeeze()) for i in range(bs)
    ]
    pull = self.pull_factor * sum(loss[0] for loss in losses) / len(losses)
    push = self.push_factor * sum(loss[1] for loss in losses) / len(losses)
    return pull, push
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册