Commit 1911cb57 authored by leaves-zwx

fetch deconv test data

Parent d1855f08
@@ -151,8 +151,8 @@ def register_param_grad_hook(model):
         np.save(param_grad_dump_path, param_grad.detach().cpu().numpy())

     def get_dump_path(param_name):
-        param_grad_name = param_name.replace('.weight', '.weight_diff')
-        param_grad_name = param_name.replace('.bias', '.bias_diff')
+        param_grad_name = param_name.replace('.weight', '.weight_grad')
+        param_grad_name = param_name.replace('.bias', '.bias_grad')
         param_grad_name = param_grad_name.replace('.', '-')
         return os.path.join(param_grad_dump_dir, param_grad_name)
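Note that in both the removed and the added lines above, the second replace starts from param_name rather than param_grad_name, so a '.weight' parameter ends up without the '_grad' suffix in its dump name. Below is a minimal sketch of how such a per-parameter gradient dump can be wired up with Tensor.register_hook, with the two replaces chained; param_grad_dump_dir, the makedirs call, and the registration loop are assumptions for illustration and not part of this commit:

import os
import numpy as np

param_grad_dump_dir = './new_dump/param_grad'  # assumed dump directory, not from this commit

def register_param_grad_hook(model):
    def get_dump_path(param_name):
        # chain the replaces so '.weight' parameters keep their '_grad' suffix
        param_grad_name = param_name.replace('.weight', '.weight_grad')
        param_grad_name = param_grad_name.replace('.bias', '.bias_grad')
        param_grad_name = param_grad_name.replace('.', '-')
        return os.path.join(param_grad_dump_dir, param_grad_name)

    os.makedirs(param_grad_dump_dir, exist_ok=True)
    for param_name, param in model.named_parameters():
        param_grad_dump_path = get_dump_path(param_name)
        # fires once the gradient for this parameter has been computed in backward()
        param.register_hook(
            lambda param_grad, path=param_grad_dump_path:
                np.save(path, param_grad.detach().cpu().numpy())
        )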
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+import numpy
 from torch import nn
 from torch.nn import functional as F
@@ -32,7 +33,18 @@ class MaskRCNNC4Predictor(nn.Module):
                 nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

     def forward(self, x):
+        mask_conv5_in_dump_path = './new_dump/mask/conv5_in' + '.' + str(x.size())
+        numpy.save(mask_conv5_in_dump_path, x.cpu().detach().numpy())
+        mask_conv5_in_grad_dump_path = './new_dump/mask/conv5_in_grad' + '.' + str(x.size())
+        x.register_hook(lambda grad: numpy.save(mask_conv5_in_grad_dump_path, grad.cpu().detach().numpy()))
         x = F.relu(self.conv5_mask(x))
+        mask_conv5_out_dump_path = './new_dump/mask/conv5_out' + '.' + str(x.size())
+        numpy.save(mask_conv5_out_dump_path, x.cpu().detach().numpy())
+        mask_conv5_out_grad_dump_path = './new_dump/mask/conv5_out_grad' + '.' + str(x.size())
+        x.register_hook(lambda grad: numpy.save(mask_conv5_out_grad_dump_path, grad.cpu().detach().numpy()))
         return self.mask_fcn_logits(x)
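self.conv5_mask in MaskRCNNC4Predictor is a ConvTranspose2d, so the files written above capture the input, the output, and their gradients around that deconvolution, matching the commit message "fetch deconv test data". A minimal sketch of reading the dumps back as reference data for a deconv test follows; the glob pattern, the load_dump helper, and the tolerances are assumptions for illustration:

import glob
import numpy as np

def load_dump(prefix):
    # forward() names files '<prefix>.' + str(x.size()) and numpy.save appends '.npy',
    # so glob over the prefix rather than hard-coding the tensor size in the name
    paths = sorted(glob.glob(prefix + '.*.npy'))
    return np.load(paths[-1])

conv5_in = load_dump('./new_dump/mask/conv5_in')
conv5_out = load_dump('./new_dump/mask/conv5_out')
conv5_out_grad = load_dump('./new_dump/mask/conv5_out_grad')

# a candidate implementation's deconv result could then be checked against the dump, e.g.
# np.allclose(conv5_out, candidate_out, rtol=1e-4, atol=1e-5)
print(conv5_in.shape, conv5_out.shape, conv5_out_grad.shape)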