Unverified commit f69988da, written by author zxcd, committed via GitHub

add error judgment, and add testing for 0-size tensor. (#55952)

Parent commit: 08dbb33c
......@@ -130,7 +130,8 @@ static void ShareTensorsIntoScope(const std::vector<Tensor> &tensors,
paddle::framework::Scope *scope) {
for (size_t i = 0; i < tensors.size(); ++i) {
auto name = tensors[i].name();
if (name == paddle::framework::kFakeVarName) {
if (name == paddle::framework::kFakeVarName ||
name == paddle::framework::kEmptyVarName) {
continue;
}
auto *var = scope->Var(name);
......
......@@ -143,7 +143,8 @@ static void ShareVarsIntoScope(const std::vector<Variable *> &vars,
const std::vector<std::string> &var_names,
framework::Scope *scope) {
for (size_t i = 0; i < vars.size(); ++i) {
if (var_names[i] == framework::kFakeVarName) {
if (var_names[i] == framework::kFakeVarName ||
var_names[i] == paddle::framework::kEmptyVarName) {
continue;
}
auto *var = scope->Var(var_names[i]);
......
......@@ -41,5 +41,28 @@ class TestBackwardWithoutParams(unittest.TestCase):
np.testing.assert_equal(x.grad.numpy(), np.full(x.shape, 0.25))
class ZeroSizeNet(paddle.nn.Layer):
    """Toy layer that emits a 0-size tensor next to a real activation.

    Used to exercise the dynamic-to-static path when one of the
    program outputs is an empty (0-element) tensor.
    """

    def __init__(self):
        super().__init__()

    @paddle.jit.to_static
    def forward(self, x):
        # A tensor with zero elements — the case under test.
        empty = paddle.randn((0,))
        activated = paddle.nn.functional.relu(x)
        # Detach the empty tensor from autograd before returning it.
        empty.stop_gradient = True
        return empty, activated
class TestZeroSizeNet(unittest.TestCase):
    """Checks backward() works when a to_static net returns a 0-size tensor."""

    def test_run(self):
        model = ZeroSizeNet()
        inp = paddle.ones([2, 2])
        inp.stop_gradient = False
        _, activated = model(inp)
        # mean over 4 relu(1)=1 elements -> each input gets grad 1/4.
        loss = paddle.mean(activated)
        loss.backward()
        expected = np.full(inp.shape, 0.25)
        np.testing.assert_equal(inp.grad.numpy(), expected)
if __name__ == "__main__":
    # Run the test cases in this module.
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册