Unverified commit 49f5a97b authored by 张春乔, committed by GitHub

Add for-else (#49521)

* add for-else

* add * for unpacking
Parent 4383494f
@@ -381,22 +381,20 @@ def monkey_patch_variable():
             lhs_dtype = safe_get_dtype(self)
             if not isinstance(other_var, Variable):
                 if reverse:
-                    has_batch_size = False
                     for elem in self.shape:
                         if elem < 0:
-                            has_batch_size = True
+                            other_var = create_tensor_with_batchsize(
+                                self, other_var, lhs_dtype
+                            )
                             break
-                    if not has_batch_size:
+                    else:
+                        # when break is not triggered, enter the else branch
                         other_var = create_tensor(
                             current_block(self),
                             other_var,
                             dtype=lhs_dtype,
                             shape=self.shape,
                         )
-                    else:
-                        other_var = create_tensor_with_batchsize(
-                            self, other_var, lhs_dtype
-                        )
                 else:
                     # add fill_op to current_block
                     other_var = create_scalar(
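For context on the refactor above: Python's `for ... else` runs the `else` suite only when the loop finishes without hitting `break`, which is exactly the condition the removed `has_batch_size` flag used to track. A minimal, self-contained sketch of the pattern (the function and data names here are illustrative, not part of the Paddle code):

```python
def first_negative_or_default(values, default):
    # Return the first negative element of `values`, or `default` if none exists.
    for v in values:
        if v < 0:
            result = v
            break
    else:
        # Runs only when the loop completed without `break`,
        # i.e. no negative element was found.
        result = default
    return result


print(first_negative_or_default([3, -1, 4], 0))  # -1: break taken, else skipped
print(first_negative_or_default([3, 1, 4], 0))   # 0: no break, else runs
```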
@@ -372,9 +372,9 @@ class TestImperativeAutoPrune(unittest.TestCase):
         loss = model.embed_linear0(indices)
         loss.backward()
         _, params_grads = optimizer.minimize(loss)
-        for items in params_grads:
-            assert items[0].name is not model.embed1.weight.name
-            assert items[0].name is not model.linear_1.weight.name
+        for (items_0, *items_len) in params_grads:
+            assert items_0.name is not model.embed1.weight.name
+            assert items_0.name is not model.linear_1.weight.name
         assert model.embed1.weight._grad_ivar() is None
         assert model.linear_1.weight._grad_ivar() is None
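On the test change above: a starred name in the loop target unpacks each entry of `params_grads` so the first element gets its own name and the remainder is collected into a list, replacing the `items[0]` indexing. A tiny illustration, with placeholder strings standing in for the real (parameter, gradient) pairs:

```python
# Hypothetical stand-ins for the (parameter, gradient) pairs returned by minimize().
params_grads = [
    ("embed1.w_0", "embed1.w_0@GRAD"),
    ("linear_1.w_0", "linear_1.w_0@GRAD"),
]

for (param, *rest) in params_grads:
    # `param` is the first element; `rest` collects whatever remains as a list.
    print(param, rest)
# embed1.w_0 ['embed1.w_0@GRAD']
# linear_1.w_0 ['linear_1.w_0@GRAD']
```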