Unverified commit 785684ad, authored by co63oc, committed by GitHub

Fix typos in dy2st unittests (#56006)

Parent 9877fb88
@@ -21,9 +21,9 @@ def add_fn(x):
     return x


-def loss_fn(x, lable):
+def loss_fn(x, label):
     loss = paddle.nn.functional.cross_entropy(
-        x, lable, reduction='none', use_softmax=False
+        x, label, reduction='none', use_softmax=False
     )
     return loss
@@ -57,7 +57,7 @@ def dyfunc_with_if_else(x_v, label=None):
 def dyfunc_with_if_else2(x, col=100):
     row = 0
     if abs(col) > x.shape[-1]:
-        # TODO: Don't support return non-Tensor in Tensor-dependent `if` stament currently.
+        # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
         # `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed.
         # col = -1
         col = paddle.tensor.fill_constant(shape=[1], value=-1, dtype="int64")
@@ -136,7 +136,7 @@ def dyfunc_with_if_else_early_return2():
     return e, None


-def dyfunc_with_if_else_with_list_geneator(x):
+def dyfunc_with_if_else_with_list_generator(x):
    if 10 > 5:
        y = paddle.add_n(
            [paddle.full(shape=[2], fill_value=v) for v in range(5)]
@@ -151,7 +151,7 @@ def nested_if_else(x_v):
     feat_size = x_v.shape[-1]
     bias = paddle.tensor.fill_constant([feat_size], dtype='float32', value=1)
     if x_v.shape[0] != batch_size:
-        # TODO: Don't support return non-Tensor in Tensor-dependent `if` stament currently.
+        # TODO: Don't support return non-Tensor in Tensor-dependent `if` statement currently.
         # `x_v.shape[0]` is not Tensor, and `batch_size` is the return value of `true_fn` after transformed.
         # col = -1
         # batch_size = x_v.shape[0]
@@ -728,7 +728,7 @@ class AttentionModel(paddle.nn.Layer):
         src_emb = self.src_embeder(self._transpose_batch_time(src))
-        # NOTE: modify model code about `enc_hidden` and `enc_cell` to transforme dygraph code successfully.
+        # NOTE: modify model code about `enc_hidden` and `enc_cell` to transform dygraph code successfully.
         # Because nested list can't be transformed now.
         enc_hidden_0 = to_variable(
             np.zeros((self.batch_size, self.hidden_size), dtype='float32')
@@ -26,7 +26,7 @@ from ifelse_simple_func import (
     dyfunc_with_if_else,
     dyfunc_with_if_else2,
     dyfunc_with_if_else3,
-    dyfunc_with_if_else_with_list_geneator,
+    dyfunc_with_if_else_with_list_generator,
     fluid,
     if_tensor_case,
     if_with_and_or,
@@ -116,7 +116,7 @@ class TestDygraphIfElse4(TestDygraphIfElse):
 class TestDygraphIfElseWithListGenerator(TestDygraphIfElse):
     def setUp(self):
         self.x = np.random.random([10, 16]).astype('float32')
-        self.dyfunc = dyfunc_with_if_else_with_list_geneator
+        self.dyfunc = dyfunc_with_if_else_with_list_generator


 class TestDygraphNestedIfElse(TestDygraphIfElse):
@@ -44,7 +44,7 @@ class TestPropertySave(unittest.TestCase):
         self.a.get_float(1)

     def test_set(self):
-        """test propety set."""
+        """test property set."""
         try:
             a = paddle.framework.core.Property()
             a.set_float('float', 10.0)
@@ -39,13 +39,13 @@ class TestSetItemBase(unittest.TestCase):
     def test_case(self):
         func = self.init_func()
-        dy_res = self.run_dygrah(func)
+        dy_res = self.run_dygraph(func)
         st_res = self.run_to_static(func)
         for dy_out, st_out in zip(dy_res, st_res):
             np.testing.assert_allclose(dy_out.numpy(), st_out.numpy())

-    def run_dygrah(self, func):
+    def run_dygraph(self, func):
         x = self.init_data()
         y = func(x)
         x_grad = paddle.grad(y, x)[0]
@@ -53,7 +53,7 @@ class TestSetItemBase(unittest.TestCase):
     def run_to_static(self, func):
         func = paddle.jit.to_static(func)
-        return self.run_dygrah(func)
+        return self.run_dygraph(func)


 class TestCase1(TestSetItemBase):
@@ -169,7 +169,7 @@ class TestCase11(TestSetItemBase):
         return foo

-    def run_dygrah(self, func):
+    def run_dygraph(self, func):
         x = self.init_data()
         value = paddle.ones((16, 32))
         value.stop_gradient = False
@@ -188,7 +188,7 @@ class TestCase12(TestSetItemBase):
         return foo

-    def run_dygrah(self, func):
+    def run_dygraph(self, func):
         x = self.init_data()
         value = paddle.ones((32,))
         value.stop_gradient = False
@@ -116,8 +116,8 @@ class TestOriginInfo(unittest.TestCase):
         for i in range(self.line_num):
             static_lineno = self.static_abs_lineno_list[i]
-            staic_loc = Location(static_filepath, static_lineno)
-            self.assertIn(staic_loc.line_location, origin_info_map)
+            static_loc = Location(static_filepath, static_lineno)
+            self.assertIn(static_loc.line_location, origin_info_map)

             dy_lineno = dygraph_abs_lineno_list[i]
             dy_col_offset = self.dy_abs_col_offset[i]
@@ -129,7 +129,7 @@ class TestOriginInfo(unittest.TestCase):
                 code,
             )
             self.assertEqual(
-                str(origin_info_map[staic_loc.line_location]), str(origin_info)
+                str(origin_info_map[static_loc.line_location]), str(origin_info)
             )

     def test_attach_origin_info(self):
@@ -261,23 +261,23 @@ class SwitchModeNet(paddle.nn.Layer):
 @paddle.jit.to_static
-def switch_mode_funciton():
+def switch_mode_function():
     return True


 class TestFunctionTrainEvalMode(unittest.TestCase):
     def test_switch_mode(self):
         paddle.disable_static()
-        switch_mode_funciton.eval()
-        switch_mode_funciton()
-        self.assertEqual(switch_mode_funciton._training, False)
-        _, partial_layer = switch_mode_funciton.program_cache.last()[-1]
+        switch_mode_function.eval()
+        switch_mode_function()
+        self.assertEqual(switch_mode_function._training, False)
+        _, partial_layer = switch_mode_function.program_cache.last()[-1]
         self.assertEqual(partial_layer.training, False)

-        switch_mode_funciton.train()
-        switch_mode_funciton()
-        self.assertEqual(switch_mode_funciton._training, True)
-        _, partial_layer = switch_mode_funciton.program_cache.last()[-1]
+        switch_mode_function.train()
+        switch_mode_function()
+        self.assertEqual(switch_mode_function._training, True)
+        _, partial_layer = switch_mode_function.program_cache.last()[-1]
         self.assertEqual(partial_layer.training, True)

     def test_raise_error(self):
@@ -70,7 +70,7 @@ def train(to_static, build_strategy=None):
             with paddle.amp.auto_cast():
                 pred = resnet(img)
-                # FIXME(Aurelius84): The followding cross_entropy seems to bring out a
+                # FIXME(Aurelius84): The following cross_entropy seems to bring out a
                 # precision problem, need to figure out the underlying reason.
                 # If we remove it, the loss between dygraph and dy2stat is exactly same.
                 loss = paddle.nn.functional.cross_entropy(
@@ -70,11 +70,11 @@ def train(args, attn_model=False):
         dropout=args.dropout,
     )

-    gloabl_norm_clip = ClipGradByGlobalNorm(args.max_grad_norm)
+    global_norm_clip = ClipGradByGlobalNorm(args.max_grad_norm)
     optimizer = fluid.optimizer.SGD(
         args.learning_rate,
         parameter_list=model.parameters(),
-        grad_clip=gloabl_norm_clip,
+        grad_clip=global_norm_clip,
     )

     model.train()
@@ -486,7 +486,7 @@ class TestTensorShapeInWhile4(TestTensorShapeBasic):
         self.expected_slice_op_num = 0


-# 5. Test op num for negetive dim
+# 5. Test op num for negative dim
 class TestOpNumBasicWithTensorShape(unittest.TestCase):
     def setUp(self):
         self._set_input_spec()