Unverified commit 11aa5edd, authored by co63oc, committed by GitHub

Fix typos (#54011)

Parent ae241565
@@ -81,7 +81,7 @@ class TestDistCTR2x2(TestDistRunnerBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = fluid.layers.embedding(
+        lr_embedding = fluid.layers.embedding(
             is_distributed=False,
             input=lr_data,
             size=[lr_input_dim, 1],
@@ -92,7 +92,7 @@ class TestDistCTR2x2(TestDistRunnerBase):
             is_sparse=IS_SPARSE,
         )
         lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-            input=lr_embbding, pool_type="sum"
+            input=lr_embedding, pool_type="sum"
         )
         merge_layer = paddle.concat([dnn_out, lr_pool], axis=1)
......
@@ -129,7 +129,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = fluid.layers.embedding(
+        lr_embedding = fluid.layers.embedding(
             is_distributed=False,
             input=lr_data,
             size=[lr_input_dim, 1],
@@ -141,7 +141,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
             padding_idx=0,
         )
         lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-            input=lr_embbding, pool_type="sum"
+            input=lr_embedding, pool_type="sum"
        )
         merge_layer = paddle.concat([dnn_out, lr_pool], axis=1)
......
@@ -88,7 +88,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
         dnn_out = dnn_pool
         # build lr model
-        lr_embbding = fluid.layers.embedding(
+        lr_embedding = fluid.layers.embedding(
             is_distributed=False,
             input=lr_data,
             size=[lr_input_dim, 1],
@@ -99,7 +99,7 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
             is_sparse=True,
         )
         lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-            input=lr_embbding, pool_type="sum"
+            input=lr_embedding, pool_type="sum"
         )
         with fluid.device_guard("gpu"):
......
@@ -120,7 +120,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = paddle.static.nn.sparse_embedding(
+        lr_embedding = paddle.static.nn.sparse_embedding(
             input=lr_data,
             size=[lr_input_dim, 1],
             is_test=inference,
@@ -132,7 +132,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         )
         lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-            input=lr_embbding, pool_type="sum"
+            input=lr_embedding, pool_type="sum"
         )
         merge_layer = paddle.concat([dnn_out, lr_pool], axis=1)
......
@@ -80,7 +80,7 @@ def net(batch_size=4, lr=0.01):
     dnn_out = dnn_pool
     # build lr model
-    lr_embbding = fluid.layers.embedding(
+    lr_embedding = fluid.layers.embedding(
         is_distributed=False,
         input=lr_data,
         size=[lr_input_dim, 1],
@@ -91,7 +91,7 @@ def net(batch_size=4, lr=0.01):
         is_sparse=True,
     )
     lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-        input=lr_embbding, pool_type="sum"
+        input=lr_embedding, pool_type="sum"
     )
     with fluid.device_guard("gpu"):
......
@@ -239,7 +239,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = paddle.static.nn.sparse_embedding(
+        lr_embedding = paddle.static.nn.sparse_embedding(
             input=lr_data,
             size=[lr_input_dim, 1],
             is_test=inference,
@@ -250,7 +250,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
         )
         lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
-            input=lr_embbding, pool_type="sum"
+            input=lr_embedding, pool_type="sum"
         )
         merge_layer = paddle.concat([dnn_out, lr_pool], axis=1)
         predict = paddle.static.nn.fc(
......
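For context, every hunk in this commit renames the same variable in the wide (LR) branch of a CTR test model: an embedding lookup over sparse feature ids followed by a sum sequence pool. The snippet below is a minimal, self-contained sketch of that pattern with the corrected name, not the tests' exact code; it assumes PaddlePaddle's static-graph API with the legacy `fluid` module available, and `lr_input_dim` is a placeholder vocabulary size.

```python
# Hedged sketch of the LR branch touched by this rename (not the tests' exact code).
import paddle
import paddle.fluid as fluid

paddle.enable_static()

lr_input_dim = 10000  # assumed vocabulary size for the sparse LR feature

# Variable-length sequence of feature ids (LoD level 1).
lr_data = paddle.static.data(
    name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1
)

# One scalar weight per id; this is the variable renamed from `lr_embbding`.
lr_embedding = fluid.layers.embedding(
    is_distributed=False,
    input=lr_data,
    size=[lr_input_dim, 1],
    is_sparse=True,
)

# Sum the per-id weights over each sequence to get one LR score per example.
lr_pool = paddle.static.nn.sequence_lod.sequence_pool(
    input=lr_embedding, pool_type="sum"
)
```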