Commit a383eb20 authored by S seiriosPlus

fix UT

Parent ffd5f44e
@@ -30,13 +30,30 @@ class TestLookupTableFuseOp(unittest.TestCase):
     def check_with_place(self, place):
         scope = fluid.global_scope()
 
-        init_program = fluid.Program()
-
-        lr = scope.var("LearningRate")
-        lr.get_tensor().set([0.01], place)
-
-        ids = [i for i in range(100)]
-        out = scope.var("output")
+        scope.var("LearningRate").get_tensor().set([0.01], place)
+        scope.var("Ids").get_tensor().set([i for i in range(100)], place)
+
+        init_program = fluid.Program()
+
+        lr = init_program.global_block().create_var(
+            name="LearningRate",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[1],
+            dtype="float32")
+
+        ids = init_program.global_block().create_var(
+            name="Ids",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[100],
+            dtype="int64")
+
+        output = init_program.global_block().create_var(
+            name="output",
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[100, 8],
+            dtype="float32")
 
         metas = []
         metas.append(
@@ -55,32 +72,34 @@ class TestLookupTableFuseOp(unittest.TestCase):
         init_program.global_block().append_op(
             type="lookup_sparse_table_read",
             inputs={"Ids": ids},
-            outputs={"Out": out},
+            outputs={"Out": output},
             attrs={
                 "tablename": "embedding_1.block0",
                 "init": True,
-                "value_names": ["Param", "Moment1", "Moment2"],
+                "value_names": ["Param"],
             })
 
         init_program.global_block().append_op(
             type="lookup_sparse_table_read",
             inputs={"Ids": ids},
-            outputs={"Out": out},
+            outputs={"Out": output},
             attrs={
                 "tablename": "embedding_2.block0",
                 "init": True,
                 "value_names": ["Param"],
             })
 
-        executor = fluid.Executor(fluid.CPUPlace())
+        executor = fluid.Executor(place)
         executor.run(init_program)
 
         training_program = fluid.Program()
 
-        rows = [0, 1, 2, 3, 4, 5, 6]
-        row_numel = 7
-        w_selected_rows = scope.var('W').get_selected_rows()
+        scope.var('Beta1Pow').get_tensor().set([0], place)
+        scope.var('Beta2Pow').get_tensor().set([0], place)
+
+        rows = [0, 1, 2, 3, 4, 5, 6]
+        row_numel = 8
+        w_selected_rows = scope.var('Grad').get_selected_rows()
         w_selected_rows.set_height(len(rows))
         w_selected_rows.set_rows(rows)
         w_array = np.ones((len(rows), row_numel)).astype("float32")
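Note: the gradient built above is a SelectedRows variable rather than a dense LoDTensor. It stores a list of touched row indices plus a dense tensor holding only those rows' values. Below is a minimal, self-contained sketch of that construction, reusing the `Grad` name, row list, and `row_numel = 8` from the hunk above; the imports and the explicit `place` are the only additions.

```python
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
scope = fluid.global_scope()

# Only these rows carry gradient values; each row is row_numel floats wide.
rows = [0, 1, 2, 3, 4, 5, 6]
row_numel = 8

# A SelectedRows pairs the row index list with a dense tensor of shape
# [len(rows), row_numel]; rows that are not listed are treated as zero.
grad = scope.var("Grad").get_selected_rows()
grad.set_height(len(rows))
grad.set_rows(rows)
grad.get_tensor().set(
    np.ones((len(rows), row_numel)).astype("float32"), place)
```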
@@ -89,11 +108,44 @@ class TestLookupTableFuseOp(unittest.TestCase):
         w_tensor = w_selected_rows.get_tensor()
         w_tensor.set(w_array, place)
 
+        lr = training_program.global_block().create_var(
+            name="LearningRate",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[1],
+            dtype="float32")
+
+        grads = training_program.global_block().create_var(
+            name="Grad",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.SELECTED_ROWS,
+            shape=[100, 8],
+            dtype="float32")
+
+        beta1 = training_program.global_block().create_var(
+            name="Beta1Pow",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[1],
+            dtype="float32")
+
+        beta2 = training_program.global_block().create_var(
+            name="Beta2Pow",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.LOD_TENSOR,
+            shape=[1],
+            dtype="float32")
+
         training_program.global_block().append_op(
             type="lookup_sparse_table_fuse_adam",
-            inputs={"Grad": ids,
-                    "LearningRate": lr},
-            outputs={"Out": out},
+            inputs={
+                "Grad": grads,
+                "LearningRate": lr,
+                "Beta1Pow": beta1,
+                "Beta2Pow": beta2,
+            },
+            outputs={"Beta1PowOut": beta1,
+                     "Beta2PowOut": beta2},
             attrs={
                 "is_entry": False,
                 "tablename": "embedding_1.block0",
@@ -102,15 +154,16 @@ class TestLookupTableFuseOp(unittest.TestCase):
 
         training_program.global_block().append_op(
             type="lookup_sparse_table_fuse_sgd",
-            inputs={"Grad": ids,
+            inputs={"Grad": grads,
                     "LearningRate": lr},
-            outputs={"Out": out},
             attrs={
                 "is_entry": False,
                 "tablename": "embedding_2.block0",
                 "value_names": ["Param"],
             })
 
+        executor.run(training_program)
+
 
 if __name__ == "__main__":
     unittest.main()
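Taken together, the fix applies one pattern throughout the test: declare each input (`LearningRate`, `Ids`, `Grad`, `Beta1Pow`, `Beta2Pow`) as a variable on the program's global block, seed its backing storage through the global scope, append the operator by its type name, and run the program with an executor bound to the target place. The sketch below walks through that same flow under the same static-graph `fluid` API as the test, but with a plain `scale` op standing in for the `lookup_sparse_table_*` ops so it runs without the large-scale table initialization; the variable names `X` and `Y` are illustrative only.

```python
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
scope = fluid.global_scope()

prog = fluid.Program()
block = prog.global_block()

# 1. Declare variables on the program so ops can reference them by name.
x = block.create_var(
    name="X",
    persistable=True,
    type=fluid.core.VarDesc.VarType.LOD_TENSOR,
    shape=[1],
    dtype="float32")
y = block.create_var(
    name="Y",
    type=fluid.core.VarDesc.VarType.LOD_TENSOR,
    shape=[1],
    dtype="float32")

# 2. Seed the persistable input through the scope, as the test does for
#    LearningRate, Ids, Beta1Pow, and Beta2Pow.
scope.var("X").get_tensor().set(
    np.array([0.01], dtype="float32"), place)

# 3. Append the op by type name; 'scale' stands in for the
#    lookup_sparse_table_fuse_* ops exercised by the test.
block.append_op(
    type="scale",
    inputs={"X": x},
    outputs={"Out": y},
    attrs={"scale": 2.0})

# 4. Run the program on the target place (uses the global scope by default).
fluid.Executor(place).run(prog)
```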