From ab8cc401e61dd49d393a72903a427ea6fa14bec7 Mon Sep 17 00:00:00 2001
From: qijun
Date: Tue, 17 Oct 2017 16:05:05 -0700
Subject: [PATCH] add sparse sgd operator unittest

---
 paddle/operators/sgd_op.h                      |  3 +-
 paddle/pybind/pybind.cc                        |  5 ++
 .../v2/framework/tests/test_selected_rows.py   | 23 +++----
 .../paddle/v2/framework/tests/test_sgd_op.py   | 60 +++++++++++++++++++
 4 files changed, 79 insertions(+), 12 deletions(-)

diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h
index a872d7f749..8c28d5e66b 100644
--- a/paddle/operators/sgd_op.h
+++ b/paddle/operators/sgd_op.h
@@ -37,7 +37,8 @@ class SGDOpKernel : public framework::OpKernel {
     auto* learning_rate = ctx.Input("LearningRate");
 
     auto* grad_var = ctx.InputVar("Grad");
-    if (grad_var->IsType()) {
+    // Actually, all tensors are LoDTensor except SelectedRows.
+    if (grad_var->IsType()) {
       param_out->mutable_data(ctx.GetPlace());
       auto* grad = ctx.Input("Grad");
 
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index fcae92ad99..65e265b614 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -186,6 +186,11 @@ All parameter, weight, gradient are variables in Paddle.
              return self.GetMutable();
            },
            py::return_value_policy::reference)
+      .def("get_selected_rows",
+           [](Variable &self) -> SelectedRows * {
+             return self.GetMutable();
+           },
+           py::return_value_policy::reference)
       .def("get_net",
            [](Variable &self) -> operators::NetOp * {
              return self.GetMutable();
diff --git a/python/paddle/v2/framework/tests/test_selected_rows.py b/python/paddle/v2/framework/tests/test_selected_rows.py
index 661e818179..e8a930cb08 100644
--- a/python/paddle/v2/framework/tests/test_selected_rows.py
+++ b/python/paddle/v2/framework/tests/test_selected_rows.py
@@ -8,29 +8,30 @@ class TestSelectedRows(unittest.TestCase):
         place = core.CPUPlace()
         height = 10
         rows = [0, 4, 7]
-        row_numel = 10
-        selcted_rows = core.SelectedRows(rows, row_numel)
-        np_array = np.ones((len(rows), height)).astype("float32")
+        row_numel = 12
+        selected_rows = core.SelectedRows(rows, height)
+        np_array = np.ones((len(rows), row_numel)).astype("float32")
         np_array[0, 0] = 2.0
         np_array[2, 8] = 4.0
-        tensor = selcted_rows.get_tensor()
+        tensor = selected_rows.get_tensor()
         tensor.set(np_array, place)
 
         # compare rows
-        self.assertEqual(0, selcted_rows.rows()[0])
-        self.assertEqual(4, selcted_rows.rows()[1])
-        self.assertEqual(7, selcted_rows.rows()[2])
+        self.assertEqual(0, selected_rows.rows()[0])
+        self.assertEqual(4, selected_rows.rows()[1])
+        self.assertEqual(7, selected_rows.rows()[2])
 
         # compare height
-        self.assertEqual(10, selcted_rows.height())
+        self.assertEqual(10, selected_rows.height())
 
         # compare tensor
         self.assertAlmostEqual(2.0,
-                               selcted_rows.get_tensor().get_float_element(0))
+                               selected_rows.get_tensor().get_float_element(0))
         self.assertAlmostEqual(1.0,
-                               selcted_rows.get_tensor().get_float_element(1))
+                               selected_rows.get_tensor().get_float_element(1))
         self.assertAlmostEqual(
-            4.0, selcted_rows.get_tensor().get_float_element(2 * row_numel + 8))
+            4.0,
+            selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
index 2dd881e5e1..c7d6a3b345 100644
--- a/python/paddle/v2/framework/tests/test_sgd_op.py
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -1,5 +1,7 @@
 import unittest
 import numpy as np
+import paddle.v2.framework.core as core
+from paddle.v2.framework.op import Operator
 from op_test import OpTest
 
 
@@ -17,5 +19,63 @@ class TestSGDOp(OpTest):
         self.check_output()
 
 
+class TestSparseSGDOp(unittest.TestCase):
+    def test_sparse_sgd(self):
+        scope = core.Scope()
+
+        # create and initialize Grad Variable
+        place = core.CPUPlace()
+        height = 10
+        rows = [0, 4, 7]
+        row_numel = 12
+
+        grad_selected_rows = scope.var('Grad').get_selected_rows()
+        grad_selected_rows.set_height(height)
+        grad_selected_rows.set_rows(rows)
+        np_array = np.ones((len(rows), row_numel)).astype("float32")
+        np_array[0, 0] = 2.0
+        np_array[2, 8] = 4.0
+        grad_tensor = grad_selected_rows.get_tensor()
+        grad_tensor.set(np_array, place)
+
+        # create and initialize Param Variable
+        param = scope.var('Param').get_tensor()
+        param_array = np.full((height, row_numel), 5.0).astype("float32")
+        param.set(param_array, place)
+
+        # create and initialize LearningRate Variable
+        lr = scope.var('LearningRate').get_tensor()
+        lr_array = np.full((1), 2.0).astype("float32")
+        lr.set(lr_array, place)
+
+        # create and run sgd operator
+        sgd_op = Operator(
+            "sgd",
+            Param='Param',
+            Grad='Grad',
+            ParamOut='Param',
+            LearningRate='LearningRate')
+        ctx = core.DeviceContext.create(place)
+        sgd_op.run(scope, ctx)
+
+        # get and compare result
+        result_array = np.array(param)
+
+        # rows[0] = 0, 5.0 - 2.0 * 2.0
+        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
+        # rows[0] = 0, 5.0 - 2.0 * 1.0
+        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
+        # 5.0 - 2.0 * 0.0
+        self.assertAlmostEqual(5.0, result_array[1, 0])
+        # rows[1] = 4, 5.0 - 2.0 * 1.0
+        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
+        # 5.0 - 2.0 * 0.0
+        self.assertAlmostEqual(5.0, result_array[5, 8])
+        # rows[2] = 7, 5.0 - 2.0 * 1.0
+        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
+        # rows[2] = 7, 5.0 - 2.0 * 4.0
+        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
+
+
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab