From a994327fb158ee7692238fef6e29c7f1ed6dc1ca Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Tue, 3 Apr 2018 15:39:46 +0800
Subject: [PATCH] add TestSGDOpOptimizeSelectedRows

---
 .../fluid/tests/unittests/test_sgd_op.py | 63 ++++++++++---------
 1 file changed, 34 insertions(+), 29 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py
index b3fd63611..191f21725 100644
--- a/python/paddle/fluid/tests/unittests/test_sgd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py
@@ -101,31 +101,50 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
     def check_with_place(self, place):
         scope = core.Scope()

+        row_width = 12
         # create and initialize Grad Variable
-        height = 10
-        rows = [0, 4, 7]
-        row_numel = 12
+        grad_height = 10
+        grad_rows = [0, 4, 7]

         grad_selected_rows = scope.var('Grad').get_selected_rows()
-        grad_selected_rows.set_height(height)
-        grad_selected_rows.set_rows(rows)
-        np_array = np.ones((len(rows), row_numel)).astype("float32")
-        np_array[0, 0] = 2.0
-        np_array[2, 8] = 4.0
+        grad_selected_rows.set_height(grad_height)
+        grad_selected_rows.set_rows(grad_rows)
+        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
+        grad_array[0, 0] = 2.0
+        grad_array[2, 8] = 4.0

         grad_tensor = grad_selected_rows.get_tensor()
-        grad_tensor.set(np_array, place)
+        grad_tensor.set(grad_array, place)

         # create and initialize Param Variable
-        param = scope.var('Param').get_tensor()
-        param_array = np.full((height, row_numel), 5.0).astype("float32")
-        param.set(param_array, place)
+        # create and initialize W Variable
+        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]
+
+        # init Param
+        w_selected_rows = scope.var('Param').get_selected_rows()
+        w_selected_rows.set_height(len(param_rows))
+        w_selected_rows.set_rows(param_rows)
+        w_array = np.ones((len(param_rows), row_width)).astype("float32")
+        for i in range(len(param_rows)):
+            w_array[i] *= i
+        w_tensor = w_selected_rows.get_tensor()
+        w_tensor.set(w_array, place)
+
+        w_before_optimize = np.array(w_tensor)
+        print(w_before_optimize)

         # create and initialize LearningRate Variable
+        lr_value = 0.1
         lr = scope.var('LearningRate').get_tensor()
-        lr_array = np.full((1), 2.0).astype("float32")
+        lr_array = np.full((1), lr_value).astype("float32")
         lr.set(lr_array, place)

+        # optimize with Python
+        w_after_optimize = np.copy(w_before_optimize)
+        for index, id in enumerate(grad_rows):
+            w_after_optimize[id] = w_before_optimize[
+                id] - lr_value * grad_array[index]
+
         # create and run sgd operator
         sgd_op = Operator(
             "sgd",
@@ -136,22 +155,8 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
         sgd_op.run(scope, place)

         # get and compare result
-        result_array = np.array(param)
-
-        # rows[0] = 0, 5.0 - 2.0 * 2.0
-        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
-        # rows[0] = 0, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
-        # 5.0 - 2.0 * 0.0
-        self.assertAlmostEqual(5.0, result_array[1, 0])
-        # rows[1] = 4, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
-        # 5.0 - 2.0 * 0.0
-        self.assertAlmostEqual(5.0, result_array[5, 8])
-        # rows[2] = 7, 5.0 - 2.0 * 1.0
-        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
-        # rows[2] = 7, 5.0 - 2.0 * 4.0
-        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
+        result_array = np.array(w_tensor)
+        assert (result_array == w_after_optimize).all()

     def test_sparse_sgd(self):
         places = [core.CPUPlace()]
-- 
GitLab
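
As a quick standalone check, the sparse update rule the "# optimize with Python"
block above computes can be reproduced with plain NumPy, outside the Paddle scope
and Operator machinery. This is an illustrative sketch, not part of the patch: it
reuses the test's values (row_width = 12, grad_rows = [0, 4, 7], lr_value = 0.1,
row i of Param initialized to all i's), and the asserts at the end are my own
expected values derived from those numbers. Only the parameter rows listed in
grad_rows are updated, each by lr_value times its matching gradient row; every
other row keeps its old value.

    import numpy as np

    row_width = 12
    grad_rows = [0, 4, 7]                  # rows of Param that receive a gradient
    param_rows = [0, 1, 2, 3, 4, 5, 6, 7]
    lr_value = 0.1

    # Gradient rows: all ones except two spot values, as in the test.
    grad = np.ones((len(grad_rows), row_width), dtype="float32")
    grad[0, 0] = 2.0
    grad[2, 8] = 4.0

    # Param row i starts out as all i's, as in the test.
    w = np.ones((len(param_rows), row_width), dtype="float32")
    for i in range(len(param_rows)):
        w[i] *= i

    # Sparse SGD: touch only the rows named in grad_rows.
    w_after = np.copy(w)
    for index, row_id in enumerate(grad_rows):
        w_after[row_id] = w[row_id] - lr_value * grad[index]

    # Row 4 saw a gradient of 1.0 everywhere: 4.0 - 0.1 * 1.0.
    assert np.allclose(w_after[4, 0], 4.0 - 0.1 * 1.0)
    # Row 7, column 8 saw a gradient of 4.0: 7.0 - 0.1 * 4.0.
    assert np.allclose(w_after[7, 8], 7.0 - 0.1 * 4.0)
    # Rows without a gradient (e.g. row 1) are left untouched.
    assert (w_after[1] == w[1]).all()

This mirrors what the new test asserts after sgd_op.run: the operator's output
for the 'Param' SelectedRows must equal the reference w_after_optimize computed
row by row in Python.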