From 70fecee080ac91781c6c68bdc9cbe9fdaa0cbe48 Mon Sep 17 00:00:00 2001
From: wangyang59 <wangyang59@baidu.com>
Date: Tue, 8 Nov 2016 14:02:21 -0800
Subject: [PATCH] add unit tests for Matrix and Vector in the API

---
 paddle/api/PaddleAPI.h        |  3 +++
 paddle/api/Vector.cpp         | 15 +++++++++++
 paddle/api/test/testMatrix.py | 14 +++++++---
 paddle/api/test/testVector.py | 49 ++++++++++++++++++++++++++++++-----
 4 files changed, 71 insertions(+), 10 deletions(-)

diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 386de6d5974..5df73201360 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -272,6 +272,9 @@ public:
   /// Return is GPU vector or not.
   bool isGpu() const;
 
+  /// Return a list of floats; the memory is allocated and copied.
+  FloatArray getData() const;
+
   /// __len__ in python
   size_t getSize() const;
 
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index 787cf1c973b..3da7a5c476d 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -267,6 +267,21 @@ void Vector::copyFromNumpyArray(float* data, int dim) {
   m->vec->copyFrom(data, dim);
 }
 
+FloatArray Vector::getData() const {
+  if (this->isGpu()) {
+    float* src = m->vec->getData();
+    size_t len = m->vec->getSize();
+    float* dest = new float[len];
+    hl_memcpy_device2host(dest, src, len * sizeof(float));
+    FloatArray ret_val(dest, len);
+    ret_val.needFree = true;
+    return ret_val;
+  } else {
+    FloatArray ret_val(m->vec->getData(), m->vec->getSize());
+    return ret_val;
+  }
+}
+
 bool Vector::isGpu() const {
   return std::dynamic_pointer_cast<paddle::GpuVector>(m->vec) != nullptr;
 }
diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py
index 11035a92816..6d0d42f340e 100644
--- a/paddle/api/test/testMatrix.py
+++ b/paddle/api/test/testMatrix.py
@@ -42,7 +42,7 @@ class TestMatrix(unittest.TestCase):
         self.assertEqual(m.getSparseRowCols(2), [])
 
     def test_sparse_value(self):
-        m = swig_paddle.Matrix.createSparse(3, 3, 6, False)
+        m = swig_paddle.Matrix.createSparse(3, 3, 6, False, False, False)
         self.assertIsNotNone(m)
         m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [7.3, 4.2, 3.2])
 
@@ -66,7 +66,7 @@ class TestMatrix(unittest.TestCase):
         self.assertIsNotNone(m)
         self.assertTrue(abs(m.get(1, 1) - 0.5) < 1e-5)
 
-    def test_numpy(self):
+    def test_numpyCpu(self):
         numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
         m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat)
         self.assertEqual(
@@ -100,8 +100,16 @@ class TestMatrix(unittest.TestCase):
 
             for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]):
                 self.assertAlmostEqual(a, e)
+
+    def test_numpy(self):
+        numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
+        m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat)
+        self.assertEqual((int(m.getHeight()), int(m.getWidth())), numpy_mat.shape)
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
+            self.assertAlmostEqual(a, e)
 
 
 if __name__ == "__main__":
-    swig_paddle.initPaddle("--use_gpu=0")
+    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")
     unittest.main()
diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py
index 5226df79eea..49039514146 100644
--- a/paddle/api/test/testVector.py
+++ b/paddle/api/test/testVector.py
@@ -20,20 +20,28 @@ import unittest
 
 class TestIVector(unittest.TestCase):
     def test_createZero(self):
-        m = swig_paddle.IVector.createZero(10)
+        m = swig_paddle.IVector.createZero(10, False)
         self.assertIsNotNone(m)
         for i in xrange(10):
             self.assertEqual(m[i], 0)
             m[i] = i
             self.assertEqual(m[i], i)
+
+        m = swig_paddle.IVector.createZero(10)
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(m.getData(), [0]*10)
 
     def test_create(self):
-        m = swig_paddle.IVector.create(range(10))
+        m = swig_paddle.IVector.create(range(10), False)
         self.assertIsNotNone(m)
         for i in xrange(10):
             self.assertEqual(m[i], i)
+
+        m = swig_paddle.IVector.create(range(10))
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(m.getData(), range(10))
 
-    def test_numpy(self):
+    def test_cpu_numpy(self):
         vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
         iv = swig_paddle.IVector.createCpuVectorFromNumpy(vec)
         self.assertEqual(vec.shape[0], int(iv.__len__()))
@@ -61,25 +69,43 @@ class TestIVector(unittest.TestCase):
             expect_vec = range(0, 10)
             expect_vec[4] = 7
             self.assertEqual(vec.getData(), expect_vec)
+
+    def test_numpy(self):
+        vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
+        iv = swig_paddle.IVector.createVectorFromNumpy(vec)
+        self.assertEqual(iv.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(iv.getData(), list(vec))
 
 
 class TestVector(unittest.TestCase):
     def testCreateZero(self):
-        v = swig_paddle.Vector.createZero(10)
+        v = swig_paddle.Vector.createZero(10, False)
         self.assertIsNotNone(v)
         for i in xrange(len(v)):
             self.assertTrue(util.doubleEqual(v[i], 0))
             v[i] = i
             self.assertTrue(util.doubleEqual(v[i], i))
+
+        v = swig_paddle.Vector.createZero(10)
+        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(v.getData(), [0]*10)
 
     def testCreate(self):
-        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
+        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)], False)
         self.assertIsNotNone(v)
         for i in xrange(len(v)):
             self.assertTrue(util.doubleEqual(v[i], i / 100.0))
         self.assertEqual(100, len(v))
+
+        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
+        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(100, len(v))
+        vdata = v.getData()
+        for i in xrange(len(v)):
+            self.assertTrue(util.doubleEqual(vdata[i], i / 100.0))
+
 
-    def testNumpy(self):
+    def testCpuNumpy(self):
         numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
         vec = swig_paddle.Vector.createCpuVectorFromNumpy(numpy_arr)
         assert isinstance(vec, swig_paddle.Vector)
@@ -102,9 +128,18 @@ class TestVector(unittest.TestCase):
 
         for i in xrange(1, len(numpy_3)):
             util.doubleEqual(numpy_3[i], vec[i])
+
+    def testNumpy(self):
+        numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
+        vec = swig_paddle.Vector.createVectorFromNumpy(numpy_arr)
+        self.assertEqual(vec.isGpu(), swig_paddle.isUsingGpu())
+        vecData = vec.getData()
+        for n, v in zip(numpy_arr, vecData):
+            self.assertTrue(util.doubleEqual(n, v))
+
 
     def testCopyFromNumpy(self):
-        vec = swig_paddle.Vector.createZero(1)
+        vec = swig_paddle.Vector.createZero(1, False)
         arr = np.array([1.3, 3.2, 2.4], dtype="float32")
         vec.copyFromNumpyArray(arr)
         for i in xrange(len(vec)):
-- 
GitLab
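
For reference, a minimal usage sketch (not part of the patch) of the getData() accessors exercised by the new tests. The py_paddle import path and the initPaddle flags are assumed from the test files above; everything else mirrors calls that appear in the patch.

    # Minimal sketch, assuming the swig_paddle module is importable as in the tests.
    from py_paddle import swig_paddle  # import path assumed

    # Pick GPU or CPU mode the same way the updated testMatrix.py does.
    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")

    # Vector.getData() returns a Python list of floats; for a GPU vector the
    # data is first copied device-to-host (see the Vector.cpp hunk above).
    v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
    assert v.isGpu() == swig_paddle.isUsingGpu()
    print(v.getData()[:5])  # first five elements as Python floats

    # IVector.getData() behaves the same way for integer vectors.
    iv = swig_paddle.IVector.create(range(10))
    assert iv.getData() == range(10)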