diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 386de6d59741764eb510cd08f145a477bf134c97..5df732013607e147d11f584f634064d406bd46f4 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -272,6 +272,9 @@ public:
   /// Return is GPU vector or not.
   bool isGpu() const;
 
+  /// Return a list of floats; the memory is allocated and copied.
+  FloatArray getData() const;
+
   /// __len__ in python
   size_t getSize() const;
 
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index 787cf1c973bab1a0daf9444aaa08cedf8b6fdf25..3da7a5c476da26a1b8b64d8a35a871b7db7fe2e7 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -267,6 +267,21 @@ void Vector::copyFromNumpyArray(float* data, int dim) {
   m->vec->copyFrom(data, dim);
 }
 
+FloatArray Vector::getData() const {
+  if (this->isGpu()) {
+    float* src = m->vec->getData();
+    size_t len = m->vec->getSize();
+    float* dest = new float[len];
+    hl_memcpy_device2host(dest, src, len * sizeof(float));
+    FloatArray ret_val(dest, len);
+    ret_val.needFree = true;
+    return ret_val;
+  } else {
+    FloatArray ret_val(m->vec->getData(), m->vec->getSize());
+    return ret_val;
+  }
+}
+
 bool Vector::isGpu() const {
   return std::dynamic_pointer_cast<paddle::GpuVector>(m->vec) != nullptr;
 }
diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py
index 11035a9281656c49b6d1757dbac2f7f58cb7d8c8..6d0d42f340e0bdfa76a38ad137a7a9b5e3e89e3f 100644
--- a/paddle/api/test/testMatrix.py
+++ b/paddle/api/test/testMatrix.py
@@ -42,7 +42,7 @@ class TestMatrix(unittest.TestCase):
         self.assertEqual(m.getSparseRowCols(2), [])
 
     def test_sparse_value(self):
-        m = swig_paddle.Matrix.createSparse(3, 3, 6, False)
+        m = swig_paddle.Matrix.createSparse(3, 3, 6, False, False, False)
         self.assertIsNotNone(m)
         m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [7.3, 4.2, 3.2])
 
@@ -66,7 +66,7 @@ class TestMatrix(unittest.TestCase):
         self.assertIsNotNone(m)
         self.assertTrue(abs(m.get(1, 1) - 0.5) < 1e-5)
 
-    def test_numpy(self):
+    def test_numpyCpu(self):
         numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
         m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat)
         self.assertEqual(
@@ -100,8 +100,16 @@ class TestMatrix(unittest.TestCase):
         for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]):
             self.assertAlmostEqual(a, e)
+
+    def test_numpy(self):
+        numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
+        m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat)
+        self.assertEqual((int(m.getHeight()), int(m.getWidth())), numpy_mat.shape)
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
+            self.assertAlmostEqual(a, e)
 
 
 if __name__ == "__main__":
-    swig_paddle.initPaddle("--use_gpu=0")
+    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")
     unittest.main()
diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py
index 5226df79eea3bedbf2b5b6f5fa684cc99a194f7c..4903951414643de5d090f276cd0d5995f25238dc 100644
--- a/paddle/api/test/testVector.py
+++ b/paddle/api/test/testVector.py
@@ -20,20 +20,28 @@ import unittest
 
 class TestIVector(unittest.TestCase):
     def test_createZero(self):
-        m = swig_paddle.IVector.createZero(10)
+        m = swig_paddle.IVector.createZero(10, False)
         self.assertIsNotNone(m)
         for i in xrange(10):
             self.assertEqual(m[i], 0)
             m[i] = i
             self.assertEqual(m[i], i)
+
+        m = swig_paddle.IVector.createZero(10)
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(m.getData(), [0]*10)
 
     def test_create(self):
-        m = swig_paddle.IVector.create(range(10))
+        m = swig_paddle.IVector.create(range(10), False)
         self.assertIsNotNone(m)
         for i in xrange(10):
             self.assertEqual(m[i], i)
+
+        m = swig_paddle.IVector.create(range(10))
+        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(m.getData(), range(10))
 
-    def test_numpy(self):
+    def test_cpu_numpy(self):
         vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
         iv = swig_paddle.IVector.createCpuVectorFromNumpy(vec)
         self.assertEqual(vec.shape[0], int(iv.__len__()))
@@ -61,25 +69,43 @@ class TestIVector(unittest.TestCase):
         expect_vec = range(0, 10)
         expect_vec[4] = 7
         self.assertEqual(vec.getData(), expect_vec)
+
+    def test_numpy(self):
+        vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
+        iv = swig_paddle.IVector.createVectorFromNumpy(vec)
+        self.assertEqual(iv.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(iv.getData(), list(vec))
 
 
 class TestVector(unittest.TestCase):
     def testCreateZero(self):
-        v = swig_paddle.Vector.createZero(10)
+        v = swig_paddle.Vector.createZero(10, False)
         self.assertIsNotNone(v)
         for i in xrange(len(v)):
             self.assertTrue(util.doubleEqual(v[i], 0))
             v[i] = i
             self.assertTrue(util.doubleEqual(v[i], i))
+
+        v = swig_paddle.Vector.createZero(10)
+        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(v.getData(), [0]*10)
 
     def testCreate(self):
-        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
+        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)], False)
         self.assertIsNotNone(v)
         for i in xrange(len(v)):
             self.assertTrue(util.doubleEqual(v[i], i / 100.0))
         self.assertEqual(100, len(v))
+
+        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
+        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
+        self.assertEqual(100, len(v))
+        vdata = v.getData()
+        for i in xrange(len(v)):
+            self.assertTrue(util.doubleEqual(vdata[i], i / 100.0))
+
 
-    def testNumpy(self):
+    def testCpuNumpy(self):
         numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
         vec = swig_paddle.Vector.createCpuVectorFromNumpy(numpy_arr)
         assert isinstance(vec, swig_paddle.Vector)
@@ -102,9 +128,18 @@ class TestVector(unittest.TestCase):
         for i in xrange(1, len(numpy_3)):
             util.doubleEqual(numpy_3[i], vec[i])
+
+    def testNumpy(self):
+        numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
+        vec = swig_paddle.Vector.createVectorFromNumpy(numpy_arr)
+        self.assertEqual(vec.isGpu(), swig_paddle.isUsingGpu())
+        vecData = vec.getData()
+        for n, v in zip(numpy_arr, vecData):
+            self.assertTrue(util.doubleEqual(n, v))
+
 
     def testCopyFromNumpy(self):
-        vec = swig_paddle.Vector.createZero(1)
+        vec = swig_paddle.Vector.createZero(1, False)
         arr = np.array([1.3, 3.2, 2.4], dtype="float32")
         vec.copyFromNumpyArray(arr)
         for i in xrange(len(vec)):
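
For reference, a minimal usage sketch (not part of the patch) of the copying getData() accessor and the device-agnostic factory exercised by the new tests. It assumes a built py_paddle/swig_paddle module and uses only calls that appear in the diff; the input values are illustrative only.

    from py_paddle import swig_paddle
    import numpy as np

    # Mirror the updated test runner: enable GPU mode only when the build supports it.
    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")

    # createVectorFromNumpy picks CPU or GPU storage from the global flag;
    # getData() always returns host-side values, copying from the device when needed.
    vec = swig_paddle.Vector.createVectorFromNumpy(
        np.array([1.2, 2.3, 3.4], dtype="float32"))
    print(vec.isGpu(), vec.getData())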