Commit 5022b14d authored by qiaolongfei

fix mixed tensor compile and add cpu unit test

Parent e011e34a
@@ -22,7 +22,12 @@ endif()
 cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
-nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context tensor)
+if(WITH_GPU)
+  nv_test(mixed_vector_test SRCS mixed_vector_test.cc DEPS place memory device_context tensor)
+else()
+  cc_test(mixed_vector_test SRCS mixed_vector_test.cc DEPS place memory device_context tensor)
+endif()
 cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio)
 cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory)
 nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor)
...
@@ -16,6 +16,7 @@
 #include <algorithm>
 #include <initializer_list>
+#include <memory>
 #include <vector>
 #include "paddle/fluid/framework/tensor.h"
@@ -386,13 +387,14 @@ template <typename T>
 class CPUVector : public std::vector<T, std::allocator<T>> {
  public:
   CPUVector() : std::vector<T>() {}
-  CPUVector(size_t count, const T &value = T())
+  explicit CPUVector(size_t count, const T &value = T())
       : std::vector<T>(count, value) {}
   CPUVector(std::initializer_list<T> init) : std::vector<T>(init) {}
-  CPUVector(const std::vector<T> &other) : std::vector<T>(other) {}
+  explicit CPUVector(const std::vector<T> &other) : std::vector<T>(other) {}
   explicit CPUVector(const CPUVector<T> &other) : std::vector<T>(other) {}
   CPUVector(CPUVector<T> &&other) : std::vector<T>(std::move(other)) {}
-  CPUVector(std::vector<T> &&other) : std::vector<T>(std::move(other)) {}
+  explicit CPUVector(std::vector<T> &&other)
+      : std::vector<T>(std::move(other)) {}
   CPUVector &operator=(const CPUVector &other) {
     this->assign(other.begin(), other.end());
     return *this;
@@ -410,8 +412,6 @@ class CPUVector : public std::vector<T, std::allocator<T>> {
     return os;
   }
-  void resize(size_t size) { this->resize(size); }
   T &operator[](size_t id) { return this->at(id); }
   const T &operator[](size_t id) const { return this->at(id); }
...
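Two things change in CPUVector above: the single-argument constructors are marked explicit, so a std::vector<T> or a size_t no longer converts to a CPUVector<T> silently, and the void resize(size_t) forwarder is deleted because it called itself rather than std::vector<T>::resize, recursing until the stack overflowed. The snippet below is an illustrative sketch, not code from the commit; Recursive and Fixed are hypothetical stand-in classes that reproduce the pitfall and the fix.

#include <cstddef>
#include <vector>

template <typename T>
class Recursive : public std::vector<T> {
 public:
  // BUG (same shape as the removed CPUVector::resize): this->resize(size)
  // resolves to this very function, so any call recurses forever.
  void resize(std::size_t size) { this->resize(size); }
};

template <typename T>
class Fixed : public std::vector<T> {
  // No forwarder: callers get std::vector<T>::resize directly, which is what
  // CPUVector does once the override is removed.
};

int main() {
  Fixed<int> v;
  v.resize(3);  // fine: std::vector<int>::resize(3)
  // Recursive<int> r;
  // r.resize(3);  // would overflow the stack
  return 0;
}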
@@ -11,8 +11,15 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#ifdef PADDLE_WITH_CUDA
 #include <cuda_runtime.h>
+#endif
+#include <memory>
 #include "glog/logging.h"
 #include "gtest/gtest.h"
 #include "paddle/fluid/framework/mixed_vector.h"
@@ -41,6 +48,38 @@ TEST(mixed_vector, CPU_VECTOR) {
   }
 }
+
+TEST(mixed_vector, InitWithCount) {
+  paddle::framework::Vector<int> vec(10, 10);
+  for (int i = 0; i < 10; ++i) {
+    ASSERT_EQ(vec[i], 10);
+  }
+}
+
+TEST(mixed_vector, ForEach) {
+  vec<int> tmp;
+  for (auto& v : tmp) {
+    VLOG(3) << v;
+  }
+}
+
+TEST(mixed_vector, Reserve) {
+  paddle::framework::Vector<int> vec;
+  vec.reserve(1);
+  vec.push_back(0);
+  vec.push_back(0);
+  vec.push_back(0);
+}
+
+TEST(mixed_vector, Resize) {
+  paddle::framework::Vector<int> vec;
+  vec.resize(1);
+  vec.push_back(0);
+  vec.push_back(0);
+  vec.push_back(0);
+}
+
+#ifdef PADDLE_WITH_CUDA
 static __global__ void multiply_10(int* ptr) {
   for (int i = 0; i < 10; ++i) {
     ptr[i] *= 10;
@@ -92,23 +131,4 @@ TEST(mixed_vector, MultiGPU) {
   }
 }
-TEST(mixed_vector, InitWithCount) {
-  paddle::framework::Vector<int> vec(10, 10);
-  for (int i = 0; i < 10; ++i) {
-    ASSERT_EQ(vec[i], 10);
-  }
-}
-TEST(mixed_vector, ForEach) {
-  vec<int> tmp;
-  for (auto& v : tmp) {
-  }
-}
-TEST(mixed_vector, Reserve) {
-  paddle::framework::Vector<int> vec;
-  vec.reserve(1);
-  vec.push_back(0);
-  vec.push_back(0);
-  vec.push_back(0);
-}
+#endif
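The net effect of the test changes: the CPU-only cases (InitWithCount, ForEach, Reserve, plus the new Resize) move above the CUDA section, and everything that needs nvcc (<cuda_runtime.h>, the multiply_10 kernel, the GPU tests) sits inside #ifdef PADDLE_WITH_CUDA, so the same source also builds as a plain cc_test when WITH_GPU is off. Below is a minimal sketch of that guard pattern; the test name and values are assumptions, not taken from the commit.

#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>  // only seen by the nv_test (CUDA) build
#endif

#include "gtest/gtest.h"
#include "paddle/fluid/framework/mixed_vector.h"

// Hypothetical test mirroring the added Resize/Reserve cases: the CPU path
// must handle resize/reserve followed by push_back without any device code.
TEST(mixed_vector_sketch, CpuOnlyPath) {
  paddle::framework::Vector<int> vec;
  vec.resize(1);
  vec.push_back(42);
  ASSERT_EQ(vec.size(), 2UL);  // one element from resize(1), one from push_back
}

#ifdef PADDLE_WITH_CUDA
// GPU-only kernels and tests (e.g. the multiply_10 checks) would live here,
// invisible to a CPU-only toolchain.
#endif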