Commit e8a678e1 authored by: Q qijun

Fix executor GPU unit test runtime error

Parent 1f5192a2
...@@ -239,6 +239,7 @@ class ExecutorTesterFeed : public ::testing::Test { ...@@ -239,6 +239,7 @@ class ExecutorTesterFeed : public ::testing::Test {
std::vector<std::vector<float>> inputs_; std::vector<std::vector<float>> inputs_;
}; };
#ifndef PADDLE_WITH_CUDA
TEST_F(ExecutorTesterRandom, CPU) { TEST_F(ExecutorTesterRandom, CPU) {
std::vector<Place> places; std::vector<Place> places;
CPUPlace cpu_place; CPUPlace cpu_place;
...@@ -292,13 +293,19 @@ TEST_F(ExecutorTesterFeed, CPU) { ...@@ -292,13 +293,19 @@ TEST_F(ExecutorTesterFeed, CPU) {
delete executor; delete executor;
} }
#else
#ifdef PADDLE_WITH_CUDA
TEST_F(ExecutorTesterRandom, GPU) { TEST_F(ExecutorTesterRandom, GPU) {
std::vector<Place> places; std::vector<Place> places;
GPUPlace gpu_place(0); GPUPlace gpu_place(0);
places.push_back(gpu_place); places.push_back(gpu_place);
// We have a global Scope and BuddyAllocator, and we must ensure that
// the global BuddyAllocator is initialized before the global Scope, so
// that the global Scope is destructed before the BuddyAllocator.
// Otherwise, a "pointer being freed was not allocated" error will appear.
// If paddle is compiled with GPU support, both the CPU and the GPU
// BuddyAllocator need to be touched first.
paddle::memory::Used(CPUPlace());
paddle::memory::Used(gpu_place); paddle::memory::Used(gpu_place);
Executor* executor = new Executor(places); Executor* executor = new Executor(places);
...@@ -310,7 +317,13 @@ TEST_F(ExecutorTesterFeed, GPU) { ...@@ -310,7 +317,13 @@ TEST_F(ExecutorTesterFeed, GPU) {
std::vector<Place> places; std::vector<Place> places;
GPUPlace gpu_place(0); GPUPlace gpu_place(0);
places.push_back(gpu_place); places.push_back(gpu_place);
// We have a global Scope and BuddyAllocator, and we must ensure that
// the global BuddyAllocator is initialized before the global Scope, so
// that the global Scope is destructed before the BuddyAllocator.
// Otherwise, a "pointer being freed was not allocated" error will appear.
// If paddle is compiled with GPU support, both the CPU and the GPU
// BuddyAllocator need to be touched first.
paddle::memory::Used(CPUPlace());
paddle::memory::Used(gpu_place); paddle::memory::Used(gpu_place);
Executor* executor = new Executor(places); Executor* executor = new Executor(places);
......
...@@ -35,8 +35,6 @@ class FetchOp : public framework::OperatorWithKernel { ...@@ -35,8 +35,6 @@ class FetchOp : public framework::OperatorWithKernel {
} }
auto input_dim = ctx->GetInputDim("Input"); auto input_dim = ctx->GetInputDim("Input");
framework::Tensor tmp;
tmp.Resize(input_dim);
(*tensors)[col].Resize(input_dim); (*tensors)[col].Resize(input_dim);
// TODO(qijun): need to handle LodTensor later // TODO(qijun): need to handle LodTensor later
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册