diff --git a/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h b/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h
index 311a429f9c307f3913a1ffe5dfb7d84119c9711e..4f7cfc24ec035349f3c85e84d876ad9b5b5493a6 100644
--- a/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h
+++ b/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h
@@ -38,12 +38,10 @@ class LoDTensorBlockingQueue {
 
  public:
   bool Push(const std::vector<framework::LoDTensor>& lod_tensor_vec) {
-    CheckDims(lod_tensor_vec);
     return queue_.Send(lod_tensor_vec);
   }
 
   bool Push(std::vector<framework::LoDTensor>&& lod_tensor_vec) {
-    CheckDims(lod_tensor_vec);
     return queue_.Send(std::move(lod_tensor_vec));
   }
 
@@ -65,21 +63,6 @@ class LoDTensorBlockingQueue {
   inline bool IsClosed() const { return queue_.IsClosed(); }
 
  private:
-  void CheckDims(
-      const std::vector<framework::LoDTensor>& lod_tensor_vec) const {
-    PADDLE_ENFORCE(dims_.size() == lod_tensor_vec.size(),
-                   "Expect input size is %d but found %s", dims_.size(),
-                   lod_tensor_vec.size());
-    for (size_t i = 0; i < dims_.size(); ++i) {
-      const auto& in_dims = framework::slice_ddim(
-          lod_tensor_vec[i].dims(), 1, lod_tensor_vec[i].dims().size());
-      const auto& expect_dims =
-          framework::slice_ddim(dims_[i], 1, dims_[i].size());
-      PADDLE_ENFORCE(in_dims == expect_dims,
-                     "Dims of the %d-th input tensor do not match", i);
-    }
-  }
-
   BlockingQueue<std::vector<framework::LoDTensor>> queue_;
   std::vector<framework::DDim> dims_;
 };
diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
index 918f3be533d51367eade5f5108ad2eab954a9303..a9fd1869c9df5464db6fc87ac633cdba2d6dbe7f 100644
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -216,7 +216,7 @@ class ReshapeKernel {
     if (shape_tensor) {
       auto *shape_data = shape_tensor->data<int>();
       framework::Tensor cpu_shape_tensor;
-      if (platform::is_gpu_place(ctx.GetPlace())) {
+      if (platform::is_gpu_place(shape_tensor->place())) {
         TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor);
         shape_data = cpu_shape_tensor.data<int>();
       }
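
Note on the reshape_op.cc change above: the Shape input can reside on the CPU even when the kernel itself runs on a CUDA place, so the copy into cpu_shape_tensor should be keyed on where the shape tensor actually lives, not on ctx.GetPlace(). The snippet below is a minimal standalone sketch of that decision rule under mocked types; Place, MockTensor, and ResolveShapeData are hypothetical names invented for illustration and are not Paddle's real API.

// Hypothetical sketch (not Paddle's API): copy shape data to the host only
// when the shape tensor itself lives on the GPU, regardless of exec place.
#include <cstdio>
#include <vector>

enum class Place { kCPU, kGPU };

struct MockTensor {
  Place place;            // where the buffer actually lives
  std::vector<int> data;  // shape values, e.g. {0, -1, 8}
};

// Returns host-readable shape values. A GPU-resident tensor is "copied" to
// the host first (stand-in for TensorCopySync); a CPU-resident tensor is read
// directly, no matter which place the kernel was dispatched to.
std::vector<int> ResolveShapeData(const MockTensor& shape_tensor,
                                  Place exec_place) {
  (void)exec_place;  // intentionally unused: the decision keys on the tensor
  if (shape_tensor.place == Place::kGPU) {
    std::vector<int> cpu_copy = shape_tensor.data;  // pretend device-to-host copy
    return cpu_copy;
  }
  return shape_tensor.data;
}

int main() {
  MockTensor cpu_shape{Place::kCPU, {0, -1, 8}};
  // With the old check (execution place), a GPU kernel would have treated this
  // host tensor as device memory; keying on the tensor's place avoids that.
  for (int d : ResolveShapeData(cpu_shape, Place::kGPU)) std::printf("%d ", d);
  std::printf("\n");
  return 0;
}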