diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 037d2e41b36ae1c9dc07995ae11ed952de4c4612..03f083cdbd9084005052a0284152eb4f45f16915 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -341,7 +341,7 @@ class ExecutionContext {
 #ifdef PADDLE_WITH_CUDA
   const inline platform::CUDADeviceContext& cuda_device_context() const {
-    PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace()));
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
     return *reinterpret_cast<const platform::CUDADeviceContext*>(
         &device_context_);
   }
 #endif
diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc
index 7aa1419126d31ec89fc46bbaa3b23b7516f3ab27..ecedb7d70ffd125c6f1bcea3b2ec460894269b75 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc
@@ -18,7 +18,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/conv_cudnn_helper.h"
 #include "paddle/fluid/operators/conv_cudnn_op_cache.h"
 #include "paddle/fluid/operators/conv_op.h"
-#include "paddle/fluid/platform/assert.h"
 #include "paddle/fluid/platform/cudnn_helper.h"
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/fluid/platform/float16.h"
diff --git a/paddle/fluid/operators/gather.h b/paddle/fluid/operators/gather.h
index d2f519c162f5e0ce49ceca861070a83b49f2db0d..26fb93c2ebb295fc73832d50c2f8472e96bcb25f 100644
--- a/paddle/fluid/operators/gather.h
+++ b/paddle/fluid/operators/gather.h
@@ -36,10 +36,16 @@ using framework::Tensor;
 template <typename T, typename IndexT = int>
 void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
                const Tensor& index, Tensor* output) {
-  PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()));
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true);
   // check index of shape 1-D
-  PADDLE_ENFORCE(index.dims().size() == 1 ||
-                 (index.dims().size() == 2 && index.dims()[1] == 1));
+  if (index.dims().size() == 2) {
+    PADDLE_ENFORCE_EQ(index.dims()[1], 1,
+                      "index.dims()[1] should be 1 when index.dims().size() == "
+                      "2 in gather_op.");
+  } else {
+    PADDLE_ENFORCE_EQ(index.dims().size(), 1,
+                      "index.dims().size() should be 1 or 2 in gather_op.");
+  }
   int64_t index_size = index.dims()[0];
 
   auto src_dims = src.dims();
diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu
index acf094238fff92711edf00b4180266138362add1..87451cb1271cb68e6fc5e2f969e8028fac27d0c3 100644
--- a/paddle/fluid/operators/lstm_unit_op.cu
+++ b/paddle/fluid/operators/lstm_unit_op.cu
@@ -19,7 +19,6 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.c
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/cross_entropy_op.h"
 #include "paddle/fluid/operators/lstm_unit_op.h"
-#include "paddle/fluid/platform/assert.h"
 #include "paddle/fluid/platform/hostdevice.h"
 
 namespace paddle {
diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h
index 2f7aeb70585dfe38d109fc5f6c24fd58c26288b9..8bc1bd720cedb63a8568196acd021fc80a8b6671 100644
--- a/paddle/fluid/operators/math/blas_impl.h
+++ b/paddle/fluid/operators/math/blas_impl.h
@@ -666,7 +666,11 @@ void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a,
                 mat_b.data<T>(), beta, mat_out->data<T>());
   } else {
     PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ ||
-                   dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0);
+                       dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0,
+                   "dim_a.batch_size should be equal to dim_b.batch_size, or "
+                   "one of dim_a.batch_size and dim_b.batch_size should be 0. "
+                   "But got dim_a.batch_size = %d, dim_b.batch_size = %d.",
+                   dim_a.batch_size_, dim_b.batch_size_);
     this->template BatchedGEMM<T>(
         transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha,
         mat_a.data<T>(), mat_b.data<T>(), beta, mat_out->data<T>(),
diff --git a/paddle/fluid/operators/scatter.cu.h b/paddle/fluid/operators/scatter.cu.h
index f8d08b2e44c9626f92337e0f20c7517432125349..8d28173c8edbb44ae8eca0f0ec269a8ee9ae123d 100644
--- a/paddle/fluid/operators/scatter.cu.h
+++ b/paddle/fluid/operators/scatter.cu.h
@@ -90,12 +90,16 @@ template <typename T, typename IndexT = int>
 void GPUScatterAssign(const framework::ExecutionContext& context,
                       const Tensor& src, const Tensor& index, Tensor* output,
                       bool overwrite = true) {
-  // PADDLE_ENFORCE(platform::is_gpu_place(place));
   // check index of shape 1-D
-  const auto& ctx = context.device_context();
-  PADDLE_ENFORCE(index.dims().size() == 1 ||
-                 (index.dims().size() == 2 && index.dims()[1] == 1));
+  if (index.dims().size() == 2) {
+    PADDLE_ENFORCE_EQ(index.dims()[1], 1,
+                      "index.dims()[1] should be 1 when index.dims().size() == "
+                      "2 in scatter_op.");
+  } else {
+    PADDLE_ENFORCE_EQ(index.dims().size(), 1,
+                      "index.dims().size() should be 1 or 2 in scatter_op.");
+  }
 
   int index_size = index.dims()[0];
 
   auto src_dims = src.dims();
diff --git a/paddle/fluid/operators/scatter.h b/paddle/fluid/operators/scatter.h
index 6d9d1863c27ec53beeee86ebd14c01c4ee914e92..2a88b96dd8b4b17327f5e727b4e23bf9d707efa3 100644
--- a/paddle/fluid/operators/scatter.h
+++ b/paddle/fluid/operators/scatter.h
@@ -73,10 +73,16 @@ elementwise_inner_add(const framework::ExecutionContext& ctx,
 template <typename T, typename IndexT = int>
 void ScatterAssign(const platform::DeviceContext& ctx, const Tensor& src,
                    const Tensor& index, Tensor* output) {
-  PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()));
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true);
   // check index of shape 1-D
-  PADDLE_ENFORCE(index.dims().size() == 1 ||
-                 (index.dims().size() == 2 && index.dims()[1] == 1));
+  if (index.dims().size() == 2) {
+    PADDLE_ENFORCE_EQ(index.dims()[1], 1,
+                      "index.dims()[1] should be 1 when index.dims().size() == "
+                      "2 in scatter_op.");
+  } else {
+    PADDLE_ENFORCE_EQ(index.dims().size(), 1,
+                      "index.dims().size() should be 1 or 2 in scatter_op.");
+  }
   int index_size = index.dims()[0];
 
   auto src_dims = src.dims();
@@ -88,7 +94,7 @@ void ScatterAssign(const platform::DeviceContext& ctx, const Tensor& src,
 
   // check src shape and dst shape should match
   for (int i = 1; i < src_dims.size(); i++)
-    PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
+    PADDLE_ENFORCE_EQ(src_dims[i], dst_dims[i]);
 
   // slice size
   size_t slice_size = 1;
@@ -105,10 +111,12 @@ void ScatterAssign(const platform::DeviceContext& ctx, const Tensor& src,
 template <typename T, typename IndexT = int>
 void ScatterAssignAdd(const framework::ExecutionContext& ctx, const Tensor& src,
                       const Tensor& index, Tensor* output) {
-  PADDLE_ENFORCE(platform::is_cpu_place(ctx.device_context().GetPlace()));
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.device_context().GetPlace()),
+                    true);
   // check index of shape 1-D
   PADDLE_ENFORCE(index.dims().size() == 1 ||
-                 (index.dims().size() == 2 && index.dims()[1] == 1));
+                     (index.dims().size() == 2 && index.dims()[1] == 1),
+                 "");
   int index_size = index.dims()[0];
 
   auto src_dims = src.dims();
@@ -122,7 +130,7 @@ void ScatterAssignAdd(const framework::ExecutionContext& ctx, const Tensor& src,
 
   // check src shape and dst shape should match
   for (int i = 1; i < src_dims.size(); i++)
-    PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
+    PADDLE_ENFORCE_EQ(src_dims[i], dst_dims[i]);
 
   // slice size
   size_t slice_size = 1;