Unverified commit 49523ea1 authored by Tao Luo, committed by GitHub

replace PADDLE_ASSERT with PADDLE_ASSERT_MSG (#19586)

* remove unused PADDLE_ASSERT(_IS_NOT_ERROR)

* replace PADDLE_ASSERT with PADDLE_ASSERT_MSG

test=develop
Parent abaf87be
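The change repeated across the hunks below is a straight call-site substitution. A condensed before/after, lifted from the cross-entropy kernel hunk (only the macro names and arguments matter here; this is not a standalone program):

```cpp
// Before: condition only -- on failure the macro prints the stringified expression.
PADDLE_ASSERT(label[i] >= 0 && label[i] < D || label[i] == ignore_index);

// After: same condition, plus a printf-style message carrying the actual values.
PADDLE_ASSERT_MSG(label[i] >= 0 && label[i] < D || label[i] == ignore_index,
                  "label[%d] expected >= 0 and < %ld, or == %ld, but got "
                  "%ld. Please check input value.",
                  i, D, ignore_index, label[i]);
```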
......@@ -28,7 +28,7 @@ using Tensor = framework::Tensor;
template <typename T>
struct TolerableValue {
HOSTDEVICE T operator()(const T& x) const {
PADDLE_ASSERT(std::is_floating_point<T>::value);
PADDLE_ENFORCE_EQ(std::is_floating_point<T>::value, true);
const T kApproInf = 1e20;
if (x == INFINITY) return kApproInf;
if (x == -INFINITY) return -kApproInf;
......
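An aside on the hunk above, not something this commit does: std::is_floating_point<T>::value is a compile-time constant, so the same requirement could also be stated with a static_assert, keeping the check out of the HOSTDEVICE call path entirely. A minimal sketch with a hypothetical struct name:

```cpp
#include <type_traits>

// Hypothetical sketch only; mirrors the constraint checked in the hunk above.
template <typename T>
struct TolerableValueSketch {
  static_assert(std::is_floating_point<T>::value,
                "TolerableValue is only meaningful for floating-point types.");
  // operator() body as in the hunk above ...
};
```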
......@@ -27,7 +27,10 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label,
const int ignore_index) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
PADDLE_ASSERT(label[i] >= 0 && label[i] < D || label[i] == ignore_index);
PADDLE_ASSERT_MSG(label[i] >= 0 && label[i] < D || label[i] == ignore_index,
"label[%d] expected >= 0 and < %ld, or == %ld, but got "
"%ld. Please check input value.",
i, D, ignore_index, label[i]);
Y[i] = ignore_index == label[i]
? static_cast<T>(0)
: -math::TolerableValue<T>()(real_log(X[i * D + label[i]]));
......
......@@ -25,7 +25,8 @@ namespace math {
template <typename T>
struct TolerableValue {
HOSTDEVICE T operator()(const T& x) const {
PADDLE_ASSERT(std::is_floating_point<T>::value);
PADDLE_ASSERT_MSG(std::is_floating_point<T>::value,
"TolerableValue should be float in cross_entropy.");
const T kApproInf = 1e20;
if (x == INFINITY) return kApproInf;
......
......@@ -37,7 +37,10 @@ __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
int cidx = boffset / in_c_stride;
int out_offset = bidx * out_n_stride + cidx * out_c_stride;
int out_index = indices_data[i];
PADDLE_ASSERT(out_index < out_c_stride);
PADDLE_ASSERT_MSG(out_index < out_c_stride,
"out_index < out_c_stride. Expected %ld < %ld, but got "
"%ld >= %ld. Please check input value.",
out_index, out_c_stride, out_index, out_c_stride);
output_data[out_offset + out_index] = input_data[i];
}
}
......@@ -59,7 +62,10 @@ __global__ void KernelUnpool2dMaxGrad(
int cidx = boffset / in_c_stride;
int out_offset = bidx * out_n_stride + cidx * out_c_stride;
int out_index = indices_data[i];
PADDLE_ASSERT(out_index < out_c_stride);
PADDLE_ASSERT_MSG(out_index < out_c_stride,
"out_index < out_c_stride. Expected %ld < %ld, but got "
"%ld >= %ld. Please check input value.",
out_index, out_c_stride, out_index, out_c_stride);
input_grad[i] = output_grad[out_offset + out_index];
}
}
......
......@@ -29,7 +29,10 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T>
struct CheckLabelValue {
HOSTDEVICE T operator()(const T& val) const {
PADDLE_ASSERT(val == static_cast<T>(0) || val == static_cast<T>(1));
PADDLE_ASSERT_MSG(val == static_cast<T>(0) || val == static_cast<T>(1),
"LabelValue of modified_huber_loss_op expected to be 0 "
"or 1, but got %ld. Please check input value.",
val);
}
};
......
......@@ -60,7 +60,16 @@ HOSTDEVICE inline void StridedMemcpy(const T* x, const size_t* x_dims, T* out,
size_t offset_i = offsets[i];
if (i == rank - 1) {
PADDLE_ASSERT(x_stride == 1 && out_stride == 1);
PADDLE_ASSERT_MSG(x_stride == 1,
"When i:%d == rank:%d - 1, x_stride of random_crop_op "
"expected to be 1, but got %ld. Please check input "
"value.",
i, rank, x_stride);
PADDLE_ASSERT_MSG(out_stride == 1,
"When i:%d == rank:%d - 1, out_stride of random_crop_op "
"expected to be 1, but got %ld. Please check input "
"value.",
i, rank, out_stride);
x += offset_i;
for (size_t j = 0; j < out_dim_i; ++j) {
*out++ = *x++;
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
......@@ -33,7 +34,8 @@ using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T>
struct TolerableValue {
HOSTDEVICE T operator()(const T& x) const {
PADDLE_ASSERT(std::is_floating_point<T>::value);
PADDLE_ASSERT_MSG(std::is_floating_point<T>::value,
"TolerableValue should be float in sample_logits_op.");
const T kApproInf = 1e20;
if (x == INFINITY) return kApproInf;
if (x == -INFINITY) return -kApproInf;
......
......@@ -28,15 +28,6 @@ limitations under the License. */
#define EXIT() throw std::runtime_error("Exception encounter.")
#endif
#define PADDLE_ASSERT(_IS_NOT_ERROR) \
do { \
if (!(_IS_NOT_ERROR)) { \
printf("Exception: %s:%d Assertion `%s` failed.\n", __FILE__, __LINE__, \
TOSTRING(_IS_NOT_ERROR)); \
EXIT(); \
} \
} while (0)
// NOTE: PADDLE_ASSERT is mainly used in CUDA Kernel or HOSTDEVICE function.
#define PADDLE_ASSERT_MSG(_IS_NOT_ERROR, __FORMAT, ...) \
do { \
......
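The body of PADDLE_ASSERT_MSG is collapsed in this view, so only its signature is visible. For readers unfamiliar with the pattern, here is a self-contained sketch of a message-taking variant of the removed macro, using hypothetical DEMO_* names and an assumed body that mimics, rather than reproduces, the real one:

```cpp
#include <cstdio>
#include <stdexcept>

#define DEMO_STRINGIFY(x) #x
#define DEMO_TOSTRING(x) DEMO_STRINGIFY(x)
#define DEMO_EXIT() throw std::runtime_error("Exception encounter.")

// Same do/while(0) shape as the removed PADDLE_ASSERT, extended with a
// printf-style format string; ##__VA_ARGS__ lets callers omit extra arguments.
#define DEMO_ASSERT_MSG(_IS_NOT_ERROR, __FORMAT, ...)                          \
  do {                                                                         \
    if (!(_IS_NOT_ERROR)) {                                                    \
      printf("Exception: %s:%d Assertion `%s` failed. " __FORMAT "\n",         \
             __FILE__, __LINE__, DEMO_TOSTRING(_IS_NOT_ERROR), ##__VA_ARGS__); \
      DEMO_EXIT();                                                             \
    }                                                                          \
  } while (0)

int main() {
  int label = 7, D = 5, ignore_index = -100;
  DEMO_ASSERT_MSG(label >= 0 && label < D || label == ignore_index,
                  "label expected >= 0 and < %d, or == %d, but got %d.",
                  D, ignore_index, label);
  return 0;
}
```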
......@@ -48,7 +48,6 @@ if(NOT WITH_GPU OR WIN32)
LIST(REMOVE_ITEM TEST_OPS test_pipeline)
endif()
list(REMOVE_ITEM TEST_OPS test_seq_concat_op) # FIXME(helin): https://github.com/PaddlePaddle/Paddle/issues/8290
list(REMOVE_ITEM TEST_OPS test_modified_huber_loss_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5184
list(REMOVE_ITEM TEST_OPS test_lstm_unit_op) # # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5185
list(REMOVE_ITEM TEST_OPS test_cond_op) # FIXME(qijun): https://github.com/PaddlePaddle/Paddle/issues/5101#issuecomment-339814957
......