未验证 提交 f74f9762 编写于 作者: T Thomas Young 提交者: GitHub

fix the XXX_GRAD_CASE bug by HexToString (#32004)

上级 297290a8
...@@ -25,7 +25,14 @@ limitations under the License. */ ...@@ -25,7 +25,14 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define EXPAND_AS_TEMPLATE(z, n, data) \ #define EXPAND_AS_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
ExpandAs<n + 1>(context); \ ExpandAs<n + 1>(context); \
...@@ -33,10 +40,10 @@ limitations under the License. */ ...@@ -33,10 +40,10 @@ limitations under the License. */
} }
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~) #define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_AS_GRAD_CASE(n) \ #define EXPAND_AS_GRAD_CASE(n) \
case n: { \ case n + 1: { \
ExpandAsBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \ ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \ break; \
} }
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \ #define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), ) BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
...@@ -145,6 +152,18 @@ class ExpandAsGradKernel : public framework::OpKernel<T> { ...@@ -145,6 +152,18 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
framework::TensorCopy(*in0, context.GetPlace(), context.device_context(), framework::TensorCopy(*in0, context.GetPlace(), context.device_context(),
out0); out0);
} else { } else {
PADDLE_ENFORCE_GE(dims, 1,
platform::errors::InvalidArgument(
"The rank of the input 'Out@GRAD' for "
"expand_as_grad op must be greater than or "
"equal to 1, but the value received is %d.",
dims));
PADDLE_ENFORCE_LE(dims, MAX_RANK_SUPPORTED,
platform::errors::InvalidArgument(
"The rank of the input 'Out@GRAD' for "
"expand_as_grad op must be less than or equal "
"to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims));
switch (dims) { switch (dims) {
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
default: default:
......
...@@ -26,7 +26,14 @@ limitations under the License. */ ...@@ -26,7 +26,14 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define EXPAND_AS_TEMPLATE(z, n, data) \ #define EXPAND_AS_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
ExpandAs<n + 1>(context); \ ExpandAs<n + 1>(context); \
...@@ -34,10 +41,10 @@ limitations under the License. */ ...@@ -34,10 +41,10 @@ limitations under the License. */
} }
#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~) #define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_AS_GRAD_CASE(n) \ #define EXPAND_AS_GRAD_CASE(n) \
case n: { \ case n + 1: { \
ExpandAsBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \ ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \ break; \
} }
#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \ #define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), ) BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
...@@ -178,7 +185,14 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> { ...@@ -178,7 +185,14 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
"expand_as_v2_grad op must be less than or equal " "expand_as_v2_grad op must be less than or equal "
"to %d, but the value received is %d.", "to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims)); MAX_RANK_SUPPORTED, dims));
switch (dims) { REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) } switch (dims) {
REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"received tensor's rank = %d.",
dims));
}
} }
} }
......
...@@ -28,7 +28,14 @@ limitations under the License. */ ...@@ -28,7 +28,14 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define EXPAND_TEMPLATE(z, n, data) \ #define EXPAND_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
Expand<n + 1>(context); \ Expand<n + 1>(context); \
...@@ -36,10 +43,10 @@ limitations under the License. */ ...@@ -36,10 +43,10 @@ limitations under the License. */
} }
#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~) #define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_GRAD_CASE(n) \ #define EXPAND_GRAD_CASE(n) \
case n: { \ case n + 1: { \
ExpandBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \ ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \ break; \
} }
#define EXPAND_GRAD_TEMPLATE(z, n, data) \ #define EXPAND_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), ) BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
...@@ -219,7 +226,14 @@ class ExpandGradKernel : public framework::OpKernel<T> { ...@@ -219,7 +226,14 @@ class ExpandGradKernel : public framework::OpKernel<T> {
"for Op(expand_grad) must be less than or equal " "for Op(expand_grad) must be less than or equal "
"to %d, but the value received is %d.", "to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims)); MAX_RANK_SUPPORTED, dims));
switch (dims) { REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) } switch (dims) {
REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"received tensor's rank = %d.",
dims));
}
} }
} }
......
...@@ -29,7 +29,14 @@ limitations under the License. */ ...@@ -29,7 +29,14 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define EXPAND_TEMPLATE(z, n, data) \ #define EXPAND_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
Expand<n + 1>(context); \ Expand<n + 1>(context); \
...@@ -37,10 +44,10 @@ limitations under the License. */ ...@@ -37,10 +44,10 @@ limitations under the License. */
} }
#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~) #define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define EXPAND_GRAD_CASE(n) \ #define EXPAND_GRAD_CASE(n) \
case n: { \ case n + 1: { \
ExpandBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \ ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \ break; \
} }
#define EXPAND_GRAD_TEMPLATE(z, n, data) \ #define EXPAND_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), ) BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
...@@ -263,7 +270,14 @@ class ExpandV2GradKernel : public framework::OpKernel<T> { ...@@ -263,7 +270,14 @@ class ExpandV2GradKernel : public framework::OpKernel<T> {
"expand_v2_grad op must be less than or equal " "expand_v2_grad op must be less than or equal "
"to %d, but the value received is %d.", "to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims)); MAX_RANK_SUPPORTED, dims));
switch (dims) { REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) } switch (dims) {
REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"received tensor's rank = %d.",
dims));
}
} }
} }
......
...@@ -29,7 +29,14 @@ ...@@ -29,7 +29,14 @@
#include "paddle/fluid/platform/errors.h" #include "paddle/fluid/platform/errors.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define MESHGRID_TEMPLATE(z, n, data) \ #define MESHGRID_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
MeshgridForward<n + 1>(context); \ MeshgridForward<n + 1>(context); \
...@@ -38,10 +45,10 @@ ...@@ -38,10 +45,10 @@
#define REP_MESHGRID_TEMPLATE(n) BOOST_PP_REPEAT(n, MESHGRID_TEMPLATE, ~) #define REP_MESHGRID_TEMPLATE(n) BOOST_PP_REPEAT(n, MESHGRID_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define MESHGRID_GRAD_CASE(n) \ #define MESHGRID_GRAD_CASE(n) \
case n: { \ case n + 1: { \
MeshgridBackward<n>(context); \ MeshgridBackward<n + 1>(context); \
break; \ break; \
} }
#define MESHGRID_GRAD_TEMPLATE(z, n, data) \ #define MESHGRID_GRAD_TEMPLATE(z, n, data) \
BOOST_PP_IF(COND(n), MESHGRID_GRAD_CASE(n), ) BOOST_PP_IF(COND(n), MESHGRID_GRAD_CASE(n), )
......
...@@ -29,7 +29,14 @@ limitations under the License. */ ...@@ -29,7 +29,14 @@ limitations under the License. */
#include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/eigen/eigen_function.h"
#define MAX_RANK_SUPPORTED 6 #define MAX_RANK_SUPPORTED 6
// 1. The BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
//    Usage: BOOST_PP_REPEAT(count, macro, data).
//    This macro expands to the sequence:
//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
// 2. In our case, count = MAX_RANK_SUPPORTED (which is 6),
//    so the range of n is 0-5 (i.e. 0 to count - 1).
//    We want to generate case 1-6 instead of case 0-5,
//    so we need to change n to n + 1.
#define TILE_TEMPLATE(z, n, data) \ #define TILE_TEMPLATE(z, n, data) \
case n + 1: { \ case n + 1: { \
Tile<n + 1>(context); \ Tile<n + 1>(context); \
...@@ -37,10 +44,10 @@ limitations under the License. */ ...@@ -37,10 +44,10 @@ limitations under the License. */
} }
#define REP_TILE_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_TEMPLATE, ~) #define REP_TILE_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_TEMPLATE, ~)
#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) #define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
#define TILE_GRAD_CASE(n) \ #define TILE_GRAD_CASE(n) \
case n: { \ case n + 1: { \
TileBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \ TileBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
break; \ break; \
} }
#define TILE_GRAD_TEMPLATE(z, n, data) BOOST_PP_IF(COND(n), TILE_GRAD_CASE(n), ) #define TILE_GRAD_TEMPLATE(z, n, data) BOOST_PP_IF(COND(n), TILE_GRAD_CASE(n), )
#define REP_TILE_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_GRAD_TEMPLATE, ~) #define REP_TILE_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_GRAD_TEMPLATE, ~)
...@@ -243,7 +250,14 @@ class TileGradKernel : public framework::OpKernel<T> { ...@@ -243,7 +250,14 @@ class TileGradKernel : public framework::OpKernel<T> {
"must be less than or equal " "must be less than or equal "
"to %d, but the value received is %d.", "to %d, but the value received is %d.",
MAX_RANK_SUPPORTED, dims)); MAX_RANK_SUPPORTED, dims));
switch (dims) { REP_TILE_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) } switch (dims) {
REP_TILE_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Only support tensor with rank being between 1 and 6. But "
"received tensor's rank = %d.",
dims));
}
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册