Unverified · Commit 3669868d, authored by zhouweiwei2014, committed by GitHub

revert reshape 0 represent copy and support perm < 0 for paddle.transpose (#50720)

Parent a5827f0e
@@ -38,8 +38,8 @@ class TransposeOp : public framework::OperatorWithKernel {
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
 
-    size_t x_rank = x_dims.size();
-    size_t axis_size = axis.size();
+    int x_rank = x_dims.size();
+    int axis_size = axis.size();
 
     // Note: x_rank > axis_size when fuse squeeze2 + transpose2, else x_rank ==
     // axis_size
@@ -53,31 +53,38 @@ class TransposeOp : public framework::OperatorWithKernel {
             x_rank,
             axis_size));
 
+    std::vector<int> formated_axis = axis;
     std::vector<int> count(axis_size, 0);
-    for (size_t i = 0; i < axis_size; i++) {
-      PADDLE_ENFORCE_GE(axis[i],
-                        0,
-                        platform::errors::InvalidArgument(
-                            "The axis should be greater than or equal to 0."
-                            "But received %d of axis[%d]",
-                            axis[i],
-                            i));
-      PADDLE_ENFORCE_EQ(
-          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-          true,
-          platform::errors::InvalidArgument(
-              "Each element of Attribute axis should "
-              "be a unique value range from 0 to (dims - 1), "
-              "where the dims is the axis's size, "
-              "unique value means this axis value can appear only once. "
-              "But received axis[%d] is %d, axis_size is %d, "
-              "count[axis[%d]] is %d",
-              i,
-              axis[i],
-              axis_size,
-              axis[i],
-              count[axis[i]]));
+    for (int i = 0; i < axis_size; i++) {
+      PADDLE_ENFORCE_LT(axis[i],
+                        axis_size,
+                        platform::errors::InvalidArgument(
+                            "The reduce dim index %d should be in the "
+                            "range [ -dimension(X), dimension(X) ) "
+                            "which dimesion = %d. But received dim index = %d.",
+                            i,
+                            axis_size,
+                            axis[i]));
+      PADDLE_ENFORCE_GE(axis[i],
+                        -axis_size,
+                        platform::errors::InvalidArgument(
+                            "The reduce dim index %d should be in the "
+                            "range [ -dimension(X), dimension(X) ) "
+                            "which dimesion = %d. But received dim index = %d.",
+                            i,
+                            axis_size,
+                            axis[i]));
+
+      if (axis[i] < 0) {
+        formated_axis[i] = axis[i] + axis_size;
+      }
+      PADDLE_ENFORCE_EQ(++count[formated_axis[i]],
+                        1,
+                        platform::errors::InvalidArgument(
+                            "Each element of axis should be unique. but "
+                            "axis[%d] is %d appear not only once",
+                            i,
+                            axis[i]));
     }
 
     framework::DDim out_dims(x_dims);
@@ -94,8 +101,8 @@ class TransposeOp : public framework::OperatorWithKernel {
          << "Rotating Shape in Transpose from: kMKLDNN to: kNHWC output_shape";
     }
 #endif
-    for (size_t i = 0; i < axis_size; i++) {
-      out_dims[i] = x_dims[axis[i]];
+    for (int i = 0; i < axis_size; i++) {
+      out_dims[i] = x_dims[formated_axis[i]];
     }
     ctx->SetOutputDim("Out", out_dims);
   }
......
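The validation rewritten above replaces the old non-negative-axis requirement with a range check over [ -rank, rank ) plus a normalization pass, so `paddle.transpose` now accepts a perm such as (-1, 1, -3). Below is a minimal standalone sketch of that `formated_axis` logic; `NormalizePerm` is our own illustrative helper, not a Paddle API, and it assumes `axis.size()` equals the tensor rank, as this InferShape path enforces.

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Map each axis entry from [-rank, rank) into [0, rank), then require every
// output dimension to be named exactly once -- the same checks the
// PADDLE_ENFORCE_* calls above perform.
std::vector<int> NormalizePerm(const std::vector<int>& axis, int rank) {
  std::vector<int> formated_axis = axis;
  std::vector<int> count(rank, 0);
  for (size_t i = 0; i < axis.size(); ++i) {
    assert(axis[i] >= -rank && axis[i] < rank);  // range check
    if (axis[i] < 0) {
      formated_axis[i] = axis[i] + rank;  // e.g. -1 becomes rank - 1
    }
    assert(++count[formated_axis[i]] == 1);  // uniqueness check
  }
  return formated_axis;
}

int main() {
  // The perm (-1, 1, -3) from the new tests normalizes to (2, 1, 0).
  for (int p : NormalizePerm({-1, 1, -3}, 3)) std::printf("%d ", p);
}
```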
@@ -1658,25 +1658,17 @@ static phi::DDim ValidateShape(const std::vector<int64_t> shape,
               phi::make_ddim(shape),
               i));
       unk_dim_idx = i;
+      output_shape[i] = shape[i];
     } else if (shape[i] == 0) {
-      // for 0-Size Tensor, 0 is 0
-      // for not 0-Size Tensor, 0 represent copy origin shape
-      if (in_size > 0) {
-        PADDLE_ENFORCE_LT(
-            static_cast<int>(i),
-            in_dims.size(),
-            phi::errors::InvalidArgument(
-                "The index of 0 in `shape` must be less than "
-                "the input tensor X's dimensions. "
-                "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
-                "X's dimensions = %d.",
-                phi::make_ddim(shape),
-                i,
-                in_dims,
-                in_dims.size()));
+      if (static_cast<int>(i) < in_dims.size()) {
         output_shape[i] = in_dims[i];
       } else {
-        output_shape[i] = shape[i];
+        PADDLE_ENFORCE_EQ(
+            in_size,
+            0,
+            phi::errors::InvalidArgument("If The index of 0 in `shape` >= "
+                                         "the input tensor X's dimensions, "
+                                         "It can only be Zero-Sized Tensor"));
       }
       capacity *= output_shape[i];
     } else {
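This hunk reverts the zero-size-tensor special case: `shape[i] == 0` again means "copy the input's dimension at this index" whenever the index is within the input rank, and a `0` beyond the input rank is only accepted for zero-size tensors. The sketch below models the restored rule in isolation; `InferReshape` is a hypothetical stand-in for `ValidateShape` that handles at most one `-1` and skips the other error checks.

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Model of the restored semantics: 0 copies in_dims[i]; one -1 is inferred
// from the remaining capacity.
std::vector<long> InferReshape(const std::vector<long>& shape,
                               const std::vector<long>& in_dims) {
  long in_size = 1;
  for (long d : in_dims) in_size *= d;
  std::vector<long> out(shape.size());
  long capacity = 1;
  int unk = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      unk = static_cast<int>(i);  // deferred; assume at most one -1
      continue;
    }
    if (shape[i] == 0) {
      // 0 past the input rank is only legal for zero-size tensors.
      assert(i < in_dims.size() || in_size == 0);
      out[i] = i < in_dims.size() ? in_dims[i] : 0;
    } else {
      out[i] = shape[i];
    }
    capacity *= out[i];
  }
  if (unk >= 0) out[unk] = capacity != 0 ? in_size / capacity : 0;
  return out;
}

int main() {
  // Reshaping a [4, 6] tensor with shape (0, -1, 2): 0 copies 4, -1 becomes 3.
  for (long d : InferReshape({0, -1, 2}, {4, 6})) std::printf("%ld ", d);
}
```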
@@ -4233,8 +4225,8 @@ void TransposeInferMeta(const MetaTensor& x,
                         const std::vector<int>& axis,
                         MetaTensor* out) {
   auto x_dims = x.dims();
-  size_t x_rank = x_dims.size();
-  size_t axis_size = axis.size();
+  int x_rank = x_dims.size();
+  int axis_size = axis.size();
 
   PADDLE_ENFORCE_EQ(
       x_rank,
@@ -4246,36 +4238,43 @@ void TransposeInferMeta(const MetaTensor& x,
           x_rank,
           axis_size));
 
+  std::vector<int> formated_axis = axis;
   std::vector<int> count(axis_size, 0);
-  for (size_t i = 0; i < axis_size; i++) {
-    PADDLE_ENFORCE_GE(
-        axis[i],
-        0,
-        errors::InvalidArgument("The axis should be greater than or equal to 0."
-                                "But received %d of axis[%d]",
-                                axis[i],
-                                i));
-    PADDLE_ENFORCE_EQ(
-        axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-        true,
-        errors::InvalidArgument(
-            "Each element of Attribute axis should "
-            "be a unique value range from 0 to (dims - 1), "
-            "where the dims is the axis's size, "
-            "unique value means this axis value can appear only once. "
-            "But received axis[%d] is %d, axis_size is %d, "
-            "count[axis[%d]] is %d",
-            i,
-            axis[i],
-            axis_size,
-            axis[i],
-            count[axis[i]]));
+  for (int i = 0; i < axis_size; i++) {
+    PADDLE_ENFORCE_LT(axis[i],
+                      x_rank,
+                      errors::InvalidArgument(
+                          "The reduce dim index %d should be in the "
+                          "range [ -dimension(X), dimension(X) ) "
+                          "which dimesion = %d. But received dim index = %d.",
+                          i,
+                          x_rank,
+                          axis[i]));
+    PADDLE_ENFORCE_GE(axis[i],
+                      -x_rank,
+                      errors::InvalidArgument(
+                          "The reduce dim index %d should be in the "
+                          "range [ -dimension(X), dimension(X) ) "
+                          "which dimesion = %d. But received dim index = %d.",
+                          i,
+                          x_rank,
+                          axis[i]));
+
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + x_rank;
+    }
+    PADDLE_ENFORCE_EQ(
+        ++count[formated_axis[i]],
+        1,
+        errors::InvalidArgument("Each element of axis should be unique. but "
+                                "axis[%d] is %d appear not only once",
+                                i,
+                                axis[i]));
   }
 
   phi::DDim out_dims(x_dims);
-  for (size_t i = 0; i < axis_size; ++i) {
-    out_dims[i] = x_dims[axis[i]];
+  for (int i = 0; i < axis_size; ++i) {
+    out_dims[i] = x_dims[formated_axis[i]];
   }
   out->set_dims(out_dims);
@@ -4285,9 +4284,17 @@ void TransposeInferMeta(const MetaTensor& x,
 void TransposeGradInferMeta(const MetaTensor& x,
                             const std::vector<int>& axis,
                             MetaTensor* out) {
-  std::vector<int> reversed_axis(axis);
+  size_t x_rank = x.dims().size();
+  std::vector<int> formated_axis = axis;
   for (size_t i = 0; i < axis.size(); i++) {
-    reversed_axis[axis[i]] = i;
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + x_rank;
+    }
+  }
+
+  std::vector<int> reversed_axis(axis);
+  for (size_t i = 0; i < formated_axis.size(); i++) {
+    reversed_axis[formated_axis[i]] = i;
   }
+
   TransposeInferMeta(x, reversed_axis, out);
......
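`TransposeGradInferMeta` above now normalizes the forward perm before inverting it, because the gradient of a transpose is a transpose by the inverse permutation: if the forward pass put input dim `perm[i]` at output position `i`, the backward pass must send it back. A small sketch of why `reversed_axis[formated_axis[i]] = i` computes that inverse (`InversePerm` is our own name, not Paddle's):

```cpp
#include <cstdio>
#include <vector>

// Invert an already-normalized permutation: reversed[perm[i]] = i.
std::vector<int> InversePerm(const std::vector<int>& perm) {
  std::vector<int> reversed(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    reversed[perm[i]] = static_cast<int>(i);
  }
  return reversed;
}

int main() {
  // Forward perm (2, 0, 1) has inverse (1, 2, 0); composing the two
  // transposes restores the original axis order.
  for (int p : InversePerm({2, 0, 1})) std::printf("%d ", p);
}
```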
@@ -20,7 +20,6 @@
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
-#include "paddle/phi/kernels/impl/transpose_grad_kernel_impl.h"
 
 namespace phi {
@@ -29,45 +28,54 @@ void TransposeKernel(const Context& ctx,
                      const DenseTensor& x,
                      const std::vector<int>& axis,
                      DenseTensor* out) {
+  size_t x_rank = x.dims().size();
+  std::vector<int> formated_axis = axis;
+  for (size_t i = 0; i < axis.size(); i++) {
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + x_rank;
+    }
+  }
+
   ctx.template Alloc<T>(out);
   if (out->numel() == 0) {
     return;
   }
-  int rank = axis.size();
+  int rank = formated_axis.size();
   switch (rank) {
     case 0:
       phi::Copy<Context>(ctx, x, ctx.GetPlace(), false, out);
       break;
     case 1:
       funcs::Transpose<Context, T, 1> trans1;
-      trans1(ctx, x, out, axis);
+      trans1(ctx, x, out, formated_axis);
       break;
     case 2:
       funcs::Transpose<Context, T, 2> trans2;
-      trans2(ctx, x, out, axis);
+      trans2(ctx, x, out, formated_axis);
       break;
     case 3:
       funcs::Transpose<Context, T, 3> trans3;
-      trans3(ctx, x, out, axis);
+      trans3(ctx, x, out, formated_axis);
       break;
     case 4:
       funcs::Transpose<Context, T, 4> trans4;
-      trans4(ctx, x, out, axis);
+      trans4(ctx, x, out, formated_axis);
       break;
     case 5:
       funcs::Transpose<Context, T, 5> trans5;
-      trans5(ctx, x, out, axis);
+      trans5(ctx, x, out, formated_axis);
       break;
     case 6:
       funcs::Transpose<Context, T, 6> trans6;
-      trans6(ctx, x, out, axis);
+      trans6(ctx, x, out, formated_axis);
       break;
     default:
       // for rank >= 7 situation
       funcs::TransposeNormal<Context, T> trans_normal;
-      trans_normal(ctx, x, out, axis);
+      trans_normal(ctx, x, out, formated_axis);
   }
 }
 
 }  // namespace phi
 
 PD_REGISTER_KERNEL(transpose,
......
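The CPU kernel dispatches on the runtime rank to fixed-rank `funcs::Transpose<Context, T, Rank>` instantiations (falling back to `TransposeNormal` above rank 6), so the per-element index arithmetic can be unrolled at compile time. A toy version of that pattern follows; the names are illustrative, and unlike the real functor it only permutes the dims rather than moving data.

```cpp
#include <array>
#include <cstdio>
#include <vector>

// Fixed-rank "transpose": Rank is a compile-time constant, so the loops
// below can be fully unrolled by the compiler.
template <int Rank>
void TransposeFixedRank(const std::vector<int>& dims,
                        const std::vector<int>& perm) {
  std::array<int, Rank> out_dims{};
  for (int i = 0; i < Rank; ++i) out_dims[i] = dims[perm[i]];
  for (int i = 0; i < Rank; ++i) std::printf("%d ", out_dims[i]);
  std::printf("\n");
}

// A runtime switch picks the instantiation, mirroring the kernel's
// switch (rank) above.
void Dispatch(const std::vector<int>& dims, const std::vector<int>& perm) {
  switch (static_cast<int>(perm.size())) {
    case 1: TransposeFixedRank<1>(dims, perm); break;
    case 2: TransposeFixedRank<2>(dims, perm); break;
    case 3: TransposeFixedRank<3>(dims, perm); break;
    default: std::printf("rank-generic fallback\n");  // like TransposeNormal
  }
}

int main() {
  Dispatch({10, 8, 2}, {2, 1, 0});  // prints: 2 8 10
}
```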
@@ -30,15 +30,23 @@ void TransposeKernel(const Context& ctx,
                      const DenseTensor& x,
                      const std::vector<int>& axis,
                      DenseTensor* out) {
+  size_t x_rank = x.dims().size();
+  std::vector<int> formated_axis = axis;
+  for (size_t i = 0; i < axis.size(); i++) {
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + x_rank;
+    }
+  }
+
   ctx.template Alloc<T>(out);
   if (out->numel() == 0) {
     return;
   }
-  if (axis.size() == 0) {
+  if (formated_axis.size() == 0) {
     phi::Copy<Context>(ctx, x, ctx.GetPlace(), false, out);
     return;
   }
-  phi::funcs::TransposeGPUKernelDriver<T>(ctx, x, axis, out);
+  phi::funcs::TransposeGPUKernelDriver<T>(ctx, x, formated_axis, out);
 }
 
 }  // namespace phi
......
@@ -25,11 +25,18 @@ void TransposeGradKernel(const Context& dev_ctx,
                          const DenseTensor& out_grad,
                          const std::vector<int>& axis,
                          DenseTensor* x_grad) {
-  std::vector<int> reversed_axis(axis);
+  size_t axis_size = axis.size();
+  std::vector<int> formated_axis = axis;
+  for (size_t i = 0; i < axis_size; i++) {
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + axis_size;
+    }
+  }
+
+  std::vector<int> reversed_axis(axis);
   dev_ctx.template Alloc<T>(x_grad);
-  for (size_t i = 0; i < axis.size(); i++) {
-    reversed_axis[axis[i]] = i;
+  for (size_t i = 0; i < axis_size; i++) {
+    reversed_axis[formated_axis[i]] = i;
   }
 
   TransposeKernel<T, Context>(dev_ctx, out_grad, reversed_axis, x_grad);
......
@@ -29,25 +29,31 @@ void TransposeGradKernel(const Context& dev_ctx,
   if (x_grad->numel() == 0) {
     return;
   }
-  if (axis.size() == 0) {
+
+  size_t axis_size = axis.size();
+  if (axis_size == 0) {
     phi::Copy<Context>(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad);
     return;
   }
-  std::vector<int> reversed_axis(axis);
-  for (size_t i = 0; i < axis.size(); i++) {
-    reversed_axis[axis[i]] = i;
-  }
-  int ndims = axis.size();
-  std::vector<int> out_shape_host(ndims, 0);
-  for (int i = 0; i < ndims; ++i) {
-    out_shape_host[i] = out_grad.dims()[i];
-  }
+
+  std::vector<int> formated_axis = axis;
+  for (size_t i = 0; i < axis_size; i++) {
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + axis_size;
+    }
+  }
+
+  std::vector<int> reversed_axis(axis);
+  for (size_t i = 0; i < axis_size; i++) {
+    reversed_axis[formated_axis[i]] = i;
+  }
+
+  std::vector<int> out_grad_dim_vec = phi::vectorize<int>(out_grad.dims());
   int r = xpu::transpose<XPUType>(
       dev_ctx.x_context(),
       reinterpret_cast<const XPUType*>(out_grad.data<T>()),
       reinterpret_cast<XPUType*>(x_grad->data<T>()),
-      out_shape_host,
+      out_grad_dim_vec,
       reversed_axis);
   PADDLE_ENFORCE_XDNN_SUCCESS(r, "transpose_grad");
 }
......
@@ -24,26 +24,31 @@ void TransposeKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const std::vector<int>& axis,
                      DenseTensor* out) {
+  size_t x_rank = x.dims().size();
+  std::vector<int> formated_axis = axis;
+  for (size_t i = 0; i < axis.size(); i++) {
+    if (axis[i] < 0) {
+      formated_axis[i] = axis[i] + x_rank;
+    }
+  }
+
   using XPUType = typename XPUTypeTrait<T>::Type;
   dev_ctx.template Alloc<T>(out);
   if (out->numel() == 0) {
     return;
   }
-  if (axis.size() == 0) {
+  if (formated_axis.size() == 0) {
     phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
     return;
   }
-  int ndims = axis.size();
-  std::vector<int> x_shape_host(ndims, 0);
-  for (int i = 0; i < ndims; ++i) {
-    x_shape_host[i] = x.dims()[i];
-  }
+
+  std::vector<int> x_dim_vec = phi::vectorize<int>(x.dims());
   int r = xpu::transpose<XPUType>(dev_ctx.x_context(),
                                   reinterpret_cast<const XPUType*>(x.data<T>()),
                                   reinterpret_cast<XPUType*>(out->data<T>()),
-                                  x_shape_host,
-                                  axis);
+                                  x_dim_vec,
+                                  formated_axis);
   PADDLE_ENFORCE_XDNN_SUCCESS(r, "transpose");
 }
......
@@ -117,6 +117,12 @@ class TestCase9(TestTransposeOp):
         self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
 
 
+class TestCase10(TestTransposeOp):
+    def initTestCase(self):
+        self.shape = (10, 8, 2)
+        self.axis = (-1, 1, -3)
+
+
 class TestCase_ZeroDim(TestTransposeOp):
     def initTestCase(self):
         self.shape = ()
......
@@ -127,5 +127,11 @@ class TestCase9(TestXPUTransposeOp):
         self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
 
 
+class TestCase10(TestXPUTransposeOp):
+    def initTestCase(self):
+        self.shape = (2, 3, 2)
+        self.axis = (-1, 1, -3)
+
+
 if __name__ == "__main__":
     unittest.main()