Commit eb12cbe7 authored by guosheng

Refine reshape_op infershape

Parent a6e64242
@@ -17,93 +17,6 @@ limitations under the License. */
namespace paddle {
namespace operators {
class ReshapeOp : public framework::OperatorWithKernel {
public:
ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of ReshapeOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of ReshapeOp should not be null.");
const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
PADDLE_ENFORCE(!shape.empty(),
"The shape information must be set by Attr(shape).");
std::vector<int64_t> output_shape;
auto x_dims = ctx->GetInputDim("X");
bool need_copy_dim = ValidateShape(shape, x_dims, output_shape);
if (need_copy_dim) {
// Some dimensions can only be determined at runtime. Here we temporarily
// set the output tensor's shape to be the same as that of the input tensor.
ctx->SetOutputDim("Out", x_dims);
} else {
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
}
// NOTE: Reshape op cannot reshape an input sequence batch into an output
// sequence batch that has a different number of time steps.
// Here the output always shares the LoD information with the input. But if
// Attr(shape) contains 0 or -1, the actual output shape can only be
// determined at runtime. The check for whether it is a valid output
// sequence batch is performed at runtime.
ctx->ShareLoD("X", /*->*/ "Out");
}
private:
bool ValidateShape(const std::vector<int> &shape,
const framework::DDim &input_dim,
std::vector<int64_t> &output_shape) const {
// Only one dimension can be set to -1, whose size will be automatically
// inferred.
const int64_t unknown_index = -1;
const auto in_size = framework::product(input_dim);
const auto x_rank = input_dim.size();
bool need_dim_copy = false;
std::vector<size_t> neg_dims_idx;
for (size_t i = 0; i < shape.size(); ++i) {
PADDLE_ENFORCE(shape[i] >= 0 || shape[i] == unknown_index,
"Each input dimension of Attr(shape) must be positive, or "
"only one input dimension can be -1.");
if (shape[i] == unknown_index) {
neg_dims_idx.push_back(i);
} else if (shape[i] == 0) {
PADDLE_ENFORCE_LT(
i, x_rank,
"Only dimension less than rank of Input(X) can be set to 0.");
need_dim_copy = true;
}
}
PADDLE_ENFORCE_LE(
neg_dims_idx.size(), 1,
"Only one input dimension of Attr(shape) can be unknown.");
output_shape.resize(shape.size(), 0);
std::transform(shape.begin(), shape.end(), output_shape.begin(),
[](int a) { return static_cast<int64_t>(a); });
// Some dimensions can only be determined at runtime.
if (need_dim_copy) return need_dim_copy;
int64_t inferred_dim = 0;
if (neg_dims_idx.size()) {
int64_t capacity = std::accumulate(shape.begin(), shape.end(), 1,
std::multiplies<int>());
inferred_dim = in_size / (-capacity);
PADDLE_ENFORCE_EQ(inferred_dim * (-capacity), in_size,
"Invalid shape is given.");
output_shape[neg_dims_idx[0]] = inferred_dim;
}
return false;
}
};
class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
@@ -150,7 +63,7 @@ the actual dimension value will be inferred from the total element number of
Input(X) and remaining dimensions.
1. More than one dimension in Attr(shape) can be set to 0, which means the real
dimension value will be copied from Input(X) at runtime. Note that the index of
- 0 can not access Rank(X). For example, Input(X) is a 3-D tensor with shape
+ 0 can not exceed Rank(X). For example, Input(X) is a 3-D tensor with shape
[2, 3, 4], Attr(shape) = [2, 3, 2, 0] is an invalid input.
)DOC");
......
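To make the Attr(shape) semantics documented above concrete (a 0 entry copies the corresponding dimension of Input(X); a single -1 entry is inferred from the remaining element count), here is a minimal standalone sketch. It is illustrative only: ResolveShape is a hypothetical helper written in plain C++, not a PaddlePaddle API.

#include <cstdint>
#include <stdexcept>
#include <vector>

// Resolve an Attr(shape)-style target shape against the input dims:
// 0 copies the dimension from in_dims, a single -1 is inferred so that the
// total element count is preserved.
std::vector<int64_t> ResolveShape(const std::vector<int64_t> &in_dims,
                                  const std::vector<int> &shape) {
  int64_t in_size = 1;
  for (int64_t d : in_dims) in_size *= d;
  std::vector<int64_t> out(shape.size());
  int64_t capacity = 1;
  int unk_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      if (unk_idx != -1) throw std::invalid_argument("more than one -1");
      unk_idx = static_cast<int>(i);
    } else if (shape[i] == 0) {
      // A 0 entry copies the dimension, so its index must stay below Rank(X).
      if (i >= in_dims.size()) throw std::invalid_argument("0 beyond Rank(X)");
      out[i] = in_dims[i];
      capacity *= out[i];
    } else if (shape[i] > 0) {
      out[i] = shape[i];
      capacity *= out[i];
    } else {
      throw std::invalid_argument("negative dimension other than -1");
    }
  }
  if (unk_idx >= 0) {
    out[unk_idx] = in_size / capacity;  // infer the single -1 entry
    if (out[unk_idx] * capacity != in_size)
      throw std::invalid_argument("element count mismatch");
  } else if (capacity != in_size) {
    throw std::invalid_argument("element count mismatch");
  }
  return out;
}

int main() {
  // Input(X) with shape [2, 3, 4] has 24 elements.
  ResolveShape({2, 3, 4}, {6, 4});   // -> [6, 4]
  ResolveShape({2, 3, 4}, {0, -1});  // -> [2, 12]: 0 copies 2, -1 gives 24 / 2
  // ResolveShape({2, 3, 4}, {2, 3, 2, 0}) would throw: the 0 sits at index 3,
  // which is not less than Rank(X) = 3 (the invalid example from the DOC).
  return 0;
}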
@@ -20,15 +20,90 @@ limitations under the License. */
namespace paddle {
namespace operators {
class ReshapeOp : public framework::OperatorWithKernel {
public:
ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of ReshapeOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of ReshapeOp should not be null.");
const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
PADDLE_ENFORCE(!shape.empty(),
"The shape information must be set by Attr(shape).");
std::vector<int64_t> output_shape;
auto x_dims = ctx->GetInputDim("X");
auto out_dims = ValidateShape(shape, x_dims);
ctx->SetOutputDim("Out", out_dims);
// NOTE: Reshape op cannot reshape an input sequence batch into an
// output sequence batch that has a different number of time steps. Here
// output always shares the LoD information with the input. But if
// Attr(shape) contains 0 or -1, the actual output shape can only be
// determined at runtime. The check for whether it is a valid
// output sequence batch is performed at runtime.
ctx->ShareLoD("X", /*->*/ "Out");
}
static framework::DDim ValidateShape(const std::vector<int> shape,
const framework::DDim &in_dims) {
const int64_t in_size = framework::product(in_dims);
// Only one dimension can be set to -1, whose size will be automatically
// inferred.
const int64_t unk_dim_val = -1;
const int64_t copy_dim_val = 0;
std::vector<int64_t> output_shape(shape.size(), 0);
int64_t capacity = 1;
int unk_dim_idx = -1;
for (size_t i = 0; i < shape.size(); ++i) {
if (shape[i] == unk_dim_val) {
PADDLE_ENFORCE(
unk_dim_idx == -1,
"Only one input dimension of Attr(shape) can be unknown.");
unk_dim_idx = i;
} else if (shape[i] == copy_dim_val) {
PADDLE_ENFORCE(
static_cast<int>(i) < in_dims.size(),
"The index of dimension to copy from input shape must be less "
"than the size of input shape.");
} else {
PADDLE_ENFORCE(
shape[i] > 0,
"Each input dimension of Attr(shape) must not be negtive except "
"one unknown dimension.");
}
capacity *= (shape[i] ? shape[i] : in_dims[i]);
output_shape[i] =
(shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
}
if (unk_dim_idx != -1) {
output_shape[unk_dim_idx] = -in_size / capacity;
PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size,
"Invalid shape is given.");
} else {
PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
}
return framework::make_ddim(output_shape);
}
};
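// Illustrative trace of ValidateShape above (example values are assumed, not
// taken from the original source): with in_dims = [2, 3, 4] (in_size = 24)
// and Attr(shape) = [4, 0, -1]:
//   i = 0: shape[0] = 4  -> output_shape[0] = 4,              capacity = 4
//   i = 1: shape[1] = 0  -> output_shape[1] = in_dims[1] = 3, capacity = 12
//   i = 2: shape[2] = -1 -> unk_dim_idx = 2, and since shape[2] != 0 the loop
//          still multiplies it in: capacity = 12 * (-1) = -12
//   Finally output_shape[2] = -in_size / capacity = -24 / -12 = 2, and the
//   check output_shape[2] * capacity == -in_size holds (2 * -12 == -24),
//   so Out gets the shape [4, 3, 2].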
template <typename DeviceContext, typename T>
class ReshapeKernel : public framework::OpKernel<T> {
public:
- void Compute(const framework::ExecutionContext& ctx) const {
- auto* out = ctx.Output<framework::LoDTensor>("Out");
- auto* in = ctx.Input<framework::LoDTensor>("X");
+ void Compute(const framework::ExecutionContext &ctx) const {
+ auto *out = ctx.Output<framework::LoDTensor>("Out");
+ auto *in = ctx.Input<framework::LoDTensor>("X");
- auto out_dims =
- ValidateShape(ctx.Attr<std::vector<int>>("shape"), in->dims());
+ auto out_dims = ReshapeOp::ValidateShape(
+ ctx.Attr<std::vector<int>>("shape"), in->dims());
if (!in->lod().empty()) {
PADDLE_ENFORCE_EQ(
@@ -49,42 +124,14 @@ class ReshapeKernel : public framework::OpKernel<T> {
out->Resize(out_dims);
}
}
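// Illustrative note (example values assumed): InferShape resolves the 0 / -1
// entries of Attr(shape) against the compile-time dims of Input(X), which may
// still contain placeholders such as an unknown batch size, so the kernel
// calls ReshapeOp::ValidateShape again with the actual runtime dims. For
// instance, a compile-time X of shape [-1, 8] with Attr(shape) = [0, -1] only
// resolves to a concrete Out of [32, 8] once the runtime batch of 32 is known.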
private:
framework::DDim ValidateShape(const std::vector<int> shape_attr,
const framework::DDim& in_dims) const {
const int64_t in_size = framework::product(in_dims);
// Only one dimension can be set to -1, whose size will be automatically
// inferred.
const int64_t unknown_index = -1;
std::vector<int64_t> output_shape(shape_attr.size(), 0);
int64_t capacity = 1;
int neg_dim_idx = -1;
for (size_t i = 0; i < shape_attr.size(); ++i) {
if (shape_attr[i] == unknown_index) neg_dim_idx = i;
capacity *= (shape_attr[i] ? shape_attr[i] : in_dims[i]);
output_shape[i] =
(shape_attr[i] ? static_cast<int64_t>(shape_attr[i]) : in_dims[i]);
}
if (neg_dim_idx != -1) {
output_shape[neg_dim_idx] = -in_size / capacity;
PADDLE_ENFORCE_EQ(output_shape[neg_dim_idx] * capacity, -in_size,
"Invalid shape is given.");
} else {
PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
}
return framework::make_ddim(output_shape);
}
};
template <typename DeviceContext, typename T>
class ReshapeGradKernel : public framework::OpKernel<T> {
public:
- void Compute(const framework::ExecutionContext& ctx) const {
- auto* d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
- auto* d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+ void Compute(const framework::ExecutionContext &ctx) const {
+ auto *d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+ auto *d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
d_x->mutable_data<T>(ctx.GetPlace());
bool inplace = ctx.Attr<bool>("inplace");
......