/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Reads the target shape from a list of scalar int32 tensors, copying each
// tensor to the CPU first when it resides on the GPU.
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor *> &list_new_shape_tensor) {
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      "shape of dim tensor should be [1]");
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);

      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }

  return vec_new_shape;
}

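// ReshapeOp validates the target shape and infers the dims of Out at compile
// time. The target shape may come from Input(ShapeTensor), Input(Shape), or
// Attr(shape), in decreasing order of priority.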
class ReshapeOp : public framework::OperatorWithKernel {
 public:
  ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
            const framework::VariableNameMap &outputs,
            const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of ReshapeOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ReshapeOp should not be null.");

    if (ctx->HasInputs("ShapeTensor")) {
      // ShapeTensor has the highest priority. Its values are only known at
      // run time, so mark every output dimension as unknown (-1) for now.
      auto inputs_name = ctx->Inputs("ShapeTensor");
      PADDLE_ENFORCE(inputs_name.size() > 0, "shape tensor size can't be zero");
      auto out_dims = std::vector<int>(inputs_name.size(), -1);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims));

      return;
    }
    if (ctx->HasInput("Shape") && ctx->IsRuntime()) {
      // If true, set the shape of Output(Out) according to Input(Shape) in
      // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel.
      ctx->ShareLoD("X", /*->*/ "Out");
      return;
    }
    const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
    PADDLE_ENFORCE(!shape.empty(),
                   "The shape information must be set by Attr(shape).");
    auto x_dims = ctx->GetInputDim("X");
    auto out_dims = ValidateShape(shape, x_dims);
    ctx->SetOutputDim("Out", out_dims);
    if (x_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

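  // Resolves the target shape against in_dims: -1 marks the single dimension
  // whose size is inferred from the element count, and 0 means "copy this
  // dimension from the input".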
  static framework::DDim ValidateShape(const std::vector<int> shape,
                                       const framework::DDim &in_dims) {
    const int64_t in_size = framework::product(in_dims);
    auto in_dims_vec = framework::vectorize(in_dims);
    bool all_positive = std::all_of(in_dims_vec.cbegin(), in_dims_vec.cend(),
                                    [](int64_t i) { return i > 0; });
    // Only one dimension can be set to -1; its size will be inferred
    // automatically.
    const int64_t unk_dim_val = -1;
    const int64_t copy_dim_val = 0;

    std::vector<int64_t> output_shape(shape.size(), 0);
    int64_t capacity = 1;
    int unk_dim_idx = -1;
    for (size_t i = 0; i < shape.size(); ++i) {
      if (shape[i] == unk_dim_val) {
        PADDLE_ENFORCE(
            unk_dim_idx == -1,
            "Only one input dimension of Attr(shape) can be unknown.");
        unk_dim_idx = i;
      } else if (shape[i] == copy_dim_val) {
        PADDLE_ENFORCE(
            static_cast<int>(i) < in_dims.size(),
            "The index of dimension to copy from input shape must be less "
            "than the size of input shape.");
      } else {
        PADDLE_ENFORCE(
            shape[i] > 0,
            "Each input dimension of Attr(shape) must not be negtive except "
            "one unknown dimension.");
      }

      capacity *= (shape[i] ? shape[i] : in_dims[i]);
      output_shape[i] =
          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
    }

    if (unk_dim_idx != -1) {
      if (all_positive) {
        // All input dimensions are known, so the unknown dimension can be
        // computed exactly and verified. Note that capacity is negative here
        // because shape[unk_dim_idx] == -1 was multiplied into it.
        output_shape[unk_dim_idx] = -in_size / capacity;
        PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size,
                          "Invalid shape is given.");
      } else {
        // in_size < 0 is indeterminate at compile time, so skip the check.
        // For example, with in_dims = [-1, 8, 1, 1] and shape = [-1, 3, 8],
        // capacity = -24, in_size = -8, and output_shape[0] would be 0,
        // making the check above fail spuriously.
        output_shape[unk_dim_idx] = -1;
      }
    } else {
      PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
    }
    return framework::make_ddim(output_shape);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
                                   ctx.device_context());
  }

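  // ShapeTensor carries shape metadata rather than data, so it is exempted
  // from the usual place/layout transformation; the kernel copies it to the
  // CPU itself when needed.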
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor). The input tensor of reshape operator.");
    AddInput("Shape",
             "(Tensor<int32>, optional). If provided, reshape according to "
             "this given shape. That is to say it has a higher priority than "
             "the shape attribute, while the shape attribute still should be "
             "set correctly to gurantee shape inference in compile time.")
        .AsDispensable();
    AddInput(
        "ShapeTensor",
        "(vector<Tensor<int32>>, optional). If provided, reshape uses this as "
        "the target shape. Each tensor in the vector must have shape [1], and "
        "it has the highest priority, above Input(Shape) and Attr(shape).")
        .AsDuplicable()
        .AsDispensable();
    AddOutput("Out", "(Tensor). The output tensor of reshape operator.");
    AddAttr<std::vector<int>>(
        "shape", "(std::vector<int>) Target shape of reshape operator.")
        .SetDefault({});
    AddComment(R"DOC(
Reshape Operator.

Reshape Input(X) into the shape specified by Attr(shape) or Input(Shape). The
data in Input(X) are unchanged.

Examples:

1. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [6, 8], the reshape operator will transform Input(X)
into a 2-D tensor with shape [6, 8], leaving Input(X)'s data unchanged.

2. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [2, 3, -1, 2], the reshape operator will transform
Input(X) into a 4-D tensor with shape [2, 3, 4, 2], leaving Input(X)'s data
unchanged. In this case, one and only one dimension of Attr(shape) can be set
to -1; the value of this dimension is inferred from the total element number of
Input(X) and the remaining dimensions.

3. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [-1, 0, 3, 2], the reshape operator will transform
Input(X) into a 4-D tensor with shape [2, 4, 3, 2], leaving Input(X)'s data
unchanged. In this case, besides -1, 0 means the actual dimension value is going
to be copied from the corresponding dimension of Input(X).

Note:

1. One and only one dimension in Attr(shape) can be set to -1. In this case,
the actual dimension value will be inferred from the total element number of
Input(X) and the remaining dimensions.

2. More than one dimension in Attr(shape) can be set to 0, which means the real
dimension value will be copied from Input(X) at runtime. Note that the index of
0 cannot exceed Rank(X). For example, Input(X) is a 3-D tensor with shape
[2, 3, 4], Attr(shape) = [2, 3, 2, 0] is an invalid input.

3. Input(Shape) has a higher priority than Attr(shape) if it is provided, while
Attr(shape) still should be set correctly to guarantee shape inference at
compile time.

)DOC");
  }
};

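// The gradient of reshape is itself a reshape: X@GRAD simply takes X's dims,
// and the grad kernel copies Out@GRAD into it.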
class ReshapeGradOp : public framework::OperatorWithKernel {
 public:
  ReshapeGradOp(const std::string &type,
                const framework::VariableNameMap &inputs,
                const framework::VariableNameMap &outputs,
                const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) shouldn't be null.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
                                   ctx.device_context());
  }
};

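// ReshapeKernel works for every registered data type: it resolves the target
// shape at run time (ShapeTensor > Shape > Attr(shape)) and copies the input
// data unchanged.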
class ReshapeKernel {
 public:
  void operator()(const framework::ExecutionContext &ctx) const {
    auto *out = ctx.Output<framework::LoDTensor>("Out");
    auto *in = ctx.Input<framework::LoDTensor>("X");

    framework::DDim out_dims = out->dims();

    auto list_new_shape_tensor =
        ctx.MultiInput<framework::Tensor>("ShapeTensor");
    if (list_new_shape_tensor.size() > 0) {
      // The target shape comes from a list of scalar tensors.
      auto new_shape = get_new_shape(list_new_shape_tensor);
      out_dims = ReshapeOp::ValidateShape(new_shape, in->dims());

    } else {
      auto *shape_tensor = ctx.HasInput("Shape")
                               ? ctx.Input<framework::LoDTensor>("Shape")
                               : nullptr;

      if (shape_tensor) {
        auto *shape_data = shape_tensor->data<int>();
        framework::Tensor cpu_shape_tensor;
        if (platform::is_gpu_place(shape_tensor->place())) {
          TensorCopySync(*shape_tensor, platform::CPUPlace(),
                         &cpu_shape_tensor);
          shape_data = cpu_shape_tensor.data<int>();
        }
        auto shape =
            std::vector<int>(shape_data, shape_data + shape_tensor->numel());
        out_dims = ReshapeOp::ValidateShape(shape, in->dims());
      }
    }

    out->Resize(out_dims);
    out->mutable_data(ctx.GetPlace(), in->type());
    framework::TensorCopy(
        *in, ctx.GetPlace(),
        ctx.template device_context<platform::DeviceContext>(), out);
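    // TensorCopy resets out's dims to in's dims, so restore the target shape.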
    out->Resize(out_dims);
  }
};

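// ReshapeGradKernel copies Out@GRAD into X@GRAD, then restores X's original
// dims (TensorCopySync overwrites them with d_out's).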
class ReshapeGradKernel {
 public:
  void operator()(const framework::ExecutionContext &ctx) const {
    auto *d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto *d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    auto in_dims = d_x->dims();

    d_x->mutable_data(ctx.GetPlace(), d_out->type());
    framework::TensorCopySync(*d_out, ctx.GetPlace(), d_x);
    d_x->Resize(in_dims);
  }
};

// FIXME(zcd): reshape2 adds an intermediate output (XShape) on top of reshape.
// XShape carries the shape and LoD of X, which reshape_grad needs; this lets
// the framework reuse the memory of X as soon as reshape_op finishes.
// For compatibility reasons, we cannot change reshape_op itself.
class Reshape2Op : public ReshapeOp {
 public:
  Reshape2Op(const std::string &type, const framework::VariableNameMap &inputs,
             const framework::VariableNameMap &outputs,
             const framework::AttributeMap &attrs)
      : ReshapeOp(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
                   "Output(XShape) of ReshapeOp should not be null.");
    const auto &x_dims = ctx->GetInputDim("X");
    std::vector<int64_t> xshape_dims(x_dims.size() + 1);
    xshape_dims[0] = 0;
    for (int i = 0; i < x_dims.size(); ++i) {
      xshape_dims[i + 1] = x_dims[i];
    }
    ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims));
    ctx->ShareLoD("X", /*->*/ "XShape");

    ReshapeOp::InferShape(ctx);
  }
};

class Reshape2OpMaker : public ReshapeOpMaker {
 public:
  void Make() override {
    ReshapeOpMaker::Make();
    AddOutput("XShape",
              "XShape is just used to store the shape and lod of X, which will "
              "be used in FlattenGradOp.")
        .AsIntermediate();
  }
};

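// Reshape2GradMaker wires XShape, not X, into reshape2_grad, so the backward
// pass does not keep X alive and its memory can be reused earlier.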
class Reshape2GradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto *grad_op = new framework::OpDesc();
    grad_op->SetType("reshape2_grad");
    grad_op->SetInput("XShape", Output("XShape"));
    grad_op->SetInput("ShapeTensor", Input("ShapeTensor"));
    grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    grad_op->SetAttrMap(Attrs());
    return std::unique_ptr<framework::OpDesc>(grad_op);
  }
};

class Reshape2GradOp : public framework::OperatorWithKernel {
 public:
  Reshape2GradOp(const std::string &type,
                 const framework::VariableNameMap &inputs,
                 const framework::VariableNameMap &outputs,
                 const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) shouldn't be null.");
    auto xshape_dims = ctx->GetInputDim("XShape");
    auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
    ctx->ShareLoD("XShape", framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->type(),
        ctx.device_context());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

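// In-place inference rules: Out may reuse X's buffer in the forward op, and
// X@GRAD may reuse Out@GRAD's in the backward op, since reshape leaves the
// underlying data untouched.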
class ReshapeOpInplaceInToOut : public framework::InplaceOpInference {
 public:
  std::unordered_map<std::string, std::string> operator()(
      const framework::OpDesc &op_desc, bool use_cuda) const override {
    return {{"X", "Out"}};
  }
};

class ReshapeGradInplaceInToOut : public framework::InplaceOpInference {
 public:
  std::unordered_map<std::string, std::string> operator()(
      const framework::OpDesc &op_desc, bool use_cuda) const override {
    return {{framework::GradVarName("Out"), framework::GradVarName("X")}};
  }
};

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;

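// reshape (legacy) and reshape2 share the same kernel functors; they are
// registered for float, double, int, and int64_t on the CPU, plus float16
// on CUDA below.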
REGISTER_OPERATOR(reshape, ops::ReshapeOp, ops::ReshapeOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>,
                  ops::ReshapeOpInplaceInToOut);
REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp,
                  ops::ReshapeGradInplaceInToOut);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
                               ops::ReshapeKernel, int, ops::ReshapeKernel,
                               int64_t, ops::ReshapeKernel);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
                               double, ops::ReshapeGradKernel, int,
                               ops::ReshapeGradKernel, int64_t,
                               ops::ReshapeGradKernel);

REGISTER_OPERATOR(reshape2, ops::Reshape2Op, ops::Reshape2OpMaker,
                  ops::Reshape2GradMaker, ops::ReshapeOpInplaceInToOut);
REGISTER_OPERATOR(reshape2_grad, ops::Reshape2GradOp,
                  ops::ReshapeGradInplaceInToOut);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                               ops::ReshapeKernel, int, ops::ReshapeKernel,
                               int64_t, ops::ReshapeKernel);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                               double, ops::ReshapeGradKernel, int,
                               ops::ReshapeGradKernel, int64_t,
                               ops::ReshapeGradKernel);

#ifdef PADDLE_WITH_CUDA
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                int64_t, ops::ReshapeKernel, plat::float16,
                                ops::ReshapeKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
                                ops::ReshapeGradKernel, plat::float16,
                                ops::ReshapeGradKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                int64_t, ops::ReshapeKernel, plat::float16,
                                ops::ReshapeKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
                                ops::ReshapeGradKernel, plat::float16,
                                ops::ReshapeGradKernel);
#endif