/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/pten_utils.h"

// Only headers under the paddle/pten/api dirs can be included here.
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/common/scalar_array.h"
#include "paddle/pten/kernels/reshape_grad_kernel.h"
#include "paddle/pten/kernels/reshape_kernel.h"
namespace paddle {
namespace framework {
class InferShapeContext;
class OpDesc;
}  // namespace framework
namespace imperative {
class OpBase;
}  // namespace imperative
}  // namespace paddle

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

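// Collects the target shape from a list of 1-element shape tensors. Values
// living on GPU/XPU are first copied to the CPU so they can be read on the
// host.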
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor *> &list_new_shape_tensor) {
  // Read one dimension value from each 1-element shape tensor.
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(
        tensor->dims(), framework::make_ddim({1}),
        platform::errors::InvalidArgument(
            "If the element type of 'shape' in ReshapeOp is Tensor, "
            "the element's shape must be [1]. But received the element's shape "
            "is [%s]",
            tensor->dims()));
    if (platform::is_gpu_place(tensor->place()) ||
        platform::is_xpu_place(tensor->place())) {
      framework::Tensor temp;
      paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp);

      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }

  return vec_new_shape;
}

class ReshapeOp : public framework::OperatorWithKernel {
 public:
  ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
            const framework::VariableNameMap &outputs,
            const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                      platform::errors::InvalidArgument(
                          "Input(X) of ReshapeOp should not be null."));
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                      platform::errors::InvalidArgument(
                          "Output(Out) of ReshapeOp should not be null."));

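    // The output shape comes from one of three sources, checked in priority
    // order: Input(ShapeTensor), then Input(Shape), then Attr(shape).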
    if (ctx->HasInputs("ShapeTensor")) {
      // top priority shape
      auto ShapeTensor = ctx->Inputs("ShapeTensor");
      PADDLE_ENFORCE_GT(
          ShapeTensor.size(), 0,
          platform::errors::InvalidArgument(
              "When `shape` in ReshapeOp is a list or tuple "
              "which contains Tensor, the shape's size can't be zero. "
              "But received shape's size is %d.",
              ShapeTensor.size()));
      auto infer_shape = ctx->Attrs().Get<std::vector<int>>("shape");
      const int64_t copy_dim_val = 0;
      auto in_dims = ctx->GetInputDim("X");
      for (size_t i = 0; i < infer_shape.size(); ++i) {
        if (infer_shape[i] == copy_dim_val) {
          PADDLE_ENFORCE_LT(
              static_cast<int>(i), in_dims.size(),
              platform::errors::InvalidArgument(
                  "The index of 0 in `shape` must be less than "
                  "the input tensor X's dimensions. But received shape[%d] "
                  "= 0, X's dimensions = %d, X's shape = [%s].",
                  i, in_dims.size(), in_dims));
          infer_shape[i] = in_dims[i];
        }
      }
      auto infer_out_dims = framework::make_ddim(infer_shape);
      ctx->SetOutputDim("Out", infer_out_dims);
      return;
    }

    const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
    if (ctx->HasInput("Shape") && shape.empty()) {
      auto shape_dims = ctx->GetInputDim("Shape");
      int num_ele = 1;
      for (int i = 0; i < shape_dims.size(); ++i) {
        num_ele *= shape_dims[i];
      }
      auto vec_dims = std::vector<int>(num_ele, -1);
      auto out_dims = framework::make_ddim(vec_dims);
      ctx->SetOutputDim("Out", out_dims);
      ctx->ShareLoD("X", /*->*/ "Out");
      return;
    }

    if (ctx->HasInput("Shape") && !shape.empty() && ctx->IsRuntime()) {
      // If true, set the shape of Output(Out) according to Input(Shape) in
      // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel.
      ctx->ShareLoD("X", /*->*/ "Out");
      return;
    }

    PADDLE_ENFORCE_EQ(!shape.empty(), true,
                      platform::errors::InvalidArgument(
                          "The parameter 'shape' in ReshapeOp must be set. "
                          "But received 'shape' is empty."));
    auto x_dims = ctx->GetInputDim("X");
    auto out_dims = ValidateShape(shape, x_dims);
    ctx->SetOutputDim("Out", out_dims);
    if (x_dims[0] == out_dims[0]) {
      // Only pass LoD when the first dimension of output and Input(X)
      // are the same.
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  static framework::DDim ValidateShape(const std::vector<int> shape,
                                       const framework::DDim &in_dims) {
    const int64_t in_size = framework::product(in_dims);
    auto in_dims_vec = framework::vectorize(in_dims);
    bool all_positive = std::all_of(in_dims_vec.cbegin(), in_dims_vec.cend(),
                                    [](int64_t i) { return i > 0; });
    // Only one dimension can be set to -1; its size will be inferred
    // automatically.
    const int64_t unk_dim_val = -1;
    const int64_t copy_dim_val = 0;

    std::vector<int64_t> output_shape(shape.size(), 0);
    int64_t capacity = 1;
    int unk_dim_idx = -1;
    for (size_t i = 0; i < shape.size(); ++i) {
      if (shape[i] == unk_dim_val) {
        PADDLE_ENFORCE_EQ(
            unk_dim_idx, -1,
            platform::errors::InvalidArgument(
                "Only one dimension value of 'shape' in ReshapeOp can "
                "be -1. But received shape = [%s], shape[%d] is also -1.",
                framework::make_ddim(shape), i));
        unk_dim_idx = i;
      } else if (shape[i] == copy_dim_val) {
        PADDLE_ENFORCE_LT(
            static_cast<int>(i), in_dims.size(),
            platform::errors::InvalidArgument(
                "The index of 0 in `shape` must be less than "
                "the input tensor X's dimensions. "
                "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
                "X's dimensions = %d.",
                framework::make_ddim(shape), i, in_dims, in_dims.size()));
      } else {
        PADDLE_ENFORCE_GT(
            shape[i], 0,
            platform::errors::InvalidArgument(
                "Each dimension value of 'shape' in ReshapeOp must not "
                "be negative except one unknown dimension. "
                "But received shape = [%s], shape[%d] = %d.",
                framework::make_ddim(shape), i, shape[i]));
      }

      // NOTE: all non-zero values are converted to true here (including
      // negative values)
      capacity *= (shape[i] ? shape[i] : in_dims[i]);
      output_shape[i] =
          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
    }

    if (unk_dim_idx != -1) {
      if (all_positive) {
        // If in_size < 0, the shape is indeterminate at compile time, so the
        // check below must be skipped. For example, in_dims = [-1, 8, 1, 1]
        // and shape = [-1, 3, 8] give capacity = -24, in_size = -8,
        // output_shape[0] = 0, and the following check would fail.
        output_shape[unk_dim_idx] = -in_size / capacity;
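        // e.g. for in_dims = [2, 4, 6] (in_size = 48) and shape = [6, -1]:
        // capacity = 6 * (-1) = -6, so output_shape[1] = -48 / -6 = 8.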
        PADDLE_ENFORCE_EQ(
            output_shape[unk_dim_idx] * capacity, -in_size,
            platform::errors::InvalidArgument(
                "The 'shape' attribute in ReshapeOp is invalid. "
                "The input tensor X's size must be divisible by the known "
                "capacity of 'shape'. "
                "But received X's shape = [%s], X's size = %d, "
                "'shape' is [%s], known capacity of 'shape' is %d.",
                in_dims, in_size, framework::make_ddim(shape), capacity));
      } else {
        output_shape[unk_dim_idx] = -1;
      }
    } else {
      if (all_positive) {
        PADDLE_ENFORCE_EQ(
            capacity, in_size,
            platform::errors::InvalidArgument(
                "The 'shape' in ReshapeOp is invalid. "
                "The input tensor X's size must be equal to the capacity of "
                "'shape'. "
                "But received X's shape = [%s], X's size = %d, 'shape' is "
                "[%s], the capacity of 'shape' is %d.",
                in_dims, in_size, framework::make_ddim(shape), capacity));
      }
    }

    // Support reshape with a zero-size input (product(shape) == 0).
    // For now we require that if the input tensor has zero size, the
    // target shape of the output must have zero size as well.
    if (in_size == 0) {
      PADDLE_ENFORCE_LE(
          capacity, in_size,
          platform::errors::InvalidArgument(
              "The 'shape' in ReshapeOp is invalid. "
              "The input tensor X's shape = [%s], X's capacity = %d. "
              "But the target shape of Out is [%s], the "
              "capacity of 'Out' is %d.",
              in_dims, in_size, framework::make_ddim(shape), capacity));
    }

    return framework::make_ddim(output_shape);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

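  // ShapeTensor is returned untransformed: the kernel reads its values on the
  // CPU itself, so no device/layout transform is needed for it.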
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor). The input tensor of reshape operator.");
    AddInput("Shape",
             "(Tensor<int32>, optional). Target shape of reshape operator. "
             "It has a higher priority than Attr(shape) but a lower priority "
             "than Input(ShapeTensor). Attr(shape) should still be "
             "set correctly to guarantee shape inference at compile time.")
        .AsDispensable();
    AddInput(
        "ShapeTensor",
        "(vector<Tensor<int32>>, optional). Target shape of reshape operator. "
        "It has the highest priority compared with Input(Shape) and "
        "Attr(shape). "
        "The shape of each element in the vector must be [1].")
        .AsDuplicable()
        .AsDispensable();
    AddOutput("Out", "(Tensor). The output tensor of reshape operator.");
    AddAttr<std::vector<int>>(
        "shape",
        "(std::vector<int>) Target shape of reshape operator. "
        "It has the lowest priority compared with Input(Shape) and "
        "Input(ShapeTensor).")
        .SetDefault({});
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddComment(R"DOC(
Reshape Operator.

Reshape Input(X) into the shape specified by Attr(shape) or Input(Shape). The
data in Input(X) are unchanged.

Examples:

1. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [6, 8], the reshape operator will transform Input(X)
into a 2-D tensor with shape [6, 8], leaving Input(X)'s data unchanged.

2. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [2, 3, -1, 2], the reshape operator will transform
Input(X) into a 4-D tensor with shape [2, 3, 4, 2], leaving Input(X)'s data
unchanged. In this case, one and only one dimension of Attr(shape) can be set
to -1; the value of this dimension is inferred from the total element number of
Input(X) and remaining dimensions.

3. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
specified by Attr(shape) is [-1, 0, 3, 2], the reshape operator will transform
Input(X) into a 4-D tensor with shape [2, 4, 3, 2], leaving Input(X)'s data
unchanged. In this case, besides -1, 0 means the actual dimension value is going
to be copied from the corresponding dimension of Input(X).

Note:

1. One and only one dimension in Attr(shape) can be set to -1. In this case,
the actual dimension value will be inferred from the total element number of
Input(X) and remaining dimensions.

2. More than one dimension in Attr(shape) can be set to 0, which means the real
dimension value will be copied from Input(X) at runtime. Note that the index of
0 cannot exceed Rank(X). For example, Input(X) is a 3-D tensor with shape
[2, 3, 4], Attr(shape) = [2, 3, 2, 0] is an invalid input.

3. Input(Shape) has a higher priority than Attr(shape) if it is provided, while
Attr(shape) should still be set correctly to guarantee shape inference at
compile time.

)DOC");
  }
};

class ReshapeGradOp : public framework::OperatorWithKernel {
 public:
  ReshapeGradOp(const std::string &type,
                const framework::VariableNameMap &inputs,
                const framework::VariableNameMap &outputs,
                const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::InvalidArgument("Input(X) shouldn't be null."));
    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
                      platform::errors::InvalidArgument(
                          "Input(Out@GRAD) shouldn't be null."));
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ReshapeKernel {
 public:
  void operator()(const framework::ExecutionContext &ctx) const {
    auto *out = ctx.Output<framework::LoDTensor>("Out");
    auto *in = ctx.Input<framework::LoDTensor>("X");
    // framework::DDim out_dims = out->dims();
    auto pt_x = paddle::experimental::MakePtenDenseTensor(*in);

    // We can't call MakePtenDenseTensor on out directly, because the out of
    // reshape may be in one of several states, only some of which support
    // MakePtenDenseTensor:
    // 1. out tensor is not initialized
    // 2. out tensor is the input (fully inplace)
    // 3. out tensor is a view of the input
    // MakePtenDenseTensor fails for case 2, so we handle that case by
    // creating a temporary tensor here:
    pten::DenseTensorMeta meta{pten::TransToPtenDataType(in->type()),
                               in->dims(), in->layout()};
    auto pt_out_tmp = std::make_shared<pten::DenseTensor>(
        pten::make_intrusive<paddle::experimental::SharedStorage>(
            ctx.GetPlace()),
        std::move(meta));
    pten::DenseTensor *pt_out = nullptr;
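    // If in and out share the same allocation, the reshape is fully inplace
    // and pt_x can serve directly as the output.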
    if (in != nullptr && out != nullptr && in->Holder() != nullptr &&
        out->Holder() != nullptr &&
        in->Holder()->ptr() == out->Holder()->ptr()) {
      pt_out = pt_x.get();
    } else {
      pt_out = pt_out_tmp.get();
    }

    auto list_new_shape_tensor =
        ctx.MultiInput<framework::Tensor>("ShapeTensor");
    auto *shape_tensor = ctx.HasInput("Shape")
                             ? ctx.Input<framework::LoDTensor>("Shape")
                             : nullptr;
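    // Build the ScalarArray shape argument from whichever source is present,
    // mirroring the priority order used in InferShape.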
    pten::ScalarArray pt_scalar_shape;
    if (list_new_shape_tensor.size() > 0) {
      // the target shape is given by a list of 1-element tensors
      std::vector<pten::DenseTensor> pt_vec_shape;
      for (auto &tensor : list_new_shape_tensor) {
        if (platform::is_gpu_place(tensor->place()) ||
            platform::is_xpu_place(tensor->place())) {
          framework::Tensor temp;
          paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                            &temp);
          pt_vec_shape.push_back(
              std::move(*(paddle::experimental::MakePtenDenseTensor(temp))));
        } else {
          pt_vec_shape.push_back(
              std::move(*(paddle::experimental::MakePtenDenseTensor(*tensor))));
        }
      }
      pt_scalar_shape = pten::ScalarArray(pt_vec_shape);
    } else if (shape_tensor) {
      std::unique_ptr<pten::DenseTensor> pt_shape;
      if (platform::is_gpu_place(shape_tensor->place()) ||
          platform::is_xpu_place(shape_tensor->place())) {
        framework::Tensor temp;
        paddle::framework::TensorCopySync(*shape_tensor, platform::CPUPlace(),
                                          &temp);
        pt_shape = paddle::experimental::MakePtenDenseTensor(temp);
      } else {
        pt_shape = paddle::experimental::MakePtenDenseTensor(*shape_tensor);
      }
      pt_scalar_shape = pten::ScalarArray(*pt_shape.get());
    } else {
      auto &shape_attr = ctx.Attr<std::vector<int>>("shape");
      pt_scalar_shape = pten::ScalarArray(shape_attr);
    }
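    // Dispatch to the pten reshape kernel for the current place; every branch
    // calls the same pten::ReshapeKernel implementation.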
    if (platform::is_cpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
      pten::ReshapeKernel(dev_ctx, *pt_x.get(), pt_scalar_shape, pt_out);
    }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_gpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CUDADeviceContext>();
      pten::ReshapeKernel(dev_ctx, *pt_x.get(), pt_scalar_shape, pt_out);
    }
#endif
#ifdef PADDLE_WITH_XPU
    if (platform::is_xpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::XPUDeviceContext>();
      pten::ReshapeKernel(dev_ctx, *pt_x.get(), pt_scalar_shape, pt_out);
    }
#endif
    // For the non-inplace case, all results must be moved from pt_out to out;
    // for the inplace case, only the result dims need to be set.
    if (in != out) {
      paddle::experimental::SharesStorage(pt_out, static_cast<Tensor *>(out));
    } else {
      out->Resize(pt_out->dims());
    }
  }
};

class ReshapeGradKernel {
 public:
  void operator()(const framework::ExecutionContext &ctx) const {
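    // The gradient of reshape is itself a reshape: d_out's data is written
    // into d_x, which keeps X's original shape.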
    auto *d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto *d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    d_x->mutable_data(ctx.GetPlace(), d_out->type());

    auto pt_d_x = paddle::experimental::MakePtenDenseTensor(*d_x);
    auto pt_d_out = paddle::experimental::MakePtenDenseTensor(*d_out);

    if (platform::is_cpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
      pten::ReshapeGradKernel(dev_ctx, *pt_d_out.get(), pt_d_x.get());
    }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_gpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CUDADeviceContext>();
      pten::ReshapeGradKernel(dev_ctx, *pt_d_out.get(), pt_d_x.get());
    }
#endif
#ifdef PADDLE_WITH_XPU
    if (platform::is_xpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::XPUDeviceContext>();
      pten::ReshapeGradKernel(dev_ctx, *pt_d_out.get(), pt_d_x.get());
    }
#endif
  }
};

class ReshapeDoubleGradKernel {
 public:
  void operator()(const framework::ExecutionContext &ctx) const {
    auto *dd_x = ctx.Input<framework::Tensor>("DDX");
    auto *dd_out = ctx.Output<framework::Tensor>("DDOut");
    dd_out->mutable_data(ctx.GetPlace(), dd_x->type());

    auto pt_dd_x = paddle::experimental::MakePtenDenseTensor(*dd_x);
    auto pt_dd_out = paddle::experimental::MakePtenDenseTensor(*dd_out);

    if (platform::is_cpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
      pten::ReshapeDoubleGradKernel(dev_ctx, *pt_dd_x.get(), pt_dd_out.get());
    }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (platform::is_gpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::CUDADeviceContext>();
      pten::ReshapeDoubleGradKernel(dev_ctx, *pt_dd_x.get(), pt_dd_out.get());
    }
#endif
#ifdef PADDLE_WITH_XPU
    if (platform::is_xpu_place(ctx.GetPlace())) {
      auto &dev_ctx = ctx.device_context<platform::XPUDeviceContext>();
      pten::ReshapeDoubleGradKernel(dev_ctx, *pt_dd_x.get(), pt_dd_out.get());
    }
#endif
  }
};

// FIXME(zcd): reshape2 adds an intermediate output (XShape) on top of reshape.
// XShape carries the shape and lod of X, which are used in reshape_grad, so
// that the framework can reuse the memory of X as soon as the reshape_op
// finishes.
// Considering compatibility issues, we could not fix reshape_op itself.
class Reshape2Op : public ReshapeOp {
 public:
  Reshape2Op(const std::string &type, const framework::VariableNameMap &inputs,
             const framework::VariableNameMap &outputs,
             const framework::AttributeMap &attrs)
      : ReshapeOp(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasOutput("XShape"), true,
                      platform::errors::InvalidArgument(
                          "Output(XShape) of ReshapeOp should not be null."));
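    // XShape stores x_dims with an extra leading 0 (e.g. X with shape
    // [2, 4, 6] yields XShape [0, 2, 4, 6]), so no real data buffer is
    // allocated for it.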
    const auto &x_dims = ctx->GetInputDim("X");
    std::vector<int64_t> xshape_dims(x_dims.size() + 1);
    xshape_dims[0] = 0;
    for (int i = 0; i < x_dims.size(); ++i) {
      xshape_dims[i + 1] = x_dims[i];
    }
    ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims));
    ctx->ShareLoD("X", /*->*/ "XShape");

    ReshapeOp::InferShape(ctx);
  }

  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext &ctx) const override {
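    // Select the shape argument for the pten kernel with the same priority
    // order as InferShape: ShapeTensor > Shape > the shape attribute.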
    std::string shape;
    auto multi_inputs = ctx.MultiInput<framework::Tensor>("ShapeTensor");
    if (multi_inputs.size() > 0) {
      shape = "ShapeTensor";
    } else if (ctx.HasInput("Shape")) {
      shape = "Shape";
    } else {
      shape = "shape";
    }
    return framework::KernelSignature("reshape", {"X"}, {shape}, {"Out"});
  }
};

class Reshape2OpMaker : public ReshapeOpMaker {
 public:
  void Make() override {
    ReshapeOpMaker::Make();
    AddOutput("XShape",
              "XShape is just used to store the shape and lod of X, which "
              "will be used in Reshape2GradOp.")
        .AsIntermediate();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"});
  }
};

template <typename T>
class Reshape2GradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("reshape2_grad");
    grad_op->SetInput("XShape", this->Output("XShape"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttrMap(this->Attrs());
  }
};

template <typename T>
class Reshape2DoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("reshape2_grad_grad");
    grad_op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    grad_op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};

class Reshape2GradOp : public framework::OperatorWithKernel {
 public:
  Reshape2GradOp(const std::string &type,
                 const framework::VariableNameMap &inputs,
                 const framework::VariableNameMap &outputs,
                 const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("XShape"), true,
        platform::errors::InvalidArgument("Input(XShape) shouldn't be null."));
    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
                      platform::errors::InvalidArgument(
                          "Input(Out@GRAD) shouldn't be null."));
    auto xshape_dims = ctx->GetInputDim("XShape");
    auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
    ctx->ShareLoD("XShape", framework::GradVarName("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = framework::OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }

  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext &ctx) const override {
    return framework::KernelSignature("reshape_grad",
                                      {framework::GradVarName("Out")}, {},
                                      {framework::GradVarName("X")});
  }
};

class Reshape2DoubleGradOp : public framework::OperatorWithKernel {
 public:
  Reshape2DoubleGradOp(const std::string &type,
                       const framework::VariableNameMap &inputs,
                       const framework::VariableNameMap &outputs,
                       const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("DDX"), true,
                      platform::errors::InvalidArgument(
                          "Input(X@GRAD_GRAD) shouldn't be null."));
    if (ctx->HasOutput("DDOut") && ctx->HasInput("DDX")) {
      ctx->ShareDim("DOut", "DDOut");
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "DDX"),
        ctx.device_context());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext &ctx) const override {
    return framework::KernelSignature("reshape_double_grad", {"DDX"}, {},
                                      {"DDOut"});
  }
};

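// Reshape can run inplace: Out reuses X's buffer in the forward pass, and the
// gradient ops reuse their buffers likewise.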
DECLARE_INPLACE_OP_INFERER(ReshapeOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ReshapeGradInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ReshapeDoubleGradInplaceInferer, {"DDX", "DDOut"});
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ReshapeDoubleGradOpNoNeedBufferVarInferer,
                                    "DOut");

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OPERATOR(
    reshape, ops::ReshapeOp, ops::ReshapeOpMaker,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    ops::ReshapeOpInplaceInferer);
REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp,
                  ops::ReshapeGradInplaceInferer);

REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
                               ops::ReshapeKernel, int, ops::ReshapeKernel,
                               int64_t, ops::ReshapeKernel);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
                               double, ops::ReshapeGradKernel, int,
                               ops::ReshapeGradKernel, int64_t,
                               ops::ReshapeGradKernel);
REGISTER_OPERATOR(reshape2, ops::Reshape2Op, ops::Reshape2OpMaker,
                  ops::Reshape2GradMaker<paddle::framework::OpDesc>,
                  ops::Reshape2GradMaker<paddle::imperative::OpBase>,
                  ops::ReshapeOpInplaceInferer);
REGISTER_OPERATOR(reshape2_grad, ops::Reshape2GradOp,
                  ops::Reshape2DoubleGradMaker<paddle::framework::OpDesc>,
                  ops::Reshape2DoubleGradMaker<paddle::imperative::OpBase>,
                  ops::ReshapeGradInplaceInferer);
REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp,
                  ops::ReshapeDoubleGradInplaceInferer,
                  ops::ReshapeDoubleGradOpNoNeedBufferVarInferer);

REGISTER_OP_CPU_KERNEL_FUNCTOR(
    reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int8_t,
    ops::ReshapeKernel, uint8_t, ops::ReshapeKernel, int, ops::ReshapeKernel,
    int64_t, ops::ReshapeKernel, bool, ops::ReshapeKernel,
    paddle::platform::bfloat16, ops::ReshapeKernel,
    paddle::platform::complex<float>, ops::ReshapeKernel,
    paddle::platform::complex<double>, ops::ReshapeKernel);

REGISTER_OP_CPU_KERNEL_FUNCTOR(
    reshape2_grad, float, ops::ReshapeGradKernel, double,
    ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t,
    ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, bool,
    ops::ReshapeGradKernel, paddle::platform::bfloat16, ops::ReshapeGradKernel,
    paddle::platform::complex<float>, ops::ReshapeGradKernel,
    paddle::platform::complex<double>, ops::ReshapeGradKernel);
REGISTER_OP_CPU_KERNEL_FUNCTOR(
    reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double,
    ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t,
    ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel, bool,
    ops::ReshapeDoubleGradKernel, paddle::platform::bfloat16,
    ops::ReshapeDoubleGradKernel, paddle::platform::complex<float>,
    ops::ReshapeDoubleGradKernel, paddle::platform::complex<double>,
    ops::ReshapeDoubleGradKernel);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                uint8_t, ops::ReshapeKernel, int64_t,
                                ops::ReshapeKernel, plat::float16,
                                ops::ReshapeKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
                                ops::ReshapeGradKernel, uint8_t,
                                ops::ReshapeGradKernel, plat::float16,
                                ops::ReshapeGradKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                uint8_t, ops::ReshapeKernel, int64_t,
                                ops::ReshapeKernel, plat::float16,
                                ops::ReshapeKernel, bool, ops::ReshapeKernel,
                                plat::complex<float>, ops::ReshapeKernel,
                                plat::complex<double>, ops::ReshapeKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(
    reshape2_grad, float, ops::ReshapeGradKernel, double,
    ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t,
    ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, plat::float16,
    ops::ReshapeGradKernel, bool, ops::ReshapeGradKernel, plat::complex<float>,
    ops::ReshapeGradKernel, plat::complex<double>, ops::ReshapeGradKernel);

REGISTER_OP_CUDA_KERNEL_FUNCTOR(
    reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double,
    ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t,
    ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel,
    plat::float16, ops::ReshapeDoubleGradKernel, bool,
    ops::ReshapeDoubleGradKernel, plat::complex<float>,
    ops::ReshapeDoubleGradKernel, plat::complex<double>,
    ops::ReshapeDoubleGradKernel);
#endif

#ifdef PADDLE_WITH_XPU
REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                               ops::ReshapeKernel, int, ops::ReshapeKernel,
                               int64_t, ops::ReshapeKernel, plat::float16,
                               ops::ReshapeKernel, bool, ops::ReshapeKernel,
                               plat::complex<float>, ops::ReshapeKernel,
                               plat::complex<double>, ops::ReshapeKernel);
REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                               double, ops::ReshapeGradKernel, int,
                               ops::ReshapeGradKernel, int64_t,
                               ops::ReshapeGradKernel, plat::float16,
                               ops::ReshapeGradKernel, bool,
                               ops::ReshapeGradKernel, plat::complex<float>,
                               ops::ReshapeGradKernel, plat::complex<double>,
                               ops::ReshapeGradKernel);
#endif