/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/transpose_op.h"
#include <string>
#include <vector>

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using framework::Tensor;

class TransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
    auto x_dims = ctx->GetInputDim("X");
    std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
    size_t x_rank = x_dims.size();
    size_t axis_size = axis.size();

    PADDLE_ENFORCE_EQ(x_rank, axis_size,
                      "The input tensor's rank(%d) "
                      "should be equal to the axis's size(%d)",
                      x_rank, axis_size);

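    // Verify that "axis" is a permutation of [0, rank): each element must
    // be in range and, via the count array, appear exactly once.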
    std::vector<int> count(axis_size, 0);
    for (size_t i = 0; i < axis_size; i++) {
      PADDLE_ENFORCE(
          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
          "Each element of Attribute axis should be a unique value "
          "in the range [0, dims - 1], "
          "where dims is the size of axis");
    }

    framework::DDim out_dims(x_dims);
    for (size_t i = 0; i < axis_size; i++) {
      out_dims[i] = x_dims[axis[i]];
    }
    ctx->SetOutputDim("Out", out_dims);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
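    // Prefer the MKLDNN kernel and layout when PaddlePaddle was built with
    // MKLDNN and it can be used in this context; otherwise fall back to the
    // plain kernel. The grad ops below repeat the same selection logic.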
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                   ctx.GetPlace(), layout_, library_);
  }
};

class TransposeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "X",
        "(Tensor) The input tensor, tensors with rank up to 6 are supported.");
    AddOutput("Out", "(Tensor) The output tensor.");
    AddAttr<std::vector<int>>(
        "axis",
        "(vector<int>) A list of values, and the size of the list should be "
        "the same as the input tensor's rank. This operator permutes the "
        "input tensor's axes according to the values given.");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<std::string>(
        "data_format",
        "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
        "\"NCHW\". Specify the data format of the output data; "
        "the input will be transformed automatically. ")
        .SetDefault("AnyLayout");
    AddComment(R"DOC(
Transpose Operator.

The input tensor will be permuted according to the axes given.
The behavior of this operator is similar to how `numpy.transpose` works.

- Suppose the input `X` is a 2-D tensor:
    $$
    X = \begin{pmatrix}
    0 &1 &2 \\
    3 &4 &5
    \end{pmatrix}$$

    the given `axes` is $[1, 0]$, and $Y$ = transpose($X$, `axes`);

    then the output $Y$ is:

    $$
    Y = \begin{pmatrix}
         0 &3 \\
         1 &4  \\
         2 &5
    \end{pmatrix}$$

- Given an input tensor with shape $(N, C, H, W)$ and `axes` of
$[0, 2, 3, 1]$, the shape of the output tensor will be $(N, H, W, C)$.

)DOC");
  }
};

class TransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null");
    auto x_dims = ctx->GetInputDim("X");
    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    return framework::OpKernelType(
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->type(),
        ctx.GetPlace(), layout_, library_);
  }
};

// FIXME(zcd): transpose2 adds an intermediate output (XShape) on top of
// transpose. XShape carries the shape and LoD of X, which are needed in
// transpose2_grad; this way, the framework can reuse the memory of X as
// soon as transpose2_op finishes. For compatibility reasons, the existing
// transpose op could not be changed, so transpose2 was added instead.
class Transpose2Op : public TransposeOp {
 public:
  Transpose2Op(const std::string &type,
               const framework::VariableNameMap &inputs,
               const framework::VariableNameMap &outputs,
               const framework::AttributeMap &attrs)
      : TransposeOp(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    TransposeOp::InferShape(ctx);
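    // XShape stores X's shape shifted right by one, behind a dummy leading
    // dimension of 0; Transpose2OpGrad later slices that dimension off to
    // recover the real shape of X.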
    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
                   "Output(XShape) should not be null");
    const auto &in_dims = ctx->GetInputDim("X");
    std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
    x_shape_dim[0] = 0;
    for (int i = 0; i < in_dims.size(); ++i) {
      x_shape_dim[i + 1] = in_dims[i];
    }
    ctx->SetOutputDim("XShape", framework::make_ddim(x_shape_dim));
    ctx->ShareLoD("X", /*->*/ "XShape");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                   ctx.GetPlace(), layout_, library_);
  }
};

class Transpose2OpMaker : public TransposeOpMaker {
 public:
  void Make() override {
    TransposeOpMaker::Make();
    AddOutput("XShape", "(Tensor) The output tensor.").AsIntermediate();
  }
};

class Transpose2GradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto *grad_op = new framework::OpDesc();
    grad_op->SetType("transpose2_grad");
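    // The grad op reads the shape from XShape instead of taking X itself,
    // so the framework can free X's memory right after the forward pass.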
    grad_op->SetInput("XShape", Output("XShape"));
    grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    grad_op->SetAttrMap(Attrs());
    return std::unique_ptr<framework::OpDesc>(grad_op);
  }
};

class Transpose2OpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null");
    if (ctx->HasOutput(framework::GradVarName("X"))) {
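      // Strip the dummy leading dimension of XShape to recover X's shape.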
      auto xshape_dim = ctx->GetInputDim("XShape");
      auto x_shape_dim =
          framework::slice_ddim(xshape_dim, 1, xshape_dim.size());
      ctx->SetOutputDim(framework::GradVarName("X"), x_shape_dim);
      ctx->ShareLoD("XShape", framework::GradVarName("X"));
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::LibraryType library_{framework::LibraryType::kPlain};
    std::string data_format = ctx.Attr<std::string>("data_format");
    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
#ifdef PADDLE_WITH_MKLDNN
    if (library_ == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library_ = framework::LibraryType::kMKLDNN;
      layout_ = framework::DataLayout::kMKLDNN;
    }
#endif
    return framework::OpKernelType(
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->type(),
        ctx.GetPlace(), layout_, library_);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
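// transpose uses the framework's default grad op maker, while transpose2
// registers Transpose2GradMaker so that its grad op consumes XShape
// instead of X.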
REGISTER_OPERATOR(transpose, ops::TransposeOp, ops::TransposeOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(transpose_grad, ops::TransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    transpose, ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    transpose_grad,
    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker,
                  ops::Transpose2GradMaker);
REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad);

REGISTER_OP_CPU_KERNEL(
    transpose2, ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    transpose2_grad,
    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>);