/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

// Kernel-variant identifiers — presumably used to select the MKL-DNN
// transpose kernel's data type (FP32 vs INT8); confirm against the op's
// kernel-registration code.
enum {
  kTransposeMKLDNNFP32 = 1,
  kTransposeMKLDNNINT8 = 2
};

Q
QI JUN 已提交
26 27
template <typename DeviceContext, typename T>
inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
28 29 30 31
                         const framework::Tensor& in, framework::Tensor* out,
                         const std::vector<int>& axis) {
  switch (dim) {
    case 1:
32
      phi::funcs::Transpose<DeviceContext, T, 1> trans1;
33 34 35
      trans1(dev_ctx, in, out, axis);
      break;
    case 2:
36
      phi::funcs::Transpose<DeviceContext, T, 2> trans2;
37 38 39
      trans2(dev_ctx, in, out, axis);
      break;
    case 3:
40
      phi::funcs::Transpose<DeviceContext, T, 3> trans3;
41 42 43
      trans3(dev_ctx, in, out, axis);
      break;
    case 4:
44
      phi::funcs::Transpose<DeviceContext, T, 4> trans4;
45 46 47
      trans4(dev_ctx, in, out, axis);
      break;
    case 5:
48
      phi::funcs::Transpose<DeviceContext, T, 5> trans5;
49 50 51
      trans5(dev_ctx, in, out, axis);
      break;
    case 6:
52
      phi::funcs::Transpose<DeviceContext, T, 6> trans6;
53 54 55
      trans6(dev_ctx, in, out, axis);
      break;
    default:
56
      // for dim >= 7 situation
57
      phi::funcs::TransposeNormal<DeviceContext, T> trans_normal;
58
      trans_normal(dev_ctx, in, out, axis);
X
xzl 已提交
59 60 61
  }
}

Q
QI JUN 已提交
62
template <typename DeviceContext, typename T>
Y
Yu Yang 已提交
63
class TransposeKernel : public framework::OpKernel<T> {
X
xzl 已提交
64 65
 public:
  void Compute(const framework::ExecutionContext& context) const override {
66 67 68 69 70 71 72 73 74 75
    auto* x = context.InputVar("X");
    auto* out = context.OutputVar("Out");

    const framework::Tensor* x_tensor =
        GetLoDTensorOrSelectedRowsValueFromVar(*x);
    framework::Tensor* out_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(out);

    out_tensor->mutable_data<T>(context.GetPlace());
    if (out_tensor->numel() == 0) {
76 77
      return;
    }
78

79
    std::vector<int> axis = context.Attr<std::vector<int>>("axis");
X
xzl 已提交
80
    int ndims = axis.size();
Q
QI JUN 已提交
81
    auto& dev_ctx = context.template device_context<DeviceContext>();
82
    TransCompute<DeviceContext, T>(ndims, dev_ctx, *x_tensor, out_tensor, axis);
X
xzl 已提交
83 84 85
  }
};

Q
QI JUN 已提交
86
template <typename DeviceContext, typename T>
Y
Yu Yang 已提交
87
class TransposeGradKernel : public framework::OpKernel<T> {
X
xzl 已提交
88 89
 public:
  void Compute(const framework::ExecutionContext& context) const override {
90 91 92 93 94 95 96 97 98 99 100 101 102
    auto* out_grad = context.InputVar(framework::GradVarName("Out"));
    auto* x_grad = context.OutputVar(framework::GradVarName("X"));

    if (!x_grad) {
      return;
    }
    const framework::Tensor* out_grad_tensor =
        GetLoDTensorOrSelectedRowsValueFromVar(*out_grad);
    framework::Tensor* x_grad_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(x_grad);

    x_grad_tensor->mutable_data<T>(context.GetPlace());
    if (x_grad_tensor->numel() == 0) {
103 104 105
      return;
    }

106 107
    std::vector<int> axis = context.Attr<std::vector<int>>("axis");
    std::vector<int> reversed_axis(axis);
108

109 110
    for (size_t i = 0; i < axis.size(); i++) {
      reversed_axis[axis[i]] = i;
X
xzl 已提交
111
    }
112 113

    int ndims = axis.size();
Q
QI JUN 已提交
114
    auto& dev_ctx = context.template device_context<DeviceContext>();
115 116
    TransCompute<DeviceContext, T>(ndims, dev_ctx, *out_grad_tensor,
                                   x_grad_tensor, reversed_axis);
X
xzl 已提交
117 118 119 120 121
  }
};

}  // namespace operators
}  // namespace paddle