/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/transpose_op.cu.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

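// Forward CUDA kernel: permutes the dimensions of X according to the "axis"
// attribute, so Out.dims[k] == X.dims[axis[k]] and
// Out(i_0, ..., i_{n-1}) == X(j_0, ..., j_{n-1}) where j[axis[k]] == i[k].
// The data movement itself is delegated to TransposeGPUKernelDriver.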
template <typename DeviceContext, typename T>
class TransposeGPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.InputVar("X");
    auto* out = context.OutputVar("Out");

    const framework::Tensor* x_tensor =
        GetLoDTensorOrSelectedRowsValueFromVar(*x);
    framework::Tensor* out_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(out);

    out_tensor->mutable_data<T>(context.GetPlace());
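    // Nothing to launch for an empty tensor.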
    if (out_tensor->numel() == 0) {
      return;
    }

    std::vector<int> axis = context.Attr<std::vector<int>>("axis");
    int ndims = axis.size();
    const auto& dev_ctx = context.template device_context<DeviceContext>();
    TransposeGPUKernelDriver<T>(dev_ctx, ndims, *x_tensor, axis, out_tensor);
  }
};
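
// Backward CUDA kernel: computes dX by transposing dOut with the inverse of
// the forward permutation, reusing the same transpose driver.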
template <typename DeviceContext, typename T>
class TransposeGradGPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* out_grad = context.InputVar(framework::GradVarName("Out"));
    auto* x_grad = context.OutputVar(framework::GradVarName("X"));
    if (!x_grad) {
      return;
    }

    const framework::Tensor* out_grad_tensor =
        GetLoDTensorOrSelectedRowsValueFromVar(*out_grad);
    framework::Tensor* x_grad_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(x_grad);

    x_grad_tensor->mutable_data<T>(context.GetPlace());
    if (x_grad_tensor->numel() == 0) {
      return;
    }

    std::vector<int> axis = context.Attr<std::vector<int>>("axis");
    std::vector<int> reversed_axis(axis);

    // Invert the permutation: axis[i] == j means output dim i came from input
    // dim j, so reversed_axis[j] == i maps the gradient back to X's layout.
    for (size_t i = 0; i < axis.size(); i++) {
      reversed_axis[axis[i]] = i;
    }
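
    // Example: with axis = {1, 2, 0}, X[A, B, C] maps to Out[B, C, A]; the
    // inverse permutation {2, 0, 1} transposes dOut[B, C, A] back to
    // dX[A, B, C].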

    int ndims = axis.size();
    const auto& dev_ctx = context.template device_context<DeviceContext>();
    TransposeGPUKernelDriver<T>(dev_ctx, ndims, *out_grad_tensor, reversed_axis,
                                x_grad_tensor);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
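
// Kernel registrations. The transpose2/transpose2_grad variants additionally
// support the integer types; transpose2 also carries an extra XShape output
// (defined in transpose_op.cc) used to recover the input shape in backward.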

REGISTER_OP_CUDA_KERNEL(
    transpose,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
                            paddle::platform::complex<float>>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
                            paddle::platform::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
    transpose_grad,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                plat::float16>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                paddle::platform::complex<float>>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                paddle::platform::complex<double>>);

REGISTER_OP_CUDA_KERNEL(
    transpose2,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int32_t>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int64_t>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
                            paddle::platform::complex<float>>,
    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
                            paddle::platform::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
    transpose2_grad,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int32_t>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int64_t>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                plat::float16>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                paddle::platform::complex<float>>,
    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
                                paddle::platform::complex<double>>);