/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/transpose_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/platform/xpu/xpu_header.h"

namespace paddle {
namespace operators {

using framework::Tensor;

// Forward kernel: permutes the dimensions of input tensor "X" into
// output "Out" according to the "axis" attribute, using the XPU
// xdnn transpose API.
template <typename DeviceContext, typename T>
class TransposeXPUKernel : public framework::OpKernel<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;

 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto x = context.Input<framework::Tensor>("X");
    auto out = context.Output<framework::Tensor>("Out");

    // "axis" is the permutation: output dim i is taken from x dim axis[i].
    auto axis = context.Attr<std::vector<int>>("axis");
    int ndims = axis.size();
    const auto x_dims = x->dims();
    const T* x_data = x->data<T>();
    // Allocate the output before the empty check so downstream ops
    // always see a valid (possibly zero-sized) buffer.
    T* y_data = out->mutable_data<T>(context.GetPlace());
    if (out->numel() == 0) {
      return;
    }

    // The XPU API takes the input shape as a host-side int vector.
    std::vector<int> x_shape_host(ndims, 0);
    for (int i = 0; i < ndims; ++i) {
      x_shape_host[i] = x_dims[i];
    }
    auto& dev_ctx = context.template device_context<DeviceContext>();
    int r = xpu::transpose<XPUType>(
        dev_ctx.x_context(), reinterpret_cast<const XPUType*>(x_data),
        reinterpret_cast<XPUType*>(y_data), x_shape_host, axis);
    // Name the failing kernel in the message so XPU failures are
    // attributable without a debugger.
    PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
                      platform::errors::External(
                          "XPU transpose kernel error! error code=%d", r));
  }
};

// Backward kernel: dX = transpose(dOut, inverse(axis)).
template <typename DeviceContext, typename T>
class TransposeGradXPUKernel : public framework::OpKernel<T> {
  using XPUType = typename XPUTypeTrait<T>::Type;

 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* out_grad =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* x_grad =
        context.Output<framework::Tensor>(framework::GradVarName("X"));
    // X@GRAD may be absent when X does not require a gradient.
    if (!x_grad) return;

    x_grad->mutable_data<T>(context.GetPlace());
    // Nothing to compute for an empty gradient; mirrors the early
    // return in the forward kernel.
    if (x_grad->numel() == 0) {
      return;
    }

    // Invert the forward permutation: if output dim i came from x dim
    // axis[i], then x dim axis[i] comes from output dim i.
    std::vector<int> axis = context.Attr<std::vector<int>>("axis");
    std::vector<int> reversed_axis(axis);
    for (size_t i = 0; i < axis.size(); i++) {
      reversed_axis[axis[i]] = i;
    }

    // Host-side shape of dOut for the XPU API.
    int ndims = axis.size();
    std::vector<int> out_shape_host(ndims, 0);
    for (int i = 0; i < ndims; ++i) {
      out_shape_host[i] = out_grad->dims()[i];
    }
    auto& dev_ctx = context.template device_context<DeviceContext>();
    int r = xpu::transpose<XPUType>(
        dev_ctx.x_context(),
        reinterpret_cast<const XPUType*>(out_grad->data<T>()),
        reinterpret_cast<XPUType*>(x_grad->data<T>()), out_shape_host,
        reversed_axis);
    // Name the failing kernel in the message so XPU failures are
    // attributable without a debugger.
    PADDLE_ENFORCE_EQ(
        r, xpu::Error_t::SUCCESS,
        platform::errors::External(
            "XPU transpose_grad kernel error! error code=%d", r));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// Register float and float16 XPU kernels. transpose2 reuses the same
// compute kernels as transpose (it differs only in the extra XShape
// output, which the kernels here do not touch).
REGISTER_OP_XPU_KERNEL(
    transpose,
    ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext, float>,
    ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext,
                            paddle::platform::float16>);
REGISTER_OP_XPU_KERNEL(
    transpose_grad,
    ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext, float>,
    ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext,
                                paddle::platform::float16>);
REGISTER_OP_XPU_KERNEL(
    transpose2,
    ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext, float>,
    ops::TransposeXPUKernel<paddle::platform::XPUDeviceContext,
                            paddle::platform::float16>);
REGISTER_OP_XPU_KERNEL(
    transpose2_grad,
    ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext, float>,
    ops::TransposeGradXPUKernel<paddle::platform::XPUDeviceContext,
                                paddle::platform::float16>);
121 122

#endif  // PADDLE_WITH_XPU