elementwise_div_op_npu.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>

#include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

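// Forward kernel: Out = X / Y, computed with a single CANN "Div" operator.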
template <typename DeviceContext, typename T>
class ElementwiseDivNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");

    auto* out = ctx.Output<Tensor>("Out");

    auto place = ctx.GetPlace();

    out->mutable_data<T>(place);

    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();

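    // Out = X / Y.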
    const auto& runner = NpuOpRunner("Div", {*x, *y}, {*out}, {});
    runner.Run(stream);
  }
};

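// Backward kernel: for Out = X / Y,
//   dX = dOut / Y  (zeroed where X == 0 by the mask built below),
//   dY = -dOut * Out / Y = -dOut * X / Y^2.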
template <typename DeviceContext, typename T>
class ElementwiseDivGradNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Input<Tensor>("Out");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");

    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));

    auto place = ctx.GetPlace();

    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();

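    // y_power = Y^(-1), used when forming dX.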
    Tensor y_power(y->type());
    y_power.mutable_data<T>(y->dims(), place);
    const auto& runner_y_power = NpuOpRunner(
        "Power", {*y}, {y_power}, {{"power", static_cast<float>(-1)}});
    runner_y_power.Run(stream);

    if (dx) {
      dx->mutable_data<T>(place);

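      // tensor_zeros: an all-zero tensor shaped like X, used to test X == 0.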
      Tensor tensor_zeros(x->type());
      tensor_zeros.mutable_data<T>(x->dims(), place);
      const auto& runner_tensor_zeros =
          NpuOpRunner("ZerosLike", {*x}, {tensor_zeros}, {});
      runner_tensor_zeros.Run(stream);

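      // x_zero = (X == 0), element-wise.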
      Tensor x_zero(paddle::framework::proto::VarType::BOOL);
      x_zero.mutable_data<bool>(x->dims(), place);
      const auto& runner_x_zero =
          NpuOpRunner("Equal", {*x, tensor_zeros}, {x_zero}, {});
      runner_x_zero.Run(stream);

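      // x_nozero = !(X == 0): true where X is nonzero.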
      Tensor x_nozero(paddle::framework::proto::VarType::BOOL);
      x_nozero.mutable_data<bool>(x->dims(), place);
      const auto& runner_x_nonzero =
          NpuOpRunner("LogicalNot", {x_zero}, {x_nozero}, {});
      runner_x_nonzero.Run(stream);

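      // Cast the bool mask to a floating-point mask (dst_type 0 maps to float
      // in the CANN dtype enum).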
      Tensor x_nozero_f(x->type());
      x_nozero_f.mutable_data<T>(x->dims(), place);
      const auto& runner_x_nonzero_f =
          NpuOpRunner("Cast", {x_nozero}, {x_nozero_f},
                      {{"dst_type", static_cast<int32_t>(0)}});
      runner_x_nonzero_f.Run(stream);

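      // x_grad_w = mask * Y^(-1): 1/Y where X != 0, 0 elsewhere.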
      Tensor x_grad_w(x->type());
      x_grad_w.mutable_data<T>(x->dims(), place);
      const auto& runner_x_grad_w =
          NpuOpRunner("Mul", {x_nozero_f, y_power}, {x_grad_w}, {});
      runner_x_grad_w.Run(stream);

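      // dX = x_grad_w * dOut.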
      const auto& runner_x_grad =
          NpuOpRunner("Mul", {x_grad_w, *dout}, {*dx}, {});
      runner_x_grad.Run(stream);
    }

    if (dy) {
      dy->mutable_data<T>(place);

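      // neg_out = -Out.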
      Tensor neg_out(y->type());
      neg_out.mutable_data<T>(y->dims(), place);
      const auto& runner_neg_out = NpuOpRunner("Neg", {*out}, {neg_out}, {});
      runner_neg_out.Run(stream);

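      // y_grad_w = -Out / Y = -X / Y^2.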
      Tensor y_grad_w(y->type());
      y_grad_w.mutable_data<T>(y->dims(), place);
      const auto& runner_y_grad_w =
          NpuOpRunner("Div", {neg_out, *y}, {y_grad_w}, {});
      runner_y_grad_w.Run(stream);

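      // dY = y_grad_w * dOut.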
      const auto& runner_y_grad =
          NpuOpRunner("Mul", {y_grad_w, *dout}, {*dy}, {});
      runner_y_grad.Run(stream);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_NPU_KERNEL(
    elementwise_div,
    ops::ElementwiseDivNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::ElementwiseDivNPUKernel<paddle::platform::NPUDeviceContext,
                                 paddle::platform::float16>);

REGISTER_OP_NPU_KERNEL(
    elementwise_div_grad,
    ops::ElementwiseDivGradNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::ElementwiseDivGradNPUKernel<paddle::platform::NPUDeviceContext,
                                     paddle::platform::float16>);