// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/host/activation_grad_compute.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace host {

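// square_grad: for out = x * x, d out / d x = 2 * x, so the input gradient
// is dL/dx = dL/dout * 2 * x, computed elementwise from the forward input X.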
void SquareGradCompute::Run() {
  auto& param = this->Param<param_t>();
  CHECK(param.X);
  CHECK(param.Out_grad);
  auto out_grad_dims = param.Out_grad->dims();
  auto out_grad_data = param.Out_grad->data<float>();

  auto x_data = param.X->data<float>();
  auto x_grad_data = param.X_grad->mutable_data<float>();
  for (int64_t i = 0; i < out_grad_dims.production(); i++) {
    x_grad_data[i] = out_grad_data[i] * 2.0f * x_data[i];
  }
}

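// relu_grad: relu passes x through where x > 0 and outputs 0 elsewhere, so
// dL/dx = dL/dout where x > 0 and 0 otherwise.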
void ReluGradCompute::Run() {
  auto& param = this->Param<param_t>();
  CHECK(param.X);
  CHECK(param.Out_grad);
  auto out_grad_dims = param.Out_grad->dims();
  auto out_grad_data = param.Out_grad->data<float>();

  auto x_data = param.X->data<float>();
  auto x_grad_data = param.X_grad->mutable_data<float>();
  for (int64_t i = 0; i < out_grad_dims.production(); i++) {
    x_grad_data[i] = x_data[i] > 0.f ? out_grad_data[i] : 0.0f;
  }
}

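// tanh_grad: d tanh(x) / d x = 1 - tanh(x)^2 = 1 - out^2, so the gradient is
// recovered from the forward output Out alone; the forward input X is unused.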
void TanhGradCompute::Run() {
  auto& param = this->Param<param_t>();
  CHECK(param.Out);
  CHECK(param.Out_grad);
  auto out_grad_dims = param.Out_grad->dims();
  auto out_grad_data = param.Out_grad->data<float>();

  auto out_data = param.Out->data<float>();
  auto x_grad_data = param.X_grad->mutable_data<float>();
  for (int64_t i = 0; i < out_grad_dims.production(); i++) {
    x_grad_data[i] = out_grad_data[i] * (1.0f - out_data[i] * out_data[i]);
  }
}

}  // namespace host
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

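// Register the three gradient kernels on the host target. Each binds the
// forward tensor(s) it reads plus the upstream gradient "Out@GRAD" as inputs,
// and the input gradient "X@GRAD" as output.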
REGISTER_LITE_KERNEL(square_grad,
                     kHost,
                     kFloat,
                     kNCHW,
                     paddle::lite::kernels::host::SquareGradCompute,
                     def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindInput("Out@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindOutput("X@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .Finalize();

REGISTER_LITE_KERNEL(relu_grad,
                     kHost,
                     kFloat,
                     kNCHW,
                     paddle::lite::kernels::host::ReluGradCompute,
                     def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindInput("Out@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindOutput("X@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .Finalize();

REGISTER_LITE_KERNEL(tanh_grad,
                     kHost,
                     kFloat,
                     kNCHW,
                     paddle::lite::kernels::host::TanhGradCompute,
                     def)
    .BindInput("Out", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindInput("Out@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .BindOutput("X@GRAD", {LiteType::GetTensorTy(TARGET(kHost))})
    .Finalize();