// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "paddle/fluid/prim/api/all.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/ddim.h"

namespace paddle {
namespace prim {
using Tensor = paddle::experimental::Tensor;
using IntArray =
    paddle::experimental::IntArrayBase<paddle::experimental::Tensor>;
//  This function should have as same signature as phi, which defined in
//  paddle/phi/api/backward/backward_api.h
template <typename T>
void tanh_grad(const Tensor& out, const Tensor& grad_out, Tensor* grad_x) {
29
  if (!grad_x) return;
J
Jiabin Yang 已提交
30 31
  auto tmp = pow<T>(out, 2.0);
  tmp = scale<T>(tmp, -1.0, 1.0, true);
32
  auto grad_x_tmp = grad_out * tmp;
33
  set_output<T>(grad_x_tmp, grad_x);
J
Jiabin Yang 已提交
34
}
35

template <typename T>
void subtract_grad(const Tensor& x,
                   const Tensor& y,
                   const Tensor& out_grad,
                   int axis,
                   Tensor* dx,
                   Tensor* dy) {
  if (dy) {
    auto scale_out_grad = scale<T>(out_grad, -1.0, 0.0, true);
45
    if (x.dims() != y.dims()) {
46
      // Maybe need reduce here
47 48 49 50 51 52 53
      phi::DDim reduce_dim = get_reduce_dims(y.dims(), x.dims());
      if (!reduce_dim.size()) {
        by_pass<T>(scale_out_grad, dy);
      } else {
        auto dy_reduce_res = sum<T>(
            scale_out_grad, phi::vectorize(reduce_dim), y.dtype(), false);
        auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
54
        set_output<T>(dy_tmp, dy);
55
      }
56 57 58 59 60
    } else {
      by_pass<T>(scale_out_grad, dy);
    }
  }
  if (dx) {
61
    if (y.dims() != x.dims()) {
62
      // Maybe need reduce here
63 64 65 66 67 68 69
      auto reduce_dim = get_reduce_dims(x.dims(), y.dims());
      if (!reduce_dim.size()) {
        by_pass<T>(out_grad, dx);
      } else {
        auto dx_reduce_res =
            sum<T>(out_grad, phi::vectorize(reduce_dim), x.dtype(), false);
        auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
70
        set_output<T>(dx_tmp, dx);
71
      }
72 73 74 75 76 77 78 79 80 81 82 83 84 85
    } else {
      by_pass<T>(out_grad, dx);
    }
  }
}

template <typename T>
void add_grad(const Tensor& x,
              const Tensor& y,
              const Tensor& out_grad,
              int axis,
              Tensor* dx,
              Tensor* dy) {
  if (dy) {
86
    if (x.dims() != y.dims()) {
87
      // Maybe need reduce here
88 89 90 91 92 93 94
      phi::DDim reduce_dim = get_reduce_dims(y.dims(), x.dims());
      if (!reduce_dim.size()) {
        by_pass<T>(out_grad, dy);
      } else {
        auto dy_reduce_res =
            sum<T>(out_grad, phi::vectorize(reduce_dim), y.dtype(), false);
        auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
95
        set_output<T>(dy_tmp, dy);
96 97
      }

98 99 100 101 102
    } else {
      by_pass<T>(out_grad, dy);
    }
  }
  if (dx) {
103
    if (y.dims() != x.dims()) {
104
      // Maybe need reduce here
105 106 107 108 109 110 111
      auto reduce_dim = get_reduce_dims(x.dims(), y.dims());
      if (!reduce_dim.size()) {
        by_pass<T>(out_grad, dx);
      } else {
        auto dx_reduce_res =
            sum<T>(out_grad, phi::vectorize(reduce_dim), x.dtype(), false);
        auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
112
        set_output<T>(dx_tmp, dx);
113
      }
114 115 116 117 118 119
    } else {
      by_pass<T>(out_grad, dx);
    }
  }
}

template <typename T>
void sum_grad(const Tensor& x,
              const Tensor& out_grad,
              const IntArray& axis,
              bool keepdim,
              bool reduce_all,
              Tensor* x_grad) {
  if (!x_grad) {
    return;
  }
R
risemeup1 已提交
130
  std::vector<int64_t> x_dim = phi::vectorize<int64_t>(x.dims());
131 132 133 134 135 136 137 138 139
  int64_t axis_size = axis.size();
  int64_t x_dim_size = x_dim.size();
  reduce_all = false;
  if (reduce_all || axis_size == 0 || axis_size == x_dim_size) {
    reduce_all = true;
  } else {
    reduce_all = false;
  }
  auto x_grad_tmp = Tensor();
140 141 142 143 144 145 146 147 148 149 150
  if (x_dim_size == 1) {
    x_grad_tmp = expand<T>(out_grad, IntArray(x_dim));
  } else {
    if (!keepdim) {
      auto axis_ = std::vector<int64_t>();
      if (reduce_all) {
        for (int64_t i = 1; i < x_dim_size; i++) {
          axis_.push_back(i);
        }
      } else {
        axis_ = axis.GetData();
151
      }
152 153
      auto out_grad_ = unsqueeze<T>(out_grad, axis_);
      x_grad_tmp = expand<T>(out_grad_, IntArray(x_dim));
154
    } else {
155
      x_grad_tmp = expand<T>(out_grad, IntArray(x_dim));
156 157 158
    }
  }

159
  set_output<T>(x_grad_tmp, x_grad);
160 161
}

template <typename T>
void divide_grad(const Tensor& x,
                 const Tensor& y,
                 const Tensor& out,
                 const Tensor& out_grad,
                 int axis,
                 Tensor* dx,
                 Tensor* dy) {
  if (dy) {
    // dy = -(x/y^2) * dout
    auto tmp0 = pow<T>(y, 2.0);
173
    auto tmp1 = x / tmp0;
174
    auto tmp2 = scale<T>(tmp1, -1.0, 0.0, true);
175
    auto dy_res = tmp2 * out_grad;
176
    if (x.dims() != y.dims()) {
177
      // Maybe need reduce here
178 179
      phi::DDim reduce_dim = get_reduce_dims(y.dims(), x.dims());
      if (!reduce_dim.size()) {
180
        set_output<T>(dy_res, dy);
181 182 183 184
      } else {
        auto dy_reduce_res =
            sum<T>(dy_res, phi::vectorize(reduce_dim), y.dtype(), false);
        auto dy_tmp = reshape<T>(dy_reduce_res, phi::vectorize(y.dims()));
185
        set_output<T>(dy_tmp, dy);
186
      }
187
    } else {
188
      set_output<T>(dy_res, dy);
189 190 191 192
    }
  }  // indicate we will compute dy
  if (dx) {
    // dx = (1/y) * dout
193
    auto one_tensor = full<T>(phi::vectorize(y.dims()), 1.0, y.dtype());
194
    auto dx_res = one_tensor / y * out_grad;
195
    if (y.dims() != x.dims()) {
196
      // Maybe need reduce here
197 198
      auto reduce_dim = get_reduce_dims(x.dims(), y.dims());
      if (!reduce_dim.size()) {
199
        set_output<T>(dx_res, dx);
200 201 202 203
      } else {
        auto dx_reduce_res =
            sum<T>(dx_res, phi::vectorize(reduce_dim), x.dtype(), false);
        auto dx_tmp = reshape<T>(dx_reduce_res, phi::vectorize(x.dims()));
204
        set_output<T>(dx_tmp, dx);
205 206
      }

207
    } else {
208
      set_output<T>(dx_res, dx);
209 210 211
    }
  }  // indicate we will compute dx
}

template <typename T>
void sqrt_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
  if (x_grad) {
    auto div_x = full<T>(phi::vectorize(out.dims()), 0.5);
217
    auto x_grad_tmp = out_grad * div_x / out;
218
    set_output<T>(x_grad_tmp, x_grad);
219 220
  }
}

template <typename T>
void multiply_grad(const Tensor& x,
                   const Tensor& y,
                   const Tensor& out_grad,
                   int axis,
                   Tensor* x_grad,
                   Tensor* y_grad) {
  if (x_grad) {
230
    auto x_grad_unreduce = out_grad * y;
231 232
    if (x_grad_unreduce.dims() != x.dims()) {
      auto axes = get_reduce_dims_from_out(x_grad_unreduce.dims(), x.dims());
233
      if (!axes.size()) {
234
        set_output<T>(x_grad_unreduce, x_grad);
235 236 237 238 239 240 241 242
      } else {
        auto x_grad_reduced = sum<T>(x_grad_unreduce,
                                     phi::vectorize(axes),
                                     x_grad_unreduce.dtype(),
                                     false);
        if (x_grad_reduced.dims().size() != x.dims().size()) {
          x_grad_reduced = reshape<T>(x_grad_reduced, x.shape());
        }
243
        set_output<T>(x_grad_reduced, x_grad);
244 245
      }
    } else {
246
      set_output<T>(x_grad_unreduce, x_grad);
247 248 249
    }
  }
  if (y_grad) {
250
    auto y_grad_unreduce = out_grad * x;
251 252
    if (y_grad_unreduce.dims() != y.dims()) {
      auto axes = get_reduce_dims_from_out(y_grad_unreduce.dims(), y.dims());
253
      if (!axes.size()) {
254
        set_output<T>(y_grad_unreduce, y_grad);
255 256 257 258 259 260 261 262
      } else {
        auto y_grad_reduced = sum<T>(y_grad_unreduce,
                                     phi::vectorize(axes),
                                     y_grad_unreduce.dtype(),
                                     false);
        if (y_grad_reduced.dims().size() != y.dims().size()) {
          y_grad_reduced = reshape<T>(y_grad_reduced, y.shape());
        }
263
        set_output<T>(y_grad_reduced, y_grad);
264 265
      }
    } else {
266
      set_output<T>(y_grad_unreduce, y_grad);
267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
    }
  }
}

template <typename T>
void expand_grad(const Tensor& x,
                 const Tensor& out_grad,
                 const IntArray& shape,
                 Tensor* x_grad) {
  if (x_grad) {
    auto out_dims = phi::make_ddim(shape.GetData());
    if (out_dims != x.dims()) {
      auto axes = get_reduce_dims(x.dims(), out_dims);
      if (!axes.size()) {
        by_pass<T>(out_grad, x_grad);
      } else {
        auto reduced = sum<T>(out_grad, phi::vectorize(axes), x.dtype(), false);
        if (reduced.dims().size() != x.dims().size()) {
          reduced = reshape<T>(reduced, x.shape());
        }
287
        set_output<T>(reduced, x_grad);
288 289 290 291 292 293 294 295 296 297
      }
    } else {
      by_pass<T>(out_grad, x_grad);
    }
  }
}

template <typename T>
void exp_grad(const Tensor& out, const Tensor& out_grad, Tensor* x_grad) {
  if (x_grad) {
298
    set_output<T>(out_grad * out, x_grad);
299 300 301
  }
}

}  // namespace prim
}  // namespace paddle