/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <glog/logging.h>

#include <algorithm>
#include <functional>  // for multiplies
#include <iterator>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/elementwise/elementwise_functor.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/transform.h"

#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/kernels/cpu/elementwise.h"

#if defined(__NVCC__) || defined(__HIPCC__)
#ifdef __NVCC__
#include <cuda.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime.h>
#endif
#include <thrust/iterator/iterator_adaptor.h>

#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"

#endif

#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/kernels/funcs/math_function.h"

#define DIVUP(x, y) (((x) + (y)-1) / (y))

#define ROUNDUP(x, y) (DIVUP((x), (y)) * (y))
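// For example, DIVUP(10, 4) = 3 (the ceiling of 10 / 4) and
// ROUNDUP(10, 4) = 12 (10 rounded up to the next multiple of 4).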

namespace paddle {
namespace operators {

/*
 *  Pack the input and output tensors into the respective vectors, taking
 *  the class type of input variable X into account. X may be either a
 *  LoDTensor or a SelectedRows; when X is a SelectedRows, a valid pointer
 *  x_for_selectedrows is expected to be passed in from the op kernel,
 *  through which the address of the LoDTensor created inside this function
 *  can be obtained.
 */
template <typename OutT>
int PackTensorsIntoVector(const framework::ExecutionContext &ctx,
                          std::vector<const framework::Tensor *> *ins,
                          std::vector<framework::Tensor *> *outs,
                          framework::Tensor *x_for_selectedrows = nullptr) {
  int axis = -1;
  auto x_var = ctx.InputVar("X");
  PADDLE_ENFORCE_NOT_NULL(
      x_var, platform::errors::InvalidArgument(
                 "Unable to get input Variable X, Variable name is %s.\n",
                 ctx.InputName("X")));
  auto *y = ctx.Input<framework::LoDTensor>("Y");
  framework::Tensor *z;

  if (x_var->IsType<framework::LoDTensor>()) {
    auto *x = ctx.Input<framework::LoDTensor>("X");
    z = ctx.Output<framework::LoDTensor>("Out");
    ins->emplace_back(x);
  } else if (x_var->IsType<phi::SelectedRows>()) {
    PADDLE_ENFORCE_EQ(y->dims().size() == 1 && y->dims()[0] == 1, true,
                      platform::errors::InvalidArgument(
                          "For elementwise_op, if X is Sparse, Y must be "
                          "scalar. But received the size of Y = %d.",
                          y->dims().size()));
    PADDLE_ENFORCE_NOT_NULL(
        x_for_selectedrows,
        platform::errors::InvalidArgument(
            "The parameter x_for_selectedrows is expected to "
            "be valid when input variable X's class type is "
            "SelectedRows.\n"));
    auto &x_sele = x_var->Get<phi::SelectedRows>();
    auto out_sele = ctx.Output<phi::SelectedRows>("Out");
    *x_for_selectedrows = x_sele.value();
    out_sele->set_rows(x_sele.rows());
    out_sele->set_height(x_sele.height());
    out_sele->mutable_value()->Resize(x_sele.value().dims());
    out_sele->mutable_value()->mutable_data(ctx.GetPlace(),
                                            x_for_selectedrows->type());
    z = ctx.Output<phi::SelectedRows>("Out")->mutable_value();
    ins->emplace_back(x_for_selectedrows);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "X's type[%s] is not supported by elementwise_op. X's type should be "
        "LoDTensor or SelectedRows.",
        framework::ToTypeName(x_var->Type())));
  }
  z->mutable_data<OutT>(ctx.GetPlace());
  outs->emplace_back(z);

  if (y != nullptr) {
    ins->emplace_back(y);
    axis = ctx.HasAttr("axis") ? ctx.Attr<int>("axis") : -1;
  }
  return axis;
}
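
// A minimal usage sketch (hypothetical kernel code; T is the output type the
// calling kernel instantiates):
//
//   std::vector<const framework::Tensor *> ins;
//   std::vector<framework::Tensor *> outs;
//   framework::Tensor x_for_selectedrows;
//   int axis =
//       PackTensorsIntoVector<T>(ctx, &ins, &outs, &x_for_selectedrows);
//   // ins now holds {X, Y}, outs holds {Out}, and axis is the broadcast
//   // axis to forward to the elementwise kernel launch.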

inline void GetBroadcastDimsArrays(const framework::DDim &x_dims,
                                   const framework::DDim &y_dims,
                                   int *x_dims_array, int *y_dims_array,
                                   int *out_dims_array, const int max_dim,
                                   const int axis) {
  phi::funcs::GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array, y_dims_array,
                                     out_dims_array, max_dim, axis);
}

inline framework::DDim trim_trailing_singular_dims(
    const framework::DDim &dims) {
  return phi::funcs::trim_trailing_singular_dims(dims);
}
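
// For example, trim_trailing_singular_dims turns a DDim like (4, 20, 1, 1)
// into (4, 20), so a broadcast such as shape(X) = (2, 3, 4, 20) with
// shape(Y) = (4, 20, 1, 1) and axis = 2 reduces to the same case as
// shape(Y) = (4, 20).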

template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP,
          typename Tout = T>
void ElemwiseGradCompute(const framework::ExecutionContext &ctx,
                         const framework::Tensor &x, const framework::Tensor &y,
                         const framework::Tensor &out,
                         const framework::Tensor &dout, int axis,
                         framework::Tensor *dx, framework::Tensor *dy,
                         DX_OP dx_op, DY_OP dy_op) {
  const framework::DDim &x_dim = x.dims();
  const framework::DDim &y_dim = y.dims();
  const auto &dev_ctx = ctx.template device_context<DeviceContext>();
  if (x.dims() == y.dims()) {
    phi::funcs::ElemwiseGradComputeNoBroadcast<DeviceContext, T, DX_OP, DY_OP,
                                               Tout>(
        dev_ctx, x_dim, y_dim, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
  } else {
    phi::ElemwiseGradComputeWithBroadcast<T, DX_OP, DY_OP, Tout>(
        dev_ctx, x_dim, y_dim, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
  }
}
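
// A minimal sketch of the DX_OP/DY_OP functor shape this expects (a
// hypothetical example; real ops define their own functors): for
// out = x + y, both gradients are simply dout, so
//
//   template <typename T>
//   struct IdentityGrad {
//     HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
//   };
//
// could serve as both DX_OP and DY_OP.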

// It is a common implementation to compute a binary calculation with broadcast
// support, on both CPU and GPU.
// - The CPU implementation cannot handle the case where x needs the
//   broadcast, so this function must be called with both an XxxFunctor and
//   an XxxInverseFunctor, like AddFunctor and InverseAddFunctor.
// - The GPU implementation supports all the broadcast cases, so there is no
//   need to define and call with an XxxInverseFunctor.
// TODO(liuyiqun): optimize the CPU implementation to support all broadcast
// cases and avoid the need for XxxInverseFunctor.
template <typename Functor, typename DeviceContext, typename T,
          typename OutType = T>
void ElementwiseComputeEx(const framework::ExecutionContext &ctx,
                          const framework::Tensor *x,
                          const framework::Tensor *y, int axis, Functor func,
                          framework::Tensor *z) {
  z->mutable_data<OutType>(ctx.GetPlace());
  if (platform::is_gpu_place(ctx.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
    const auto &dev_ctx =
        ctx.template device_context<platform::CUDADeviceContext>();
    phi::ElementwiseCompute<Functor, T, OutType>(dev_ctx, *x, *y, axis, func,
                                                 z);

#endif
    return;
  }
  const auto &dev_ctx =
      ctx.template device_context<platform::CPUDeviceContext>();
  phi::ElementwiseCompute<Functor, T, OutType>(dev_ctx, *x, *y, axis, func, z);
}
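
// A usage sketch (hypothetical, assuming the AddFunctor/InverseAddFunctor
// pair mentioned above): the CPU path only ever broadcasts y, so when x is
// the smaller operand the inverse functor is used instead.
//
//   if (x->dims().size() >= y->dims().size()) {
//     ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(
//         ctx, x, y, axis, AddFunctor<T>(), z);
//   } else {
//     ElementwiseComputeEx<InverseAddFunctor<T>, DeviceContext, T>(
//         ctx, x, y, axis, InverseAddFunctor<T>(), z);
//   }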

// FusedElemwiseAndAct
// --- forward
template <typename T, typename CompoundFunctor, bool KeepIntermediateOut>
struct FusedElemwiseAndActNoBroadcast {
  HOSTDEVICE void operator()(size_t i) {
    T y_val = y_[i];
    T x_val = x_[i];
    if (KeepIntermediateOut) {
      T intermediate_val = compound_functor_.GetIntermediateOut(x_val, y_val);
      intermediate_out_[i] = intermediate_val;
      out_[i] =
          compound_functor_.GetOutUseIntermediateOut(x_val, intermediate_val);
    } else {
      out_[i] = compound_functor_.GetOut(x_val, y_val);
    }
  }

  const T *x_;
  const T *y_;
  CompoundFunctor compound_functor_;
  T *out_;
  T *intermediate_out_;
};

// FusedElemwiseAndActBroadcast1:
// In this case, X and Y can be reshaped to a matrix.
// For example shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) and axis = -1 or 2,
// X can be reshaped to (6, 20) and Y can be reshaped to (1, 20)
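// Concretely, when Y is the broadcast side (BcastY), with X viewed as (h, w)
// and Y as (w,), each output element is
// out[i * w + j] = f(x[i * w + j], y[j]).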
template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActBroadcast1CPU(const T *x, const T *y,
                                             CompoundFunctor compound_functor,
                                             int h, int w, T *out,
                                             T *intermediate_out) {
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
      int offset = i * w + j;

      T y_val = BcastY ? y[j] : y[offset];
      T x_val = BcastY ? x[offset] : x[j];
      int64_t intermediate_out_offset;
      if (KeepIntermediateOut) {
        T intermediate_val = compound_functor.GetIntermediateOut(x_val, y_val);

        if (SameShapeOfIntermediateOutAndOut) {
          // for the case of f1(f2(x, y))
          intermediate_out_offset = offset;
        } else if (BcastY) {
          intermediate_out_offset = j;
        } else {
          intermediate_out_offset = offset;
        }

        intermediate_out[intermediate_out_offset] = intermediate_val;
        out[offset] =
            compound_functor.GetOutUseIntermediateOut(x_val, intermediate_val);
      } else {
        out[offset] = compound_functor.GetOut(x_val, y_val);
      }
    }
  }
}

// FusedElemwiseAndActBroadcast2
// In this case, X and Y can be reshaped to a matrix.
// For example shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4) and axis = 1,
// X can be reshaped to (2, 12, 5) and Y can be reshaped to (1, 12, 1)
// pre = 2, n = 12, post = 5
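// Concretely, when Y is the broadcast side (BcastY), each output element is
// out[(i * n + j) * post + k] = f(x[(i * n + j) * post + k], y[j]).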
template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActBroadcast2CPU(const T *x, const T *y, int pre,
                                             int n, int post,
                                             CompoundFunctor compound_functor,
                                             T *out, T *intermediate_out) {
  for (int i = 0; i < pre; ++i) {
    for (int j = 0; j < n; ++j) {
      for (int k = 0; k < post; ++k) {
        int offset = i * n * post + j * post + k;

        T y_val = BcastY ? y[j] : y[offset];
        T x_val = BcastY ? x[offset] : x[j];
        int64_t intermediate_out_offset;

        if (KeepIntermediateOut) {
          T intermediate_val =
              compound_functor.GetIntermediateOut(x_val, y_val);

          if (SameShapeOfIntermediateOutAndOut) {
            // for the case of f1(f2(x, y))
            intermediate_out_offset = offset;
          } else if (BcastY) {
            intermediate_out_offset = j;
          } else {
            intermediate_out_offset = offset;
          }

          intermediate_out[intermediate_out_offset] = intermediate_val;
          out[offset] = compound_functor.GetOutUseIntermediateOut(
              x_val, intermediate_val);
        } else {
          out[offset] = compound_functor.GetOut(x_val, y_val);
        }
      }
    }
  }
}

#if defined(__NVCC__) || defined(__HIPCC__)
template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static __global__ void FusedElemwiseAndActBroadcast1CUDAKernel(
    const T *x, const T *y, int h, int w, CompoundFunctor compound_functor,
    T *out, T *intermediate_out) {
  int i = blockIdx.x;
  int j = threadIdx.x;

  while (j < w) {
    int offset = i * w + j;

    T y_val = BcastY ? y[j] : y[offset];
    T x_val = BcastY ? x[offset] : x[j];
    int64_t intermediate_out_offset;

    if (KeepIntermediateOut) {
      T intermediate_val = compound_functor.GetIntermediateOut(x_val, y_val);

      if (SameShapeOfIntermediateOutAndOut) {
        // for the case of f1(f2(x, y))
        intermediate_out_offset = offset;
      } else if (BcastY) {
        intermediate_out_offset = j;
      } else {
        intermediate_out_offset = offset;
      }

      intermediate_out[intermediate_out_offset] = intermediate_val;
      out[offset] =
          compound_functor.GetOutUseIntermediateOut(x_val, intermediate_val);
    } else {
      out[offset] = compound_functor.GetOut(x_val, y_val);
    }

    j += ELEMWISE_MAX_BLOCK_DIM;
  }
}

template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActBroadcast1CUDA(gpuStream_t stream, const T *x,
                                              const T *y,
                                              CompoundFunctor compound_functor,
                                              int h, int w, T *out,
                                              T *intermediate_out) {
  int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, w);
  int grid_size = h;
  FusedElemwiseAndActBroadcast1CUDAKernel<
      T, CompoundFunctor, BcastY, KeepIntermediateOut,
      SameShapeOfIntermediateOutAndOut><<<grid_size, block_size, 0, stream>>>(
      x, y, h, w, compound_functor, out, intermediate_out);
}

template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static __global__ void FusedElemwiseAndActBroadcast2CUDAKernel(
    const T *x, const T *y, CompoundFunctor compound_functor, int pre, int n,
    int post, T *out, T *intermediate_out) {
  int tid = threadIdx.x;
  int j = blockIdx.x;

  while (true) {
    int i = tid / post;
    int k = tid % post;
    if (i >= pre) break;

    int offset = i * n * post + j * post + k;

    T y_val = BcastY ? y[j] : y[offset];
    T x_val = BcastY ? x[offset] : x[j];
    int64_t intermediate_out_offset;

    if (KeepIntermediateOut) {
      T intermediate_val = compound_functor.GetIntermediateOut(x_val, y_val);

      if (SameShapeOfIntermediateOutAndOut) {
        // for the case of f1(f2(x, y))
        intermediate_out_offset = offset;
      } else if (BcastY) {
        intermediate_out_offset = j;
      } else {
        intermediate_out_offset = offset;
      }

      intermediate_out[intermediate_out_offset] = intermediate_val;
      out[offset] =
          compound_functor.GetOutUseIntermediateOut(x_val, intermediate_val);
    } else {
      out[offset] = compound_functor.GetOut(x_val, y_val);
    }

    tid += ELEMWISE_MAX_BLOCK_DIM;
  }
}

template <typename T, typename CompoundFunctor, bool BcastY,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActBroadcast2CUDA(gpuStream_t stream, const T *x,
                                              const T *y, int pre, int n,
                                              int post,
                                              CompoundFunctor compound_functor,
                                              T *out, T *intermediate_out) {
  int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, pre * post);
  int grid_size = n;

  FusedElemwiseAndActBroadcast2CUDAKernel<
      T, CompoundFunctor, BcastY, KeepIntermediateOut,
      SameShapeOfIntermediateOutAndOut><<<grid_size, block_size, 0, stream>>>(
      x, y, compound_functor, pre, n, post, out, intermediate_out);
}

#endif

template <typename DeviceContext, typename T, typename CompoundFunctor,
          bool KeepIntermediateOut>
void FusedElemwiseAndActComputeNoBroadcast(
    const framework::ExecutionContext &ctx, const framework::DDim &x_dim,
    const framework::Tensor &x, const framework::Tensor &y,
    CompoundFunctor compound_functor, framework::Tensor *out,
    framework::Tensor *intermediate_out) {
  size_t N = static_cast<size_t>(phi::product(x_dim));

  platform::ForRange<DeviceContext> for_range(
      ctx.template device_context<DeviceContext>(), N);

  for_range(
      FusedElemwiseAndActNoBroadcast<T, CompoundFunctor, KeepIntermediateOut>{
          x.data<T>(), y.data<T>(), compound_functor,
          out->mutable_data<T>(ctx.GetPlace()),
          intermediate_out == nullptr
              ? nullptr
              : intermediate_out->mutable_data<T>(ctx.GetPlace())});
}

template <typename DeviceContext, typename T, typename CompoundFunctor,
          bool BcastY, bool KeepIntermediateOut,
          bool SameShapeOfIntermediateOutAndOut>
void FusedElemwiseAndActComputeWithBroadcast(
    const framework::ExecutionContext &ctx, const framework::DDim &x_dim,
    const framework::DDim &y_dim_untrimed, const framework::Tensor &x,
    const framework::Tensor &y, CompoundFunctor compound_functor, int axis,
    framework::Tensor *out, framework::Tensor *intermediate_out) {
  axis = (axis == -1 ? x_dim.size() - y_dim_untrimed.size() : axis);
  auto y_dim = trim_trailing_singular_dims(y_dim_untrimed);
  axis = (y_dim.size() == 0) ? x_dim.size() : axis;

  int pre, n, post, is_run_common_broadcast;
  phi::funcs::get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post,
                           &is_run_common_broadcast);
  if (post == 1) {
    int h = pre;
    int w = n;
    if (platform::is_gpu_place(ctx.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
      FusedElemwiseAndActBroadcast1CUDA<T, CompoundFunctor, BcastY,
                                        KeepIntermediateOut,
                                        SameShapeOfIntermediateOutAndOut>(
          ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
          y.data<T>(), compound_functor, h, w,
          out->mutable_data<T>(ctx.GetPlace()),
          intermediate_out == nullptr
              ? nullptr
              : intermediate_out->mutable_data<T>(ctx.GetPlace()));
#endif
    } else {
      FusedElemwiseAndActBroadcast1CPU<T, CompoundFunctor, BcastY,
                                       KeepIntermediateOut,
                                       SameShapeOfIntermediateOutAndOut>(
          x.data<T>(), y.data<T>(), compound_functor, h, w,
          out->mutable_data<T>(ctx.GetPlace()),
          intermediate_out == nullptr
              ? nullptr
              : intermediate_out->mutable_data<T>(ctx.GetPlace()));
    }
  } else {
    if (platform::is_gpu_place(ctx.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
      FusedElemwiseAndActBroadcast2CUDA<T, CompoundFunctor, BcastY,
                                        KeepIntermediateOut,
                                        SameShapeOfIntermediateOutAndOut>(
          ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
          y.data<T>(), pre, n, post, compound_functor,
          out->mutable_data<T>(ctx.GetPlace()),
          intermediate_out == nullptr
              ? nullptr
              : intermediate_out->mutable_data<T>(ctx.GetPlace()));
#endif
    } else {
      FusedElemwiseAndActBroadcast2CPU<T, CompoundFunctor, BcastY,
                                       KeepIntermediateOut,
                                       SameShapeOfIntermediateOutAndOut>(
          x.data<T>(), y.data<T>(), pre, n, post, compound_functor,
          out->mutable_data<T>(ctx.GetPlace()),
          intermediate_out == nullptr
              ? nullptr
              : intermediate_out->mutable_data<T>(ctx.GetPlace()));
    }
  }
}
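
// Dispatch summary for the two broadcast paths above: when post == 1 the
// broadcast collapses to a 2-D (h, w) problem handled by the Broadcast1
// kernels; otherwise it is a 3-D (pre, n, post) problem handled by the
// Broadcast2 kernels.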

// --- backward
template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut>
struct FusedElemwiseAndActGradNoBroadcast {
  HOSTDEVICE void operator()(size_t i) {
    T zero = static_cast<T>(0);
    T x_val = (x_ == nullptr) ? zero : x_[i];
    T y_val = (y_ == nullptr) ? zero : y_[i];
    T out_val = out_[i];
    T dout_val = dout_[i];
    T intermediate_out_val = UseIntermediateOut
                                 ? intermediate_out_[i]
                                 : dx_op_.GetIntermediateOut(x_val, y_val);
    if (dx_ != nullptr) {
      dx_[i] = dx_op_.UseIntermediateOut(x_val, y_val, intermediate_out_val,
                                         out_val, dout_val);
    }
    if (dy_ != nullptr) {
      dy_[i] = dy_op_.UseIntermediateOut(x_val, y_val, intermediate_out_val,
                                         out_val, dout_val);
    }
    if (dintermediate_ != nullptr) {
      dintermediate_[i] = dintermediate_op_.UseIntermediateOut(
          x_val, intermediate_out_val, out_val, dout_val);
    }
  }

  const T *x_;
  const T *y_;
  const T *intermediate_out_;
  const T *out_;
  const T *dout_;
  DX_OP dx_op_;
  DY_OP dy_op_;
  DIntermediate_OP dintermediate_op_;
  T *dx_;
  T *dy_;
  T *dintermediate_;
};

template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP,
          typename DIntermediate_OP, bool UseIntermediateOut>
void FusedElemwiseAndActGradComputeNoBroadcast(
    const framework::ExecutionContext &ctx, const framework::DDim &x_dim,
    const framework::DDim &y_dim, const framework::Tensor *x,
    const framework::Tensor *y, const framework::Tensor *intermediate_out,
    const framework::Tensor *out, const framework::Tensor *dout, int axis,
    framework::Tensor *dx, framework::Tensor *dy,
    framework::Tensor *dintermediate, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op) {
  size_t N = static_cast<size_t>(phi::product(x_dim));
  platform::ForRange<DeviceContext> for_range(
      ctx.template device_context<DeviceContext>(), N);
  const T *x_data = nullptr;
  const T *y_data = nullptr;
  if (x->IsInitialized()) x_data = x->data<T>();
  if (y->IsInitialized()) y_data = y->data<T>();

  for_range(FusedElemwiseAndActGradNoBroadcast<
            T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut>{
      x_data, y_data, intermediate_out ? intermediate_out->data<T>() : nullptr,
      out->data<T>(), dout->data<T>(), dx_op, dy_op, dintermediate_op,
      dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
      dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()),
      dintermediate == nullptr ? nullptr : dintermediate->mutable_data<T>(
                                               ctx.GetPlace())});
}

template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActGradBroadcast1CPU(
    const T *x, const T *y, const T *intermediate_out, const T *out,
    const T *dout, int h, int w, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op, T *dx, T *dy, T *d_intermediate) {
  int64_t tmp_out_idx, x_idx, y_idx;
  T zero = static_cast<T>(0);
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
      int offset = i * w + j;

      tmp_out_idx = BcastY ? j : offset;
      y_idx = BcastY ? j : offset;
      x_idx = BcastY ? offset : j;
      T x_val = (x == nullptr) ? zero : x[x_idx];
      T y_val = (y == nullptr) ? zero : y[y_idx];

      if (SameShapeOfIntermediateOutAndOut) {
        tmp_out_idx = offset;
      }

      if (dx != nullptr) {
        T tmp = UseIntermediateOut
                    ? dx_op.UseIntermediateOut(x_val, y_val,
                                               intermediate_out[tmp_out_idx],
                                               out[offset], dout[offset])
                    : dx_op.Recompute(x_val, y_val, out[offset], dout[offset]);

        if (BcastY) {
          dx[x_idx] = tmp;
        } else {
          if (i == 0) {
            dx[x_idx] = tmp;
          } else {
            dx[x_idx] += tmp;
          }
        }
      }
      if (dy != nullptr) {
        T tmp = UseIntermediateOut
                    ? dy_op.UseIntermediateOut(x_val, y_val,
                                               intermediate_out[tmp_out_idx],
                                               out[offset], dout[offset])
                    : dy_op.Recompute(x_val, y_val, out[offset], dout[offset]);
        if (BcastY) {
          if (i == 0) {
            dy[y_idx] = tmp;
          } else {
            dy[y_idx] += tmp;
          }
        } else {
          dy[y_idx] = tmp;
        }
      }
      if (d_intermediate != nullptr) {
        T tmp = UseIntermediateOut
                    ? dintermediate_op.UseIntermediateOut(
                          x_val, intermediate_out[tmp_out_idx], out[offset],
                          dout[offset])
                    : dintermediate_op.Recompute(x_val, y_val, out[offset],
                                                 dout[offset]);
        if (SameShapeOfIntermediateOutAndOut) {
          d_intermediate[tmp_out_idx] = tmp;
        } else {
          if (i == 0) {
            d_intermediate[tmp_out_idx] = tmp;
          } else {
            d_intermediate[tmp_out_idx] += tmp;
          }
        }
      }
    }
  }
}

template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActGradBroadcast2CPU(
    const T *x, const T *y, const T *intermediate_out, const T *out,
    const T *dout, int pre, int n, int post, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op, T *dx, T *dy, T *d_intermediate) {
  int64_t tmp_out_idx, x_idx, y_idx;
  T zero = static_cast<T>(0);
  for (int i = 0; i < pre; ++i) {
    for (int j = 0; j < n; ++j) {
      for (int k = 0; k < post; ++k) {
        int offset = i * n * post + j * post + k;

        tmp_out_idx = BcastY ? j : offset;
        y_idx = BcastY ? j : offset;
        x_idx = BcastY ? offset : j;

        T x_val = (x == nullptr) ? zero : x[x_idx];
        T y_val = (y == nullptr) ? zero : y[y_idx];

        if (SameShapeOfIntermediateOutAndOut) {
          tmp_out_idx = offset;
        }

        if (dx != nullptr) {
          T tmp =
              UseIntermediateOut
                  ? dx_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dx_op.Recompute(x_val, y_val, out[offset], dout[offset]);

          if (BcastY) {
            dx[x_idx] = tmp;
          } else {
            if (i == 0 && k == 0) {
              dx[x_idx] = tmp;
            } else {
              dx[x_idx] += tmp;
            }
          }
        }
        if (dy != nullptr) {
          T tmp =
              UseIntermediateOut
                  ? dy_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dy_op.Recompute(x_val, y_val, out[offset], dout[offset]);
          if (BcastY) {
            if (i == 0 && k == 0) {
              dy[y_idx] = tmp;
            } else {
              dy[y_idx] += tmp;
            }
          } else {
            dy[y_idx] = tmp;
          }
        }
        if (d_intermediate != nullptr) {
          T tmp = UseIntermediateOut
                      ? dintermediate_op.UseIntermediateOut(
                            x_val, intermediate_out[tmp_out_idx], out[offset],
                            dout[offset])
                      : dintermediate_op.Recompute(x_val, y_val, out[offset],
                                                   dout[offset]);
          if (SameShapeOfIntermediateOutAndOut) {
            d_intermediate[tmp_out_idx] = tmp;
          } else {
            if (i == 0) {
              d_intermediate[tmp_out_idx] = tmp;
            } else {
              d_intermediate[tmp_out_idx] += tmp;
            }
          }
        }
      }
    }
  }
}

#if defined(__NVCC__) || defined(__HIPCC__)
template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static __global__ void FusedElemwiseAndActGradBroadcast1CUDAKernel(
    const T *x, const T *y, const T *intermediate_out, const T *out,
    const T *dout, int h, int w, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op, T *dx, T *dy, T *d_intermediate) {
  __shared__ T sdata[BLOCK_Y][BLOCK_X];
  size_t idx = threadIdx.x + BLOCK_X * blockIdx.x;
  size_t width_stride = gridDim.x * BLOCK_X;

  size_t full_w = ROUNDUP(w, BLOCK_X);

  T zero = static_cast<T>(0);

  for (size_t j = idx; j < full_w; j += width_stride) {
    T val(0), inter_val(0);
    if (j < w) {
      for (size_t i = threadIdx.y; i < h; i += BLOCK_Y) {
        size_t offset = i * w + j;

        size_t tmp_out_idx = BcastY ? j : offset;
        size_t y_idx = BcastY ? j : offset;
        size_t x_idx = BcastY ? offset : j;
        T x_val = (x == nullptr) ? zero : x[x_idx];
        T y_val = (y == nullptr) ? zero : y[y_idx];

        if (SameShapeOfIntermediateOutAndOut) {
          tmp_out_idx = offset;
        }

        if (dx != nullptr) {
          T tmp =
              UseIntermediateOut
                  ? dx_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dx_op.Recompute(x_val, y_val, out[offset], dout[offset]);

          if (BcastY) {
            dx[x_idx] = tmp;
          } else {
            val += tmp;
          }
        }
        if (dy != nullptr) {
          T tmp =
              UseIntermediateOut
                  ? dy_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dy_op.Recompute(x_val, y_val, out[offset], dout[offset]);
          if (BcastY) {
            val += tmp;
          } else {
            dy[y_idx] = tmp;
          }
        }
        if (d_intermediate != nullptr) {
          T tmp = UseIntermediateOut
                      ? dintermediate_op.UseIntermediateOut(
                            y_val, intermediate_out[tmp_out_idx],
                            out[offset], dout[offset])
                      : dintermediate_op.Recompute(x_val, y_val, out[offset],
                                                   dout[offset]);
          if (SameShapeOfIntermediateOutAndOut) {
            d_intermediate[tmp_out_idx] = tmp;
          } else {
            inter_val += tmp;
          }
        }
      }
    }

    // transpose, for ReduceSum with warp
    sdata[threadIdx.y][threadIdx.x] = val;
    __syncthreads();
    val = sdata[threadIdx.x][threadIdx.y];
#pragma unroll
    for (int i = BLOCK_X >> 1; i > 0; i >>= 1) {
      // reduce sum with warp
      val += platform::CudaShuffleXorSync(0xFFFFFFFF, val, i);
    }

    size_t idx_j = j + threadIdx.y;
    if (BcastY) {
      if (dy) {
        if (threadIdx.x == 0 && (idx_j < w)) dy[idx_j] = val;
      }
    } else {
      if (dx) {
        if (threadIdx.x == 0 && (idx_j < w)) dx[idx_j] = val;
      }
    }

    if (!SameShapeOfIntermediateOutAndOut) {
      if (d_intermediate) {
        sdata[threadIdx.y][threadIdx.x] = inter_val;
        __syncthreads();
        inter_val = sdata[threadIdx.x][threadIdx.y];
#pragma unroll
        for (int i = BLOCK_X >> 1; i > 0; i >>= 1) {
          // reduce sum with warp
          inter_val += platform::CudaShuffleXorSync(0xFFFFFFFF, inter_val, i);
        }
        if (threadIdx.x == 0 && (idx_j < w)) d_intermediate[idx_j] = inter_val;
      }
    }
  }  // end for
}

template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActGradBroadcast1CUDA(
    const framework::ExecutionContext &ctx, const T *x, const T *y,
    const T *intermediate_out, const T *out, const T *dout, int h, int w,
    DX_OP dx_op, DY_OP dy_op, DIntermediate_OP dintermediate_op, T *dx, T *dy,
    T *d_intermediate) {
  gpuStream_t stream = ctx.cuda_device_context().stream();

  dim3 blocks(BLOCK_X, BLOCK_Y);
  int max_gpu_threads = ctx.cuda_device_context().GetMaxPhysicalThreadCount();
  int max_blocks = std::max(max_gpu_threads / (BLOCK_X * BLOCK_Y), 1);
  int theory_block = (w + BLOCK_X - 1) / BLOCK_X;
  dim3 grids(std::min(theory_block, max_blocks));

  FusedElemwiseAndActGradBroadcast1CUDAKernel<
      T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut, BcastY,
      SameShapeOfIntermediateOutAndOut><<<grids, blocks, 0, stream>>>(
      x, y, intermediate_out, out, dout, h, w, dx_op, dy_op, dintermediate_op,
      dx, dy, d_intermediate);
}

template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static __global__ void FusedElemwiseAndActGradBroadcast2CUDAKernel(
    const T *x, const T *y, const T *intermediate_out, const T *out,
    const T *dout, int pre, int n, int post, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op, T *dx, T *dy, T *d_intermediate) {
  int tid = threadIdx.x;
  int j = blockIdx.x;

  T val(0), inter_val(0);
  int ttid = tid;
  int64_t tmp_out_idx, x_idx, y_idx;
  T zero = static_cast<T>(0);
  while (true) {
    int i = ttid / post;
    int k = ttid % post;
    if (i >= pre) break;

    int offset = i * n * post + j * post + k;

    tmp_out_idx = BcastY ? j : offset;
    y_idx = BcastY ? j : offset;
    x_idx = BcastY ? offset : j;
    T x_val = (x == nullptr) ? zero : x[x_idx];
    T y_val = (y == nullptr) ? zero : y[y_idx];

    if (SameShapeOfIntermediateOutAndOut) {
      tmp_out_idx = offset;
    }

    if (dx != nullptr) {
      T tmp = UseIntermediateOut
                  ? dx_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dx_op.Recompute(x_val, y_val, out[offset], dout[offset]);

      if (BcastY) {
        dx[x_idx] = tmp;
      } else {
        val += tmp;
      }
    }
    if (dy != nullptr) {
      T tmp = UseIntermediateOut
                  ? dy_op.UseIntermediateOut(x_val, y_val,
                                             intermediate_out[tmp_out_idx],
                                             out[offset], dout[offset])
                  : dy_op.Recompute(x_val, y_val, out[offset], dout[offset]);
      if (BcastY) {
        val += tmp;
      } else {
        dy[y_idx] = tmp;
      }
    }
    if (d_intermediate != nullptr) {
      T tmp = UseIntermediateOut
                  ? dintermediate_op.UseIntermediateOut(
                        y_val, intermediate_out[tmp_out_idx], out[offset],
                        dout[offset])
                  : dintermediate_op.Recompute(x_val, y_val, out[offset],
                                               dout[offset]);
      if (SameShapeOfIntermediateOutAndOut) {
        d_intermediate[tmp_out_idx] = tmp;
      } else {
        inter_val += tmp;
      }
    }
    ttid += ELEMWISE_MAX_BLOCK_DIM;
  }

  int h = pre * post;
  h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h;
  if (BcastY) {
    if (dy) {
      val = paddle::platform::reduceSum(val, tid, h);
      if (threadIdx.x == 0) {
        dy[j] = val;
      }
    }
  } else {
    if (dx) {
      val = paddle::platform::reduceSum(val, tid, h);
      if (threadIdx.x == 0) {
        dx[j] = val;
      }
    }
  }
  if (!SameShapeOfIntermediateOutAndOut) {
    if (d_intermediate) {
      inter_val = paddle::platform::reduceSum(inter_val, tid, h);
      if (threadIdx.x == 0) {
        d_intermediate[j] = inter_val;
      }
    }
  }
}

template <typename T, typename DX_OP, typename DY_OP, typename DIntermediate_OP,
          bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
static void FusedElemwiseAndActGradBroadcast2CUDA(
    gpuStream_t stream, const T *x, const T *y, const T *intermediate_out,
    const T *out, const T *dout, int pre, int n, int post, DX_OP dx_op,
    DY_OP dy_op, DIntermediate_OP dintermediate_op, T *dx, T *dy,
    T *dintermediate) {
  int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, pre * post);
  int grid_size = n;
  FusedElemwiseAndActGradBroadcast2CUDAKernel<
      T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut, BcastY,
      SameShapeOfIntermediateOutAndOut><<<grid_size, block_size, 0, stream>>>(
      x, y, intermediate_out, out, dout, pre, n, post, dx_op, dy_op,
      dintermediate_op, dx, dy, dintermediate);
}
#endif

template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP,
          typename DIntermediate_OP, bool UseIntermediateOut, bool BcastY,
          bool SameShapeOfIntermediateOutAndOut>
void FusedElemwiseAndActGradComputeWithBroadcast(
    const framework::ExecutionContext &ctx, const framework::DDim &x_dim,
    const framework::DDim &y_dim_untrimed, const framework::Tensor *x,
    const framework::Tensor *y, const framework::Tensor *intermediate_out,
    const framework::Tensor *out, const framework::Tensor *dout, int axis,
    framework::Tensor *dx, framework::Tensor *dy,
    framework::Tensor *dintermediate, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op) {
  axis = (axis == -1 ? x_dim.size() - y_dim_untrimed.size() : axis);
  auto y_dim = trim_trailing_singular_dims(y_dim_untrimed);
  axis = (y_dim.size() == 0) ? x_dim.size() : axis;

  int pre, n, post, is_run_common_broadcast;
  phi::funcs::get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post,
                           &is_run_common_broadcast);
  const T *x_data = nullptr;
  const T *y_data = nullptr;
  if (x->IsInitialized()) x_data = x->data<T>();
  if (y->IsInitialized()) y_data = y->data<T>();
  if (post == 1) {
    int h = pre;
    int w = n;

    if (platform::is_gpu_place(ctx.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
      FusedElemwiseAndActGradBroadcast1CUDA<T, DX_OP, DY_OP, DIntermediate_OP,
                                            UseIntermediateOut, BcastY,
                                            SameShapeOfIntermediateOutAndOut>(
          ctx, x_data, y_data,
          intermediate_out == nullptr ? nullptr : intermediate_out->data<T>(),
          out->data<T>(), dout->data<T>(), h, w, dx_op, dy_op, dintermediate_op,
          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()),
          dintermediate == nullptr ? nullptr : dintermediate->mutable_data<T>(
                                                   ctx.GetPlace()));
#endif
    } else {
      FusedElemwiseAndActGradBroadcast1CPU<T, DX_OP, DY_OP, DIntermediate_OP,
                                           UseIntermediateOut, BcastY,
                                           SameShapeOfIntermediateOutAndOut>(
          x_data, y_data,
          intermediate_out == nullptr ? nullptr : intermediate_out->data<T>(),
          out->data<T>(), dout->data<T>(), h, w, dx_op, dy_op, dintermediate_op,
          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()),
          dintermediate == nullptr ? nullptr : dintermediate->mutable_data<T>(
                                                   ctx.GetPlace()));
    }
  } else {
    if (platform::is_gpu_place(ctx.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
      FusedElemwiseAndActGradBroadcast2CUDA<T, DX_OP, DY_OP, DIntermediate_OP,
                                            UseIntermediateOut, BcastY,
                                            SameShapeOfIntermediateOutAndOut>(
          ctx.template device_context<DeviceContext>().stream(), x_data, y_data,
          intermediate_out == nullptr ? nullptr : intermediate_out->data<T>(),
          out->data<T>(), dout->data<T>(), pre, n, post, dx_op, dy_op,
          dintermediate_op,
          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()),
          dintermediate == nullptr ? nullptr : dintermediate->mutable_data<T>(
                                                   ctx.GetPlace()));
#endif
    } else {
      FusedElemwiseAndActGradBroadcast2CPU<T, DX_OP, DY_OP, DIntermediate_OP,
                                           UseIntermediateOut, BcastY,
                                           SameShapeOfIntermediateOutAndOut>(
          x_data, y_data,
          intermediate_out == nullptr ? nullptr : intermediate_out->data<T>(),
          out->data<T>(), dout->data<T>(), pre, n, post, dx_op, dy_op,
          dintermediate_op,
          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()),
          dintermediate == nullptr ? nullptr : dintermediate->mutable_data<T>(
                                                   ctx.GetPlace()));
    }
  }
}

template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP,
          typename DIntermediate_OP, bool UseIntermediateOut,
          bool SameShapeOfIntermediateOutAndOut>
void FusedElemwiseAndActGradComputeEx(
    const framework::ExecutionContext &ctx, const framework::Tensor *x,
    const framework::Tensor *y, const framework::Tensor *out,
    const framework::Tensor *intermediate_out, const framework::Tensor *dout,
    int axis, framework::Tensor *dx, framework::Tensor *dy,
    framework::Tensor *dintermediate, DX_OP dx_op, DY_OP dy_op,
    DIntermediate_OP dintermediate_op) {
  const framework::DDim &x_dim = x->dims();
  const framework::DDim &y_dim = y->dims();
  if (UseIntermediateOut) {
    PADDLE_ENFORCE_NOT_NULL(
        intermediate_out,
        platform::errors::InvalidArgument("Intermediate out is null pointer."));
  }
  if (x_dim == y_dim) {
    FusedElemwiseAndActGradComputeNoBroadcast<
        DeviceContext, T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut>(
        ctx, x_dim, y_dim, x, y, intermediate_out, out, dout, axis, dx, dy,
        dintermediate, dx_op, dy_op, dintermediate_op);
  } else {  // Y is a scalar
    bool bcast_y = x_dim.size() >= y_dim.size();
    if (x_dim.size() == y_dim.size()) {
      for (int i = 0; i < x_dim.size(); ++i) {
        if (x_dim[i] < y_dim[i]) {
          bcast_y = false;
          break;
        }
      }
    }

    // z = f1(x, f2(y))
    // z = f1(f2(x, y))
    if (bcast_y) {  // Y should be broadcast.
      FusedElemwiseAndActGradComputeWithBroadcast<
          DeviceContext, T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut,
          true /*BcastY*/, SameShapeOfIntermediateOutAndOut>(
          ctx, x_dim, y_dim, x, y, intermediate_out, out, dout, axis, dx, dy,
          dintermediate, dx_op, dy_op, dintermediate_op);
    } else {
      FusedElemwiseAndActGradComputeWithBroadcast<
          DeviceContext, T, DX_OP, DY_OP, DIntermediate_OP, UseIntermediateOut,
          false /*BcastY*/, SameShapeOfIntermediateOutAndOut>(
          ctx, y_dim, x_dim, x, y, intermediate_out, out, dout, axis, dx, dy,
          dintermediate, dx_op, dy_op, dintermediate_op);
    }
  }
}

template <typename DeviceContext, typename T, typename CompoundFunctor,
          bool KeepIntermediateOut, bool SameShapeOfIntermediateOutAndOut>
void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
                                  const framework::Tensor &x,
                                  const framework::Tensor &y, int axis,
                                  CompoundFunctor compound_functor,
                                  framework::Tensor *out,
                                  framework::Tensor *intermediate_out) {
  if (KeepIntermediateOut) {
    PADDLE_ENFORCE_NOT_NULL(
        intermediate_out,
        platform::errors::InvalidArgument(
            "The attribute save_intermediate_out is enabled, but "
            "intermediate out is a null pointer."));
  }

  const framework::DDim &x_dim = x.dims();
  const framework::DDim &y_dim = y.dims();
  if (x.dims() == y.dims()) {
    FusedElemwiseAndActComputeNoBroadcast<DeviceContext, T, CompoundFunctor,
                                          KeepIntermediateOut>(
        ctx, x_dim, x, y, compound_functor, out, intermediate_out);
  } else {
    // Check whether the shape of Y is a continuous subsequence of X; for
    // more information please refer to the op's introduction.
    bool bcast_y = x.numel() >= y.numel();
    // z = f1(x, f2(y))
    // z = f1(f2(x, y))
    if (bcast_y) {  // Y should be broadcast.
      // In this case,
      // for 'f2(y)', the shape of intermediate_out should be equal to the
      // shape of Y;
      // for 'f2(x, y)', the shape of intermediate_out should be equal to the
      // shape of Out;
      // the shape of Out should be equal to the shape of X.
      FusedElemwiseAndActComputeWithBroadcast<
          DeviceContext, T, CompoundFunctor, true /*BcastY*/,
          KeepIntermediateOut, SameShapeOfIntermediateOutAndOut>(
          ctx, x_dim /*OutShape*/, y_dim, x, y, compound_functor, axis, out,
          intermediate_out);
    } else {
      // In this case,
      // for 'f2(y)', the shape of intermediate_out should be equal to the
      // shape of Out;
      // for 'f2(x, y)', the shape of intermediate_out should be equal to the
      // shape of Out;
      // the shape of Out should be equal to the shape of Y.
      FusedElemwiseAndActComputeWithBroadcast<
          DeviceContext, T, CompoundFunctor, false /*BcastY*/,
          KeepIntermediateOut, SameShapeOfIntermediateOutAndOut>(
          ctx, y_dim /*OutShape*/, x_dim, x, y, compound_functor, axis, out,
          intermediate_out);
    }
  }
}

template <typename DeviceContext, typename T>
static inline void GetDoubleGradSafeTensor(
    const framework::ExecutionContext &ctx, const framework::Tensor *x,
    const framework::Tensor *ddx, framework::Tensor *ddx_safe) {
  const auto &dev_ctx = ctx.template device_context<DeviceContext>();
  phi::funcs::GetDoubleGradSafeTensor<DeviceContext, T>(dev_ctx, *x, ddx,
                                                        ddx_safe);
}

// for broadcast backwards
static inline std::vector<int> GetReduceDim(const framework::DDim &in,
                                            const framework::DDim &out,
                                            int axis) {
  return phi::funcs::GetReduceDim(in, out, axis);
}
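
// For example (assuming the usual broadcast-backward semantics): if a tensor
// of shape in = (4, 5) was broadcast up to out = (2, 3, 4, 5), GetReduceDim
// returns {0, 1}, the axes of `out` that a gradient of shape `out` must be
// summed over to reduce it back to shape `in`.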

#if defined(__NVCC__) || defined(__HIPCC__)
template <typename T>
void ReduceWrapper(const platform::CUDADeviceContext &dev_ctx, int axis,
                   framework::Tensor *src, framework::Tensor *dst) {
  std::vector<int> reduce_dims = GetReduceDim(dst->dims(), src->dims(), axis);
  TensorReduceImpl<T, T, kps::AddFunctor, kps::IdentityFunctor<T>>(
      dev_ctx, *src, dst, kps::IdentityFunctor<T>(), reduce_dims,
      dev_ctx.stream());
}

template <ElementwiseType ET, typename T, typename Functor>
void GetGradXAndYOut(const platform::CUDADeviceContext &dev_ctx,
                     const platform::Place &place, int axis,
                     std::vector<const framework::Tensor *> ins,
                     const framework::Tensor *dout, framework::Tensor *dx,
                     framework::Tensor *dy, Functor func) {
  framework::Tensor tmp_dx;
  framework::Tensor tmp_dy;
  dx->mutable_data<T>(place);
  dy->mutable_data<T>(place);
  // The elementwise kernel writes outputs of dout's shape; any gradient whose
  // shape differs from dout is computed into a temporary buffer first and
  // reduced back to its own shape via ReduceWrapper below.
  std::vector<framework::Tensor *> outs;
  if (dx->dims() == dout->dims() && dy->dims() == dout->dims()) {
    outs = {dx, dy};
  } else if (dx->dims() != dout->dims() && dy->dims() == dout->dims()) {
    tmp_dx.mutable_data<T>(dout->dims(), place);
    outs = {&tmp_dx, dy};
  } else if (dx->dims() == dout->dims() && dy->dims() != dout->dims()) {
    tmp_dy.mutable_data<T>(dout->dims(), place);
    outs = {dx, &tmp_dy};
  } else if (dx->dims() != dout->dims() && dy->dims() != dout->dims()) {
    tmp_dy.mutable_data<T>(dout->dims(), place);
    tmp_dx.mutable_data<T>(dout->dims(), place);
    outs = {&tmp_dx, &tmp_dy};
  }

  paddle::operators::LaunchElementwiseCudaKernel<ET, T, T, decltype(func), 2>(
      dev_ctx, ins, &outs, axis, func);

  if (dx->dims() != dout->dims() && dy->dims() == dout->dims()) {
    ReduceWrapper<T>(dev_ctx, axis, &tmp_dx, dx);
  } else if (dx->dims() == dout->dims() && dy->dims() != dout->dims()) {
    ReduceWrapper<T>(dev_ctx, axis, &tmp_dy, dy);
  } else if (dx->dims() != dout->dims() && dy->dims() != dout->dims()) {
    ReduceWrapper<T>(dev_ctx, axis, &tmp_dx, dx);
    ReduceWrapper<T>(dev_ctx, axis, &tmp_dy, dy);
  }
}

template <ElementwiseType ET, typename T, typename Functor>
void GetGradXOrYOut(const platform::CUDADeviceContext &dev_ctx,
                    const platform::Place &place, int axis,
                    std::vector<const framework::Tensor *> ins,
                    const framework::Tensor *dout, framework::Tensor *dxy,
                    Functor func) {
  framework::Tensor tmp_dxy;
  dxy->mutable_data<T>(place);

  std::vector<framework::Tensor *> outs;
  if (dxy->dims() != dout->dims()) {
    tmp_dxy.mutable_data<T>(dout->dims(), place);
    outs = {&tmp_dxy};
  } else {
    outs = {dxy};
  }

  paddle::operators::LaunchElementwiseCudaKernel<ET, T, T>(dev_ctx, ins, &outs,
                                                           axis, func);
  if (dxy->dims() != dout->dims()) {
    ReduceWrapper<T>(dev_ctx, axis, &tmp_dxy, dxy);
  }
}

#endif

}  // namespace operators
}  // namespace paddle