right_pool_op.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "util.cu.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;

static inline int NumBlocks(const int N) {
  return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                  kNumMaximumNumBlocks);
}

// Forward kernel of right_pool: a directional max along the width axis; MaxMap
// records where each maximum comes from so the backward pass can route gradients.
template <typename T>
class RightPoolOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                   "This kernel only runs on GPU devices.");
    auto* x = ctx.Input<Tensor>("X");
    auto* max_map = ctx.Output<Tensor>("MaxMap");
    auto* output = ctx.Output<Tensor>("Output");
    auto* x_data = x->data<T>();
    auto x_dims = x->dims();
    int NC_num = x_dims[0] * x_dims[1];
    int height = x_dims[2];
    int width = x_dims[3];
    int num = x->numel();
    auto& dev_ctx = ctx.cuda_device_context();

    int* max_map_data = max_map->mutable_data<int>(x_dims, dev_ctx.GetPlace());
    T* output_data = output->mutable_data<T>(x_dims, dev_ctx.GetPlace());
    auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace());

    int threads = kNumCUDAThreads;
    int blocks = NumBlocks(num / width);

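    // Scratch buffers: one running max value and one max index per width-wise
    // scan (num / width = NC_num * height scans in total).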
    auto max_val_ptr = memory::Alloc(gpu_place, num / width * sizeof(T));
    T* max_val_data = reinterpret_cast<T*>(max_val_ptr->ptr());
    auto max_ind_ptr = memory::Alloc(gpu_place, num / width * sizeof(int));
    int* max_ind_data = reinterpret_cast<int*>(max_ind_ptr->ptr());

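    // First pass: scan each row along the width axis (dimension 3 of the NCHW
    // input) and record in max_map_data where every element's maximum comes from.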
    GetMaxInfo<T><<<blocks, threads, 0, dev_ctx.stream()>>>(x->data<T>(),
                                                            NC_num,
                                                            height,
                                                            width,
                                                            3,
                                                            false,
                                                            max_val_data,
                                                            max_ind_data,
                                                            max_map_data);

    blocks = NumBlocks(num);
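    // Second pass: one thread per element; assemble the pooled Output from the
    // positions recorded in MaxMap.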
    ScatterAddFw<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
        x->data<T>(), max_map_data, NC_num, height, width, 3, output_data);
  }
};

// Backward kernel of right_pool: the Output gradient flows only to the input
// positions recorded in MaxMap; all other positions receive zero gradient.
template <typename T>
class RightPoolGradOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* max_map = ctx.Input<Tensor>("MaxMap");
    auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
    auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto x_dims = x->dims();

    auto& dev_ctx = ctx.cuda_device_context();
    T* in_grad_data = in_grad->mutable_data<T>(x_dims, dev_ctx.GetPlace());
    auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace());

    int threads = kNumCUDAThreads;
    int NC_num = x_dims[0] * x_dims[1];
    int height = x_dims[2];
    int width = x_dims[3];
    int grad_num = in_grad->numel();
    int blocks = NumBlocks(grad_num);
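    // Zero-initialize the input gradient, then scatter-add the output gradient
    // back to the max positions recorded in MaxMap during the forward pass.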
    FillConstant<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
        in_grad_data, 0, grad_num);

    ScatterAddBw<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
        out_grad->data<T>(),
        max_map->data<int>(),
        NC_num,
        height,
        width,
        3,
        in_grad_data);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(right_pool,
                        ops::RightPoolOpCUDAKernel<float>,
                        ops::RightPoolOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(right_pool_grad,
                        ops::RightPoolGradOpCUDAKernel<float>,
                        ops::RightPoolGradOpCUDAKernel<double>);