/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/transform.h"
#if defined(__NVCC__) || defined(__HIPCC__)
#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
#endif

namespace paddle {
namespace operators {

using framework::Tensor;
using platform::Transform;

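// Clamps each element x into the closed interval [min_, max_].
// For example, ClipFunctor<float>(0.f, 5.f) maps -2.f to 0.f, 3.f to 3.f,
// and 7.f to 5.f.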
template <typename T>
class ClipFunctor {
 public:
  explicit ClipFunctor(const T min, const T max) : min_(min), max_(max) {}
  HOSTDEVICE T operator()(const T x) const {
    return x < min_ ? min_ : x > max_ ? max_ : x;
  }

 private:
  T min_;
  T max_;
};

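// Gradient of clip: the upstream gradient x passes through only where the
// forward input y lies strictly inside (min_, max_); at or beyond the bounds
// the gradient is zero.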
template <typename T>
class ClipGradFunctor {
 public:
  explicit ClipGradFunctor(const T min, const T max) : min_(min), max_(max) {}
  HOSTDEVICE T operator()(const T x, const T y) const {
    return (y > min_ && y < max_) ? x : static_cast<T>(0);
  }

 private:
  T min_;
  T max_;
};

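// Forward kernel. The clip bounds come from the "min"/"max" attributes,
// optionally overridden by the one-element "Min"/"Max" input tensors.
// Supports both LoDTensor and SelectedRows inputs.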
template <typename DeviceContext, typename T>
class ClipKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
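    // Read the upper bound from the attribute; an optional "Max" input
    // tensor overrides it. A GPU-resident tensor is first copied to the CPU
    // so its scalar value can be read on the host.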
    auto max = static_cast<T>(context.Attr<float>("max"));
    Tensor max_cpu;
    if (context.HasInput("Max")) {
      auto* max_t = context.Input<Tensor>("Max");
      auto* max_data = max_t->data<T>();
      if (platform::is_gpu_place(max_t->place())) {
        paddle::framework::TensorCopySync(*max_t, platform::CPUPlace(),
                                          &max_cpu);
        max_data = max_cpu.data<T>();
      }
      max = max_data[0];
    }

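    // The lower bound follows the same attribute-or-input pattern.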
    auto min = static_cast<T>(context.Attr<float>("min"));
    Tensor min_cpu;
    if (context.HasInput("Min")) {
      auto* min_t = context.Input<Tensor>("Min");
      auto* min_data = min_t->data<T>();
      if (platform::is_gpu_place(min_t->place())) {
        paddle::framework::TensorCopySync(*min_t, platform::CPUPlace(),
                                          &min_cpu);
        min_data = min_cpu.data<T>();
      }
      min = min_data[0];
    }

    PADDLE_ENFORCE_LE(min, max,
                      platform::errors::InvalidArgument(
                          "max should be greater than or equal to min. "
                          "But received min = %f, max = %f",
                          static_cast<float>(min), static_cast<float>(max)));

    auto* x_var = context.InputVar("X");
    if (x_var->IsType<framework::LoDTensor>()) {
      auto* x = context.Input<framework::LoDTensor>("X");
      auto* out = context.Output<framework::LoDTensor>("Out");
      T* out_data = out->mutable_data<T>(context.GetPlace());
      const T* x_data = x->data<T>();
      int64_t numel = x->numel();
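      // On GPU places, clip runs as a same-dims elementwise CUDA kernel.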
      if (platform::is_gpu_place(context.GetPlace())) {
#if defined(__NVCC__) || defined(__HIPCC__)
        std::vector<const framework::Tensor*> ins = {x};
        std::vector<framework::Tensor*> outs = {out};
        auto functor = ClipFunctor<T>(min, max);
        paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
            context.template device_context<platform::CUDADeviceContext>(), ins,
            &outs, functor);
#endif
      } else {
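        // Non-GPU places fall back to the generic Transform, which
        // dispatches on DeviceContext.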
        Transform<DeviceContext> trans;
        trans(context.template device_context<DeviceContext>(), x_data,
              x_data + numel, out_data, ClipFunctor<T>(min, max));
      }
    } else if (x_var->IsType<phi::SelectedRows>()) {
      auto* x = context.Input<phi::SelectedRows>("X");
      auto* out = context.Output<phi::SelectedRows>("Out");
      PADDLE_ENFORCE_NE(x, out, platform::errors::InvalidArgument(
                                    "Inplace clip is not allowed "
                                    "when x is SelectedRows"));
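      // Merge rows with duplicate indices first so each row is clipped once.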
      math::scatter::MergeAdd<DeviceContext, T> merge_func;
      merge_func(context.template device_context<DeviceContext>(), *x, out);
      auto* out_tensor = out->mutable_value();
      auto* out_data = out_tensor->data<T>();
      int64_t numel = out_tensor->numel();
      Transform<DeviceContext> trans;
      trans(context.template device_context<DeviceContext>(), out_data,
            out_data + numel, out_data, ClipFunctor<T>(min, max));
    } else {
      PADDLE_THROW(platform::errors::Unavailable(
          "ClipOp only supports LoDTensor and SelectedRows."));
    }
  }
};

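// Backward kernel: zeroes the gradient wherever the forward input was
// clipped and passes it through unchanged elsewhere.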
template <typename DeviceContext, typename T>
class ClipGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
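    // Resolve the bounds exactly as in the forward kernel.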
    auto max = static_cast<T>(context.Attr<float>("max"));
    Tensor max_cpu;
    if (context.HasInput("Max")) {
      auto* max_t = context.Input<Tensor>("Max");
      auto* max_data = max_t->data<T>();
      if (platform::is_gpu_place(max_t->place())) {
        paddle::framework::TensorCopySync(*max_t, platform::CPUPlace(),
                                          &max_cpu);
        max_data = max_cpu.data<T>();
      }
      max = max_data[0];
    }

    auto min = static_cast<T>(context.Attr<float>("min"));
    Tensor min_cpu;
    if (context.HasInput("Min")) {
      auto* min_t = context.Input<Tensor>("Min");
      auto* min_data = min_t->data<T>();
      if (platform::is_gpu_place(min_t->place())) {
        paddle::framework::TensorCopySync(*min_t, platform::CPUPlace(),
                                          &min_cpu);
        min_data = min_cpu.data<T>();
      }
      min = min_data[0];
    }

    auto* d_out =
        context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
    auto* d_x =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
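    // d_x may be null when the gradient w.r.t. X is not required.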
    if (d_x != nullptr) {
      auto* x = context.Input<framework::LoDTensor>("X");
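      // When compiled for CUDA/HIP, compute the gradient with the
      // elementwise kernel launcher; d_out and x both feed ClipGradFunctor.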
#if defined(__NVCC__) || defined(__HIPCC__)
      std::vector<const framework::Tensor*> ins = {d_out, x};
      std::vector<framework::Tensor*> outs = {d_x};
      auto functor = ClipGradFunctor<T>(min, max);
      d_x->mutable_data<T>(context.GetPlace());
      LaunchSameDimsElementwiseCudaKernel<T>(
          context.template device_context<platform::CUDADeviceContext>(), ins,
          &outs, functor);
#else
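      // Otherwise apply the binary Transform over (d_out, x) pairs.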
      int64_t numel = d_out->numel();
      auto* d_x_data = d_x->mutable_data<T>(context.GetPlace());
      const T* d_out_data = d_out->data<T>();
      const T* x_data = x->data<T>();
      Transform<DeviceContext> trans;
      trans(context.template device_context<DeviceContext>(), d_out_data,
            d_out_data + numel, x_data, d_x_data, ClipGradFunctor<T>(min, max));
#endif
    }
  }
};

}  // namespace operators
}  // namespace paddle