// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// clang-format off
#include "paddle/fluid/framework/python_headers.h"
// clang-format on

#include <map>     // NOLINT
#include <string>  // NOLINT
#include <vector>  // NOLINT
#include <memory>  // NOLINT

#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_desc.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/operators/math/math_function.h"

#include "paddle/fluid/imperative/type_defs.h"

namespace paddle {
namespace imperative {

class VarBase;

namespace py = ::pybind11;

class PreparedOp {
 public:
  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             framework::OperatorWithKernel::OpKernelFunc func,
             platform::DeviceContext* dev_ctx,
             std::vector<framework::KernelConfig>* kernel_configs)
      : op(op),
        ctx(ctx),
        func(func),
        dev_ctx(dev_ctx),
        kernel_configs(kernel_configs) {}

  static PreparedOp Prepare(const framework::RuntimeContext& ctx,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto* dev_ctx = pool.Get(place);

    // Check whether any kernel is registered for this op type.
    auto& all_op_kernels = op.AllOpKernels();
    auto kernels_iter = all_op_kernels.find(op.Type());
    if (kernels_iter == all_op_kernels.end()) {
      PADDLE_THROW(
          "There are no kernels registered for the %s operator.",
          op.Type());
    }

    framework::OperatorWithKernel::OpKernelMap& kernels = kernels_iter->second;

    auto expected_kernel_key =
        op.GetExpectedKernelType(framework::ExecutionContext(
            op, framework::Scope(), *dev_ctx, ctx, nullptr));
    VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

    auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
    // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
    if (kernel_iter == kernels.end() &&
        expected_kernel_key.library_type_ == framework::LibraryType::kMKLDNN) {
      VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
      expected_kernel_key.library_type_ = framework::LibraryType::kPlain;
      expected_kernel_key.data_layout_ = framework::DataLayout::kAnyLayout;
      kernel_iter = kernels.find(expected_kernel_key);
    }
#endif
    if (kernel_iter == kernels.end()) {
      PADDLE_THROW("op %s does not have kernel for %s", op.Type(),
                   KernelTypeToString(expected_kernel_key));
    }
    std::vector<framework::KernelConfig>* kernel_configs =
        op.GetKernelConfig(expected_kernel_key);
    return PreparedOp(op, ctx, kernel_iter->second, dev_ctx, kernel_configs);
  }

  inline platform::DeviceContext* GetDeviceContext() const { return dev_ctx; }

  const framework::OperatorBase& op;
  const framework::RuntimeContext& ctx;
  framework::OperatorWithKernel::OpKernelFunc func;
  platform::DeviceContext* dev_ctx;
  std::vector<framework::KernelConfig>* kernel_configs;
};
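
// A usage sketch, assuming a tracer-like caller; `runtime_ctx`, `scope` and
// `place` below are hypothetical names, not part of this header:
//
//   PreparedOp prepared_op = PreparedOp::Prepare(runtime_ctx, *op, place);
//   prepared_op.func(framework::ExecutionContext(
//       prepared_op.op, scope, *prepared_op.dev_ctx, prepared_op.ctx,
//       prepared_op.kernel_configs));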

class OpBase;

/* The wrapper for Variable which holds a Variable and a VarBase of its
 * gradient. This object should be managed entirely by the Python interpreter.
 *
 * Nearly all interfaces should be implemented in C++.
 */
class VarBase {
 public:
  VarBase() : VarBase(new framework::Variable(), new VarBase(true)) {}

  explicit VarBase(bool stop_gradient)
      : VarBase(new framework::Variable(),
                stop_gradient ? nullptr : new VarBase(true), stop_gradient) {}

  VarBase(framework::Variable* var, VarBase* grad)
      : VarBase(var, grad, false) {}

 private:
  VarBase(framework::Variable* var, VarBase* grad, bool stop_gradient)
      : name_(),
        var_desc_(nullptr),
        var_(var),
        grads_(grad),
        block_(nullptr),
        persistable_(false),
        stop_gradient_(stop_gradient),
        pre_op_(nullptr),
        pre_op_out_name_(),
        pre_op_out_idx_(-1) {}

 public:
  virtual ~VarBase() {
    // LOG(ERROR) << "remove var " << name_;

    if (block_ && !persistable_) {
      block_->RemoveVar(name_);
    }

    if (var_) {
      delete var_;
      var_ = nullptr;
    }

    if (grads_) {
      delete grads_;
      grads_ = nullptr;
    }

    pre_op_ = nullptr;
    pre_op_out_idx_ = -1;
  }

  inline OpBase* PreOp() const { return pre_op_; }
  inline int PreOpOutIdx() const { return pre_op_out_idx_; }

  inline void SetStopGradient(bool stop_gradient) {
    stop_gradient_ = stop_gradient;
  }
  inline bool IsStopGradient() const { return stop_gradient_; }

  void RunBackward();

  inline void ResetPreOp(OpBase* op) {
    if (op == pre_op_) {
      // Clear the pre-op info when `op` is this var's pre-op.
      pre_op_ = nullptr;
      pre_op_out_idx_ = -1;
    }
  }

  void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name,
                  int pre_op_out_idx, bool pre_op_stop_gradient) {
    pre_op_ = pre_op;
    pre_op_out_name_ = pre_op_out_name;
    pre_op_out_idx_ = pre_op_out_idx;
    if (pre_op_stop_gradient) {
      stop_gradient_ = pre_op_stop_gradient;
    }
  }

  void ClearGradient() {
    VLOG(1) << "clear gradient of " << var_desc_->Name();
    if (grads_ && grads_->var_ && grads_->var_->IsInitialized()) {
      auto grads_t = grads_->var_->GetMutable<framework::LoDTensor>();
      operators::math::set_constant(
          *(platform::DeviceContextPool::Instance().Get(
              grads_->var_->Get<framework::LoDTensor>().place())),
          grads_t, 0.0);
    }
  }

  framework::LoDTensor& GradValue();

  std::unique_ptr<VarBase> NewVarBase(const platform::Place& dst_place,
                                      const bool blocking) const;

  inline std::string GradName() const {
    PADDLE_ENFORCE(
        var_desc_,
        "Couldn't get gradient variable's name, please call backward() first");
    return string::Sprintf("%s@IGrad", var_desc_->Name());
  }

  std::string name_;
  framework::VarDesc* var_desc_;

  framework::Variable* var_;
  VarBase* grads_;

  framework::BlockDesc* block_;
  bool persistable_;

 private:
  bool stop_gradient_;
  OpBase* pre_op_;
  std::string pre_op_out_name_;
  int pre_op_out_idx_;
};
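
// A minimal usage sketch, assuming a tracer-like caller; `op` and the output
// name "Out" below are hypothetical:
//
//   VarBase* out = new VarBase(/*stop_gradient=*/false);
//   // Record the op that produced `out` so autograd can walk the graph
//   // backwards from this variable.
//   out->TrackPreOp(op, "Out", /*pre_op_out_idx=*/0,
//                   /*pre_op_stop_gradient=*/false);
//   out->RunBackward();  // accumulate gradients into out->grads_
//   framework::LoDTensor& grad = out->GradValue();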

/* The wrapper for OpDesc which holds an OpDesc and the OpDescs of its
 * gradient. This object should be managed entirely by the Python interpreter.
 */
class PYBIND11_HIDDEN OpBase {
 public:
  OpBase()
      : op_desc_(nullptr),
        forward_id_(-1),
        backward_id_(-1),
        trace_id_(-1),
        place_(platform::CPUPlace()),
        backward_hooks_() {}

  virtual ~OpBase() {
    // Reset the pre-op of all output vars.
    for (auto iter : output_vars_) {
      for (VarBase* var : iter.second) {
        var->ResetPreOp(this);
      }
    }

    // remove op desc from block desc
    if (block_) {
      block_->RemoveOpInternal(op_desc_);
    }

    // Release the grad op descs owned by this op.
    for (framework::OpDesc* desc : grad_op_descs_) {
      delete desc;
    }
  }

  std::map<std::string, std::vector<VarBase*>> ApplyGrad();

  void RegisterBackwardHooks(const py::object& callable);

  void InvokeBackwardHooks();

  // Exactly one of `op_desc_` or `forward_id_` is set, never both.
  // A pure Python PyLayer uses `forward_id_`; otherwise `op_desc_` is used.
  framework::OpDesc* op_desc_;
  int forward_id_;

  // When the op has a backward pass, exactly one of `grad_op_descs_` or
  // `backward_id_` is set, never both.
  // Note: each fwd op corresponds to a vector of bwd ops.
  std::vector<framework::OpDesc*> grad_op_descs_;
  int backward_id_;
  int trace_id_;

  platform::Place place_;

  VarBasePtrMap input_vars_;
  VarBasePtrMap output_vars_;
  OpBasePtrMap pre_ops_;
  std::map<std::string, std::vector<int>> pre_ops_out_idx_;

  // Inputs to a vector of bwd ops.
  std::vector<framework::VariableValueMap> grad_input_vars_;
  // Outputs to a vector of bwd ops.
  std::vector<framework::VariableValueMap> grad_output_vars_;

  framework::BlockDesc* block_;

  std::vector<py::object> backward_hooks_;
};
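
// The invariants documented above can be summarized by a hypothetical helper
// (illustration only, not part of this header):
//
//   bool IsPyLayerOp(const OpBase& op) {
//     // A PyLayer-backed op is identified by forward_id_, a traced fluid op
//     // by op_desc_; exactly one of the two is set.
//     return op.op_desc_ == nullptr && op.forward_id_ != -1;
//   }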

class Layer {
 public:
  virtual ~Layer() {}

  virtual std::vector<VarBase> Forward(const std::vector<VarBase>& inputs) {
    std::vector<VarBase> vars;
    return vars;
  }
};
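
// A minimal subclass sketch (illustrative only); a real layer would trace ops
// inside Forward instead of echoing its inputs:
//
//   class MyLayer : public Layer {
//    public:
//     std::vector<VarBase> Forward(const std::vector<VarBase>& inputs) override {
//       return inputs;
//     }
//   };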

class PyLayer {
 public:
  virtual ~PyLayer() {}

  static const char* kFwdInp;
  static const char* kFwdOut;

  static void RegisterFunc(int func_id, const py::object& py_func);

  static int NumFuncs();

  static std::vector<VarBase*> Apply(int func_id,
                                     const std::vector<VarBase*>& inputs);

  static std::vector<framework::Variable*> ApplyGrad(
      int func_id, const std::vector<framework::Variable*>& inputs);

 private:
  static std::vector<framework::Variable*> CallPythonFunc(
      const py::object& callable, const std::vector<framework::Variable*>& ins);
};
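
// A registration/apply sketch from the binding side, assuming `py_forward`
// and `py_backward` are Python callables and the ids are chosen by the caller
// (illustrative only):
//
//   PyLayer::RegisterFunc(forward_id, py_forward);
//   PyLayer::RegisterFunc(backward_id, py_backward);
//   std::vector<VarBase*> outs = PyLayer::Apply(forward_id, inputs);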

}  // namespace imperative
}  // namespace paddle