// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// clang-format off
#include "paddle/fluid/framework/python_headers.h"
// clang-format on

#include <map>            // NOLINT
#include <string>         // NOLINT
#include <vector>         // NOLINT
#include <memory>         // NOLINT
#include <unordered_map>  // NOLINT

#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_desc.h"
#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/operators/math/math_function.h"

#include "paddle/fluid/imperative/type_defs.h"

namespace paddle {
namespace imperative {

class VarBase;

namespace py = ::pybind11;

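// PreparedOp bundles everything a single imperative kernel launch needs: the
// operator, its runtime context, the selected kernel function, the device
// context and any tuned kernel configs.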
class PreparedOp {
 public:
  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             framework::OperatorWithKernel::OpKernelFunc func,
             platform::DeviceContext* dev_ctx,
             std::vector<framework::KernelConfig>* kernel_configs)
      : op(op),
        ctx(ctx),
        func(func),
        dev_ctx(dev_ctx),
        kernel_configs(kernel_configs) {}

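  // Selects the kernel matching the expected kernel key of `op` on `place`;
  // in MKLDNN builds it falls back to a plain kernel when the MKLDNN one is
  // missing.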
  static PreparedOp Prepare(const framework::RuntimeContext& ctx,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto* dev_ctx = pool.Get(place);

    // check if op[type] has kernel registered.
    auto& all_op_kernels = op.AllOpKernels();
    auto kernels_iter = all_op_kernels.find(op.Type());
    if (kernels_iter == all_op_kernels.end()) {
      PADDLE_THROW(
          "There are no kernels which are registered in the %s operator.",
          op.Type());
    }

    framework::OperatorWithKernel::OpKernelMap& kernels = kernels_iter->second;

    auto expected_kernel_key =
        op.GetExpectedKernelType(framework::ExecutionContext(
            op, framework::Scope(), *dev_ctx, ctx, nullptr));
    VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

    auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
    // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
    if (kernel_iter == kernels.end() &&
        expected_kernel_key.library_type_ == framework::LibraryType::kMKLDNN) {
      VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
      expected_kernel_key.library_type_ = framework::LibraryType::kPlain;
      expected_kernel_key.data_layout_ = framework::DataLayout::kAnyLayout;
      kernel_iter = kernels.find(expected_kernel_key);
    }
#endif
    if (kernel_iter == kernels.end()) {
      PADDLE_THROW("op %s does not have a kernel for %s", op.Type(),
                   KernelTypeToString(expected_kernel_key));
    }
    std::vector<framework::KernelConfig>* kernel_configs =
        op.GetKernelConfig(expected_kernel_key);
    return PreparedOp(op, ctx, kernel_iter->second, dev_ctx, kernel_configs);
  }

  inline platform::DeviceContext* GetDeviceContext() const { return dev_ctx; }

  const framework::OperatorBase& op;
  const framework::RuntimeContext& ctx;
  framework::OperatorWithKernel::OpKernelFunc func;
  platform::DeviceContext* dev_ctx;
  std::vector<framework::KernelConfig>* kernel_configs;
};

class OpBase;

/* The wrapper for Variable, which holds a Variable and the VarBase of its
 * gradient. This object should be managed entirely by the Python interpreter.
 *
 * Nearly all of the interface should be implemented in C++.
 */
class VarBase {
 public:
  // Internal interface, create VarBase from an existing variable
  VarBase(const std::string& name, framework::Variable* var, VarBase* grad,
          bool stop_gradient)
      : VarBase(name, var->Get<framework::LoDTensor>().type(),
                var->Get<framework::LoDTensor>().dims(),
                var->Get<framework::LoDTensor>().place(), var, grad,
                stop_gradient, false) {}

  // Python interface
  VarBase(const std::string& name, const framework::proto::VarType::Type dtype,
          const std::vector<int64_t>& shape, const platform::Place& place,
          bool stop_gradient, bool persistable)
      : VarBase(name, dtype, framework::make_ddim(shape), place, stop_gradient,
                persistable) {}

  // Internal interface, create VarBase with a ddim
  VarBase(const std::string& name, const framework::proto::VarType::Type dtype,
          const framework::DDim& shape, const platform::Place& place,
          bool stop_gradient, bool persistable)
      : VarBase(name, dtype, shape, place, nullptr, nullptr, stop_gradient,
                persistable) {}

 private:
  VarBase(const std::string& name, framework::proto::VarType::Type dtype,
          const framework::DDim& shape, const platform::Place& place,
          framework::Variable* var, VarBase* grad, bool stop_gradient,
          bool persistable)
      : name_(name),
        dtype_(dtype),
        place_(place),
        var_(var),
        grads_(grad),
        stop_gradient_(stop_gradient),
        persistable_(persistable),
        pre_op_(nullptr),
        pre_op_out_name_(),
        pre_op_out_idx_(-1) {
    if (!var_) {
      var_ = new framework::Variable();
      auto tensor = var_->GetMutable<framework::LoDTensor>();
      tensor->Resize(shape);
      tensor->mutable_data(place_, dtype_);
    }
  }

 public:
  virtual ~VarBase() {
    if (var_) {
      delete var_;
      var_ = nullptr;
    }

    if (grads_) {
      delete grads_;
      grads_ = nullptr;
    }

    pre_op_ = nullptr;
    pre_op_out_idx_ = -1;
  }

  inline void SetName(const std::string& name) { name_ = name; }
  inline std::string Name() const { return name_; }

  inline std::vector<int64_t> Shape() const {
    if (var_->IsInitialized()) {
      return framework::vectorize(var_->Get<framework::LoDTensor>().dims());
    } else {
      return {};
    }
  }

  inline void SetDType(framework::proto::VarType::Type type) {
    dtype_ = type;
    auto tensor = var_->GetMutable<framework::LoDTensor>();
    tensor->mutable_data(place_, dtype_);
  }
  inline framework::proto::VarType::Type DType() const { return dtype_; }

  inline void SetStopGradient(bool stop_gradient) {
    stop_gradient_ = stop_gradient;
  }
  inline bool IsStopGradient() const { return stop_gradient_; }

  inline void SetPersistable(bool persistable) { persistable_ = persistable; }
  inline bool IsPersistable() const { return persistable_; }

  inline OpBase* PreOp() const { return pre_op_; }
  inline int PreOpOutIdx() const { return pre_op_out_idx_; }

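  // Runs the backward pass starting from this variable.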
  void RunBackward();

  inline void ResetPreOp(OpBase* op) {
    if (op == pre_op_) {
      // clear pre_op info when op equals this var's pre_op
      pre_op_ = nullptr;
      pre_op_out_idx_ = -1;
    }
  }

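  // Records the op that produced this variable and the output slot/index it
  // came from, so the backward pass can walk the graph; a stop-gradient
  // producer also marks this variable as stop-gradient.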
  void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name,
                  int pre_op_out_idx, bool pre_op_stop_gradient) {
    pre_op_ = pre_op;
    pre_op_out_name_ = pre_op_out_name;
    pre_op_out_idx_ = pre_op_out_idx;
    if (pre_op_stop_gradient) {
      stop_gradient_ = pre_op_stop_gradient;
    }
  }

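  // Zeroes the gradient tensor in place; the gradient variable itself is kept.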
  void ClearGradient() {
    VLOG(1) << "clear gradient of " << Name();
    if (grads_ && grads_->var_ && grads_->var_->IsInitialized()) {
      auto grads_t = grads_->var_->GetMutable<framework::LoDTensor>();
      operators::math::set_constant(
          *(platform::DeviceContextPool::Instance().Get(
              grads_->var_->Get<framework::LoDTensor>().place())),
          grads_t, 0.0);
    }
  }

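  // Returns the underlying LoDTensor of the gradient.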
  framework::LoDTensor& GradValue();

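  // Copies this variable's tensor to `dst_place` and wraps it in a new
  // VarBase; when `blocking` is true the copy is synchronous.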
  std::unique_ptr<VarBase> NewVarBase(const platform::Place& dst_place,
                                      const bool blocking) const;

  inline std::string GradName() const {
    return string::Sprintf("%s@IGrad", Name());
  }

  std::string name_;
  framework::proto::VarType::Type dtype_;
  platform::Place place_;

  framework::Variable* var_;
  VarBase* grads_;

 private:
  bool stop_gradient_;
  bool persistable_;

  OpBase* pre_op_;
  std::string pre_op_out_name_;
  int pre_op_out_idx_;
};

/* The wrapper for OpDesc, which holds an OpDesc and the OpDescs of its
 * gradient ops. This object should be managed entirely by the Python
 * interpreter.
 */
class PYBIND11_HIDDEN OpBase {
 public:
  OpBase(const std::string& type)
      : type_(type),
        trace_id_(-1),
        forward_id_(-1),
        backward_id_(-1),
        place_(platform::CPUPlace()),
        backward_hooks_() {}

  virtual ~OpBase() {
    // TODO(minqiyang): remove op_desc from block_desc in tracer
    //
    // reset all output vars' pre op
    for (auto iter : output_vars_) {
      for (VarBase* var : iter.second) {
        var->ResetPreOp(this);
      }
    }

    // release resource
    for (framework::OpDesc* desc : grad_op_descs_) {
      delete desc;
    }
  }

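  // Runs this op's backward (its grad op descs, or the registered Python
  // backward for a PyLayer) and returns the produced gradient variables keyed
  // by slot name.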
  std::map<std::string, std::vector<VarBase*>> ApplyGrad();

  inline std::string Type() const { return type_; }
  inline std::string GradOpType(size_t index) const {
    PADDLE_ENFORCE_NOT_NULL(grad_op_descs_[index]);
    return grad_op_descs_[index]->Type();
  }

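  // Backward hooks are Python callables that are invoked after this op's
  // gradients have been computed.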
  void RegisterBackwardHooks(const py::object& callable);

  void InvokeBackwardHooks();

  void TrackPreOp(const VarBase* inp_var, const std::string& inp_name) {
    if (inp_var->PreOp() && !inp_var->IsStopGradient()) {
      VLOG(3) << "add pre op " << inp_var->PreOp()->Type() << " in slot "
              << inp_name;
      pre_ops_[inp_name].push_back(inp_var->PreOp());
      pre_ops_out_idx_[inp_name].push_back(inp_var->PreOpOutIdx());
    } else {
      VLOG(3) << "no pre op in slot " << inp_name
              << " input var stop_gradient: " << inp_var->IsStopGradient();
      pre_ops_[inp_name].push_back(nullptr);
      // pre_ops_out_idx_[inp_name].push_back(-1);
    }
  }

  std::string type_;
  // One of `trace_id_` or `forward_id_` is set, not both.
  // For a pure Python PyLayer, use `forward_id_`; otherwise, use `trace_id_`.
  int trace_id_;
  int forward_id_;

  // When there is a backward pass, one of `grad_op_descs_` or `backward_id_`
  // is set, not both.
  // Note: each fwd op corresponds to a vector of bwd ops.
  std::vector<framework::OpDesc*> grad_op_descs_;
  int backward_id_;

  platform::Place place_;

  VarBasePtrMap input_vars_;
  VarBasePtrMap output_vars_;
  OpBasePtrMap pre_ops_;
  std::map<std::string, std::vector<int>> pre_ops_out_idx_;

  // Inputs to a vector of bwd ops.
  std::vector<VarBasePtrMap> grad_input_vars_;
  // Outputs to a vector of bwd ops.
  std::vector<VarBasePtrMap> grad_output_vars_;

  std::vector<py::object> backward_hooks_;
};

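// Base class for an imperative layer; subclasses override Forward to produce
// output VarBases from the inputs.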
class Layer {
 public:
  virtual ~Layer() {}

  virtual std::vector<VarBase> Forward(const std::vector<VarBase>& inputs) {
    std::vector<VarBase> vars;
    return vars;
  }
};

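// PyLayer dispatches to user-defined Python functions: forward/backward
// callables are registered under an integer id (RegisterFunc) and invoked
// through CallPythonFunc in Apply/ApplyGrad.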
class PyLayer {
 public:
  virtual ~PyLayer() {}

  static const char* kFwdInp;
  static const char* kFwdOut;

  static void RegisterFunc(int func_id, const py::object& py_func);

  static int NumFuncs();

  static std::vector<framework::Variable*> Apply(
      int func_id, const std::vector<VarBase*>& inputs);

  static std::vector<VarBase*> ApplyGrad(int func_id,
                                         const std::vector<VarBase*>& inputs);

 private:
  static std::vector<framework::Variable*> CallPythonFunc(
      const py::object& callable, const std::vector<VarBase*>& ins);
};

// infer var type context for imperative mode
class PYBIND11_HIDDEN RuntimeInferVarTypeContext
    : public framework::InferVarTypeContext {
 public:
  RuntimeInferVarTypeContext(const imperative::VarBasePtrMap* inputs,
                             imperative::VarBasePtrMap* outputs,
                             const framework::AttributeMap* attrs_map)
      : InferVarTypeContext(nullptr, nullptr),
        inputs_(inputs),
        outputs_(outputs),
        attrs_(attrs_map),
        input_names_(),
        output_names_(),
        var_set_() {
    input_names_.reserve(inputs_->size());
    for (auto& it : *inputs_) {
      for (imperative::VarBase* var : it.second) {
        input_names_[it.first].emplace_back(var->Name());
        var_set_[var->Name()] = var;
      }
    }

    output_names_.reserve(outputs_->size());
    for (auto& it : *outputs_) {
      for (imperative::VarBase* var : it.second) {
        output_names_[it.first].emplace_back(var->Name());
        var_set_[var->Name()] = var;
      }
    }
  }

  virtual ~RuntimeInferVarTypeContext() {}

  framework::Attribute GetAttr(const std::string& name) const override {
    PADDLE_ENFORCE_NOT_NULL(attrs_);
    return attrs_->at(name);
  }

  bool HasVar(const std::string& name) const override {
    return var_set_.count(name) > 0;
  }

  bool HasInput(const std::string& name) const override {
    PADDLE_ENFORCE_NOT_NULL(inputs_);
    return inputs_->count(name) > 0;
  }

  bool HasOutput(const std::string& name) const override {
    PADDLE_ENFORCE_NOT_NULL(outputs_);
    return outputs_->count(name) > 0;
  }

  const std::vector<std::string>& Input(
      const std::string& name) const override {
    return input_names_.at(name);
  }

  const std::vector<std::string>& Output(
      const std::string& name) const override {
    return output_names_.at(name);
  }

  framework::proto::VarType::Type GetType(
      const std::string& name) const override {
    return var_set_.at(name)->DType();
  }

  void SetType(const std::string& name,
               framework::proto::VarType::Type type) override {
    var_set_[name]->SetDType(type);
  }

  framework::proto::VarType::Type GetDataType(
      const std::string& name) const override {
    return var_set_.at(name)->DType();
  }

  void SetDataType(const std::string& name,
                   framework::proto::VarType::Type type) override {
    var_set_[name]->SetDType(type);
  }

  std::vector<framework::proto::VarType::Type> GetDataTypes(
      const std::string& name) const override {
    PADDLE_THROW("GetDataTypes is not supported in runtime InferVarType");
  }

  void SetDataTypes(const std::string& name,
                    const std::vector<framework::proto::VarType::Type>&
                        multiple_data_type) override {
    PADDLE_THROW("SetDataTypes is not supported in runtime InferVarType");
  }

  std::vector<int64_t> GetShape(const std::string& name) const override {
    PADDLE_THROW("Shape is not handled in runtime InferVarType");
  }

  void SetShape(const std::string& name,
                const std::vector<int64_t>& dims) override {
    PADDLE_THROW("Shape is not handled in runtime InferVarType");
  }

  int32_t GetLoDLevel(const std::string& name) const override {
    PADDLE_THROW("LoDLevel is not handled in runtime InferVarType");
  }

  void SetLoDLevel(const std::string& name, int32_t lod_level) override {
    PADDLE_THROW("LoDLevel is not handled in runtime InferVarType");
  }

 private:
  const imperative::VarBasePtrMap* inputs_;
  imperative::VarBasePtrMap* outputs_;
  const framework::AttributeMap* attrs_;
  std::unordered_map<std::string, std::vector<std::string>> input_names_;
  std::unordered_map<std::string, std::vector<std::string>> output_names_;
  std::unordered_map<std::string, imperative::VarBase*> var_set_;
};

}  // namespace imperative
}  // namespace paddle