/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator_kernel_configs.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"

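// gflags flag controlling intra-op parallelism (declared here, defined
// elsewhere; the flag's purpose is inferred from its name).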
DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, that name will be set in Python,
/// but it will be converted to a unique name in scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the newly computed gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change within a given scope, so the RuntimeContext only
/// needs to be created at the first iteration of the Op's execution to save
/// the construction time on later iterations.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variables' shapes in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Kernel priority: defines the fallback order over multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

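// Append kGradVarSuffix to a variable name,
// e.g. GradVarName("x") returns "x@GRAD".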
inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

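// Strip kGradVarSuffix from a gradient variable name,
// e.g. GradOriginalVarName("x@GRAD") returns "x"; a name without the
// suffix is returned unchanged.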
inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}

proto::VarType::Type GetDataTypeOfVar(const Variable* var);
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class OperatorBase;
class ExecutionContext;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry should new an Operator directly. Users
 * should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  /// The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// if scope is not null, also show dimensions of arguments
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
                   "%s should be in AttributeMap", name);
    return boost::get<T>(attrs_.at(name));
  }
  const AttributeMap& Attrs() const { return attrs_; }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument's name described in `op_proto`
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument's name described in `op_proto`
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }
  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

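// A KernelConfig caches per-kernel tuning state. With CUDA it holds a shared
// AlgorithmsCache for the cuDNN convolution forward, backward-data, or
// backward-filter algorithm; without CUDA it is an empty placeholder.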
#ifdef PADDLE_WITH_CUDA
using KernelConfig = boost::variant<
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdDataAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdFilterAlgo_t>>>;
#else
using KernelConfig = boost::variant<boost::blank>;
#endif

using OpKernelConfigsMap =
    std::unordered_map<OpKernelType, std::vector<KernelConfig>,
                       OpKernelType::Hash>;

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx,
                   std::vector<KernelConfig>* configs)
      : op_(op),
        scope_(scope),
        device_context_(device_context),
        ctx_(ctx),
        kernel_configs_(configs) {}

  const OperatorBase& op() const { return op_; }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return op_.Attr<T>(name);
  }

  bool HasInput(const std::string& name) const;

  bool HasOutput(const std::string& name) const;

  size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  const Variable* InputVar(const std::string& name) const;

  Variable* OutputVar(const std::string& name) const;

  const std::vector<const Variable*> MultiInputVar(
      const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto names = op_.Outputs(name);
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

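  // The typed accessors below fetch a variable's payload by input/output
  // name, e.g. ctx.Input<Tensor>("X") (the name "X" is illustrative).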
  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    const std::vector<Variable*>& vars = it->second;
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    const std::vector<Variable*>& vars = it->second;
    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });
    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#ifdef PADDLE_WITH_CUDA
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace()));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  //! Get actual name vector for this input.
  const std::vector<std::string>& Inputs(const std::string& name) const {
    return op_.Inputs(name);
  }

  //! Get actual name vector for this output.
  const std::vector<std::string>& Outputs(const std::string& name) const {
    return op_.Outputs(name);
  }

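  // Allocate a temporary tensor of shape `dim` on the device of `dev_ctx`;
  // the memory is returned to the temporary allocator through the
  // allocation's deleter once the tensor's holder is released.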
  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = platform::DeviceTemporaryAllocator::Instance()
                                  .Get<DevContext>(dev_ctx)
                                  .Allocate(product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE(
        dynamic_cast<platform::TemporaryAllocation*>(allocation_ptr) != nullptr,
        "The AllocationPtr must be TemporaryAllocation.");
    PADDLE_ENFORCE_GE(allocation_ptr->size(),
                      framework::product(dim) * sizeof(T));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }

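  // Fetch the idx-th cached config of the kernel selected for this op
  // (e.g. a shared AlgorithmsCache); fails if no such config was registered.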
  template <typename T>
  T& GetKernelConfig(int idx) const {
    PADDLE_ENFORCE(kernel_configs_ && kernel_configs_->size() > idx,
                   "%s selected kernel doesn't have kernel config %lu <= %d",
                   op_.Type().c_str(), kernel_configs_->size(), idx);
    return *boost::get<std::shared_ptr<T>>(kernel_configs_->at(idx));
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
  mutable std::vector<KernelConfig>* kernel_configs_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of the kernel Run function.
   * Run will get input/output variables, state such as momentum, and
   * device resources such as CUDA streams and cublas handles from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
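
// A minimal sketch of a concrete kernel (illustrative only; "MyReluKernel"
// and the input/output names "X"/"Out" are hypothetical, and registration
// happens elsewhere via the op registry):
//
//   template <typename T>
//   class MyReluKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       // ... compute `out` from `x` on ctx.GetPlace() ...
//     }
//   };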

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static std::unordered_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }

  virtual void InferShape(InferShapeContext* ctx) const {
    Info().infer_shape_(ctx);
  }

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  std::vector<KernelConfig>* GetKernelConfig(const OpKernelType& key) const;

 protected:
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

 private:
  // Indicate the kernel DataType by input data. By default all input data
  // must be the same.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If there is no data
   * that needs to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

 protected:
  mutable OpKernelConfigsMap kernel_configs_map_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool enable_cache_runtime_context = false;
  mutable bool all_kernels_must_compute_runtime_shape = false;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle